This patchset allows QEMU to delay polling and checking the device's
used buffers until either the SVQ is full or the control command shadow
buffers are full, instead of polling and checking immediately after
sending each SVQ control command. This lets QEMU send all the SVQ
control commands in parallel, which yields a noticeable performance improvement.
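The rough flow can be sketched as below; this is only a toy, self-contained
model of the batching idea (the queue size and helper names are made up,
nothing here is QEMU code): commands are queued without waiting, and the
used buffers are polled and checked only when the queue fills up or at the end.

```c
#include <stdio.h>

#define QUEUE_SLOTS 8      /* toy stand-in for the SVQ capacity */

static int pending;        /* commands queued but not yet checked */

static void queue_cmd(int cmd)
{
    /* the real code would call vhost_svq_add() here without polling */
    (void)cmd;
    pending++;
}

static int poll_and_check(void)
{
    /*
     * the real code would poll the SVQ for `pending` elements and check
     * every virtio_net_ctrl_ack; this toy device always acks everything
     */
    int done = pending;
    pending = 0;
    return done;
}

int main(void)
{
    int acked = 0;

    for (int cmd = 0; cmd < 20; cmd++) {
        if (pending == QUEUE_SLOTS) {
            acked += poll_and_check();   /* delayed, batched poll */
        }
        queue_cmd(cmd);                  /* send without waiting */
    }
    acked += poll_and_check();           /* flush the tail */

    printf("acked %d commands\n", acked);
    return 0;
}
```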
I use the vp_vdpa device to simulate a vdpa device, and create 4094 VLANs in
the guest to build a test environment that sends multiple CVQ state load
commands. With this patch series, the load latency for about 4099 CVQ state
load commands improves from 20455 us to 13732 us, i.e.
(20455 - 13732) / 4099 ≈ 1.64 us saved per command.
Note that this patch series should be applied on top of the
patch series "Vhost-vdpa Shadow Virtqueue VLAN support" at [1].
[1]. https://lore.kernel.org/all/cover.1690100802.git.yin31149@gmail.com/
TestStep
========
1. regression testing using vp-vdpa device
- For the L0 guest, boot QEMU with two virtio-net-pci net devices with the
`ctrl_vq`, `ctrl_rx`, `ctrl_rx_extra` features on, with a command line like:
-device virtio-net-pci,disable-legacy=on,disable-modern=off,
iommu_platform=on,mq=on,ctrl_vq=on,guest_announce=off,
indirect_desc=off,queue_reset=off,ctrl_rx=on,ctrl_rx_extra=on,...
- For the L1 guest, apply the patch series and compile the source code, then
start QEMU with two vdpa devices with SVQ mode on and the `ctrl_vq`,
`ctrl_rx`, `ctrl_rx_extra` features enabled, with a command line like:
-netdev type=vhost-vdpa,x-svq=true,...
-device virtio-net-pci,mq=on,guest_announce=off,ctrl_vq=on,
ctrl_rx=on,ctrl_rx_extra=on...
- For the L2 source guest, run the following bash script:
```bash
#!/bin/bash
for idx1 in {0..9}
do
for idx2 in {0..9}
do
for idx3 in {0..6}
do
ip link add macvlan$idx1$idx2$idx3 link eth0 \
    address 4a:30:10:19:$idx1$idx2:1$idx3 type macvlan mode bridge
ip link set macvlan$idx1$idx2$idx3 up
done
done
done
```
- Execute the live migration in the L2 source monitor (see the example HMP command after this list)
- Result
* with this series, QEMU should not trigger any error or warning.
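For illustration only (the destination address and port below are
placeholders; use whatever the destination QEMU is listening on), the
migration can be started from the HMP monitor like this:

```
(qemu) migrate -d tcp:<destination-ip>:4444
```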
2. performance testing using vp-vdpa device
- For the L0 guest, boot QEMU with two virtio-net-pci net devices with the
`ctrl_vq`, `ctrl_vlan` features on, with a command line like:
-device virtio-net-pci,disable-legacy=on,disable-modern=off,
iommu_platform=on,mq=on,ctrl_vq=on,guest_announce=off,
indirect_desc=off,queue_reset=off,ctrl_vlan=on,...
- For the L1 guest, apply the patch series, then apply an additional
patch to record the load time in microseconds, as follows:
```diff
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 6b958d6363..501b510fd2 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -295,7 +295,10 @@ static int vhost_net_start_one(struct vhost_net *net,
}
if (net->nc->info->load) {
+ int64_t start_us = g_get_monotonic_time();
r = net->nc->info->load(net->nc);
+ error_report("vhost_vdpa_net_load() = %ld us",
+ g_get_monotonic_time() - start_us);
if (r < 0) {
goto fail;
}
```
- For the L1 guest, compile the code, and start QEMU with two vdpa devices
with SVQ mode on and the `ctrl_vq`, `ctrl_vlan` features enabled,
with a command line like:
-netdev type=vhost-vdpa,x-svq=true,...
-device virtio-net-pci,mq=on,guest_announce=off,ctrl_vq=on,
ctrl_vlan=on...
- For the L2 source guest, run the following bash script:
```bash
#!/bin/bash
for idx in {1..4094}
do
ip link add link eth0 name vlan$idx type vlan id $idx
done
```
- Execute the live migration in the L2 source monitor (as in step 1)
- Result
* with this series, QEMU should not trigger any warning
or error except something like "vhost_vdpa_net_load() = 13732 us"
* without this series, QEMU should not trigger any warning
or error except something like "vhost_vdpa_net_load() = 20455 us"
ChangeLog
=========
v4:
- reword the subject line, as suggested by Eugenio, in patch
"vhost: Add count argument to vhost_svq_poll()"
- split `in` into `vdpa_in` and `model_in` instead of reusing `in`
in vhost_vdpa_net_handle_ctrl_avail(), as suggested by Eugenio, in patch
"vdpa: Use iovec for vhost_vdpa_net_cvq_add()"
- pack the CVQ command with iov_from_buf() instead of accessing
`out` directly, as suggested by Eugenio, in patch
"vdpa: Avoid using vhost_vdpa_net_load_*() outside vhost_vdpa_net_load()"
- always check the return value of vhost_vdpa_net_svq_poll(),
as suggested by Eugenio, in patch
"vdpa: Move vhost_svq_poll() to the caller of vhost_vdpa_net_cvq_add()"
- use `struct iovec` instead of `void **` as the cursor,
add a vhost_vdpa_net_load_cursor_reset() helper function
to reset the cursors, and refactor vhost_vdpa_net_load_cmd() to prepare
buffers with iov_copy() instead of accessing `in` and `out` directly,
as suggested by Eugenio, in patch
"vdpa: Introduce cursors to vhost_vdpa_net_loadx()"
- rename the argument `cmds_in_flight` to `len` for
vhost_vdpa_net_svq_full(), check the return value of
vhost_vdpa_net_svq_poll() in vhost_vdpa_net_svq_flush(), and
use iov_size(), vhost_vdpa_net_load_cursor_reset()
and iov_discard_front() to update the cursors instead of
accessing them directly, as suggested by Eugenio, in patch
"vdpa: Send cvq state load commands in parallel"
v3: https://lore.kernel.org/all/cover.1689748694.git.yin31149@gmail.com/
- refactor vhost_svq_poll() to accept cmds_in_flight,
as suggested by Jason and Eugenio
- refactor vhost_vdpa_net_cvq_add() so that control command buffers
are not tied to `s->cvq_cmd_out_buffer` and `s->status` and can be
reused, as suggested by Eugenio
- poll and check only when the SVQ is full or the control command
shadow buffers are full
v2: https://lore.kernel.org/all/cover.1683371965.git.yin31149@gmail.com/
- recover accidentally deleted rows
- remove extra newline
- refactor `need_poll_len` to `cmds_in_flight`
- return -EINVAL when vhost_svq_poll() returns 0 or the check
on buffers written by the device fails
- change the type of `in_cursor`, and refactor the
code for updating cursor
- return directly when vhost_vdpa_net_load_{mac,mq}()
returns a failure in vhost_vdpa_net_load()
v1: https://lore.kernel.org/all/cover.1681732982.git.yin31149@gmail.com/
Hawkins Jiawei (8):
vhost: Add count argument to vhost_svq_poll()
vdpa: Use iovec for vhost_vdpa_net_cvq_add()
vhost: Expose vhost_svq_available_slots()
vdpa: Avoid using vhost_vdpa_net_load_*() outside
vhost_vdpa_net_load()
vdpa: Check device ack in vhost_vdpa_net_load_rx_mode()
vdpa: Move vhost_svq_poll() to the caller of vhost_vdpa_net_cvq_add()
vdpa: Introduce cursors to vhost_vdpa_net_loadx()
vdpa: Send cvq state load commands in parallel
hw/virtio/vhost-shadow-virtqueue.c | 38 +--
hw/virtio/vhost-shadow-virtqueue.h | 3 +-
net/vhost-vdpa.c | 380 +++++++++++++++++++----------
3 files changed, 276 insertions(+), 145 deletions(-)
--
2.25.1
On Tue, Aug 29, 2023 at 01:54:42PM +0800, Hawkins Jiawei wrote:
> This patchset allows QEMU to delay polling and checking the device
> used buffer until either the SVQ is full or control commands shadow
> buffers are full, instead of polling and checking immediately after
> sending each SVQ control command, so that QEMU can send all the SVQ
> control commands in parallel, which have better performance improvement.
>
> Note that this patch should be based on
> patch "Vhost-vdpa Shadow Virtqueue VLAN support" at [1].
>
> [1]. https://lore.kernel.org/all/cover.1690100802.git.yin31149@gmail.com/

Eugenio, you acked patch 1 but it's been a while - care to review the
rest of the patchset?
On Sun, Oct 1, 2023 at 9:56 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, Aug 29, 2023 at 01:54:42PM +0800, Hawkins Jiawei wrote:
> > This patchset allows QEMU to delay polling and checking the device
> > used buffer until either the SVQ is full or control commands shadow
> > buffers are full, instead of polling and checking immediately after
> > sending each SVQ control command, so that QEMU can send all the SVQ
> > control commands in parallel, which have better performance improvement.
>
> Eugenio, you acked patch 1 but it's been a while - care to review the
> rest of the patchset?
>

I'm sorry, I was under the impression that this should go after
optimizing the memory maps. I'll continue with the revision.

Thanks!
On 2023/8/29 13:54, Hawkins Jiawei wrote:
> Note that this patch should be based on
> patch "Vhost-vdpa Shadow Virtqueue VLAN support" at [1].
>
> [1]. https://lore.kernel.org/all/cover.1690100802.git.yin31149@gmail.com/

Sorry for the outdated link. The correct link for this patch should be
https://lore.kernel.org/all/cover.1690106284.git.yin31149@gmail.com/

Thanks!
Next patches in this series will no longer perform an
immediate poll and check of the device's used buffers
for each CVQ state load command. Instead, they will
send CVQ state load commands in parallel by polling
multiple pending buffers at once.
To achieve this, this patch refactors vhost_svq_poll()
to accept a new argument `num`, which allows vhost_svq_poll()
to wait for the device to use multiple elements
rather than polling for a single element.
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
---
v4:
- refactor subject line suggested by Eugenio
v3: https://lore.kernel.org/all/77c1d8b358644b49992e6dbca55a5c9e62c941a8.1689748694.git.yin31149@gmail.com/
hw/virtio/vhost-shadow-virtqueue.c | 36 ++++++++++++++++++------------
hw/virtio/vhost-shadow-virtqueue.h | 2 +-
net/vhost-vdpa.c | 2 +-
3 files changed, 24 insertions(+), 16 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 49e5aed931..e731b1d2ea 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -514,29 +514,37 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
}
/**
- * Poll the SVQ for one device used buffer.
+ * Poll the SVQ to wait for the device to use the specified number
+ * of elements and return the total length written by the device.
*
* This function race with main event loop SVQ polling, so extra
* synchronization is needed.
*
- * Return the length written by the device.
+ * @svq: The svq
+ * @num: The number of elements that need to be used
*/
-size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
+size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
{
- int64_t start_us = g_get_monotonic_time();
- uint32_t len = 0;
+ size_t len = 0;
+ uint32_t r;
- do {
- if (vhost_svq_more_used(svq)) {
- break;
- }
+ while (num--) {
+ int64_t start_us = g_get_monotonic_time();
- if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
- return 0;
- }
- } while (true);
+ do {
+ if (vhost_svq_more_used(svq)) {
+ break;
+ }
+
+ if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
+ return len;
+ }
+ } while (true);
+
+ vhost_svq_get_buf(svq, &r);
+ len += r;
+ }
- vhost_svq_get_buf(svq, &len);
return len;
}
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 6efe051a70..5bce67837b 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -119,7 +119,7 @@ void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
size_t out_num, const struct iovec *in_sg, size_t in_num,
VirtQueueElement *elem);
-size_t vhost_svq_poll(VhostShadowVirtqueue *svq);
+size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num);
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 73e9063fa0..3acda8591a 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -625,7 +625,7 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
* descriptor. Also, we need to take the answer before SVQ pulls by itself,
* when BQL is released
*/
- return vhost_svq_poll(svq);
+ return vhost_svq_poll(svq, 1);
}
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
--
2.25.1
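As a usage illustration only (not part of this patch), a later caller that
queues several commands with vhost_svq_add() without polling each one can
now wait for all of them in a single call; the wrapper below and its
in-flight bookkeeping are hypothetical, only vhost_svq_poll() itself comes
from this patch.

```c
/*
 * Hypothetical sketch: wait for `cmds_in_flight` previously queued
 * commands at once using the new `num` argument of vhost_svq_poll().
 */
static ssize_t example_wait_for_used(VhostShadowVirtqueue *svq,
                                     size_t cmds_in_flight)
{
    /* Returns the total length written by the device across all elements */
    return vhost_svq_poll(svq, cmds_in_flight);
}
```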
Next patches in this series will no longer perform an
immediate poll and check of the device's used buffers
for each CVQ state load command. Consequently, there
will be multiple pending buffers in the shadow VirtQueue,
so every control command must have its own buffer.
To achieve this, this patch refactors vhost_vdpa_net_cvq_add()
to accept `struct iovec` arguments, which removes the coupling of
control commands to `s->cvq_cmd_out_buffer` and `s->status`,
allowing each command to use its own buffer.
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
---
v4:
- split `in` to `vdpa_in` and `model_in` instead of reusing `in`
in vhost_vdpa_net_handle_ctrl_avail() suggested by Eugenio
v3: https://lore.kernel.org/all/b1d473772ec4bcb254ab0d12430c9b1efe758606.1689748694.git.yin31149@gmail.com/
net/vhost-vdpa.c | 39 ++++++++++++++++++++++-----------------
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 3acda8591a..a875767ee9 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -596,22 +596,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
vhost_vdpa_net_client_stop(nc);
}
-static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
- size_t in_len)
+static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
+ const struct iovec *out_sg, size_t out_num,
+ const struct iovec *in_sg, size_t in_num)
{
- /* Buffers for the device */
- const struct iovec out = {
- .iov_base = s->cvq_cmd_out_buffer,
- .iov_len = out_len,
- };
- const struct iovec in = {
- .iov_base = s->status,
- .iov_len = sizeof(virtio_net_ctrl_ack),
- };
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
int r;
- r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
+ r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
if (unlikely(r != 0)) {
if (unlikely(r == -ENOSPC)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
@@ -637,6 +629,15 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
.cmd = cmd,
};
size_t data_size = iov_size(data_sg, data_num);
+ /* Buffers for the device */
+ const struct iovec out = {
+ .iov_base = s->cvq_cmd_out_buffer,
+ .iov_len = sizeof(ctrl) + data_size,
+ };
+ const struct iovec in = {
+ .iov_base = s->status,
+ .iov_len = sizeof(*s->status),
+ };
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
@@ -647,8 +648,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
iov_to_buf(data_sg, data_num, 0,
s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
- return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
- sizeof(virtio_net_ctrl_ack));
+ return vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
}
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
@@ -1222,10 +1222,15 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
.iov_base = s->cvq_cmd_out_buffer,
};
/* in buffer used for device model */
- const struct iovec in = {
+ const struct iovec model_in = {
.iov_base = &status,
.iov_len = sizeof(status),
};
+ /* in buffer used for vdpa device */
+ const struct iovec vdpa_in = {
+ .iov_base = s->status,
+ .iov_len = sizeof(*s->status),
+ };
ssize_t dev_written = -EINVAL;
out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
@@ -1259,7 +1264,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
goto out;
}
} else {
- dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
+ dev_written = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
if (unlikely(dev_written < 0)) {
goto out;
}
@@ -1275,7 +1280,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
}
status = VIRTIO_NET_ERR;
- virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
+ virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
if (status != VIRTIO_NET_OK) {
error_report("Bad CVQ processing in model");
}
--
2.25.1
On Tue, Aug 29, 2023 at 7:55 AM Hawkins Jiawei <yin31149@gmail.com> wrote:
>
> Next patches in this series will no longer perform an
> immediate poll and check of the device's used buffers
> for each CVQ state load command. Consequently, there
> will be multiple pending buffers in the shadow VirtQueue,
> making it a must for every control command to have its
> own buffer.
>
> To achieve this, this patch refactor vhost_vdpa_net_cvq_add()
> to accept `struct iovec`, which eliminates the coupling of
> control commands to `s->cvq_cmd_out_buffer` and `s->status`,
> allowing them to use their own buffer.
>
> Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>

Acked-by: Eugenio Pérez <eperezma@redhat.com>
Next patches in this series will delay the polling
and checking of buffers until either the SVQ is
full or the control command shadow buffers are full,
instead of performing an immediate poll and check of
the device's used buffers for each CVQ state load command.
To achieve this, this patch exposes
vhost_svq_available_slots() and introduces a helper function,
allowing QEMU to know whether the SVQ is full.
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
---
hw/virtio/vhost-shadow-virtqueue.c | 2 +-
hw/virtio/vhost-shadow-virtqueue.h | 1 +
net/vhost-vdpa.c | 9 +++++++++
3 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index e731b1d2ea..fc5f408f77 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -66,7 +66,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp)
*
* @svq: The svq
*/
-static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
+uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
{
return svq->num_free;
}
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 5bce67837b..19c842a15b 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -114,6 +114,7 @@ typedef struct VhostShadowVirtqueue {
bool vhost_svq_valid_features(uint64_t features, Error **errp);
+uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq);
void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
const VirtQueueElement *elem, uint32_t len);
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index a875767ee9..e6342b213f 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -620,6 +620,13 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
return vhost_svq_poll(svq, 1);
}
+/* Convenience wrapper to get number of available SVQ descriptors */
+static uint16_t vhost_vdpa_net_svq_available_slots(VhostVDPAState *s)
+{
+ VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
+ return vhost_svq_available_slots(svq);
+}
+
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
uint8_t cmd, const struct iovec *data_sg,
size_t data_num)
@@ -640,6 +647,8 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
};
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
+ /* Each CVQ command has one out descriptor and one in descriptor */
+ assert(vhost_vdpa_net_svq_available_slots(s) >= 2);
/* pack the CVQ command header */
memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
--
2.25.1
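For context, a minimal sketch of how later patches in this series are
expected to use this slot check (the flush helper and the `cmds_in_flight`
bookkeeping are placeholder names here; their exact form is defined by the
later patches, not by this one):

```c
/*
 * Illustrative sketch only: flush the pending CVQ commands when there
 * is no room left for another out/in descriptor pair.
 * vhost_vdpa_net_svq_flush() and `cmds_in_flight` are placeholder names.
 */
static int example_maybe_flush(VhostVDPAState *s, size_t *cmds_in_flight)
{
    /* Each CVQ command needs one out descriptor and one in descriptor */
    if (vhost_vdpa_net_svq_available_slots(s) < 2) {
        ssize_t r = vhost_vdpa_net_svq_flush(s, *cmds_in_flight);
        if (unlikely(r < 0)) {
            return r;
        }
        *cmds_in_flight = 0;
    }
    return 0;
}
```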
On Tue, Aug 29, 2023 at 7:55 AM Hawkins Jiawei <yin31149@gmail.com> wrote:
>
> To achieve this, this patch exposes
> vhost_svq_available_slots() and introduces a helper function,
> allowing QEMU to know whether the SVQ is full.
>
> +uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq);

I think it is ok to split this export in its own patch. If you decide
to do it that way, you can add my Acked-by.

> +/* Convenience wrapper to get number of available SVQ descriptors */
> +static uint16_t vhost_vdpa_net_svq_available_slots(VhostVDPAState *s)
> +{
> +    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

This is not really generic enough for all VhostVDPAState, as dataplane
ones have two svqs.

I think the best is to just inline the function in the caller, as
there is only one, isn't it? If not, would it work to just replace
_net_ by _cvq_ or similar?

>      assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
> +    /* Each CVQ command has one out descriptor and one in descriptor */
> +    assert(vhost_vdpa_net_svq_available_slots(s) >= 2);

I think we should remove this assertion. By the end of the series
there is an "if" that checks explicitly for the opposite condition and
flushes the queue in that case, so the code can never reach it.
On 2023/10/4 01:44, Eugenio Perez Martin wrote:
> I think it is ok to split this export in its own patch. If you decide
> to do it that way, you can add my Acked-by.

I will split this into its own patch, thanks for your suggestion!

> This is not really generic enough for all VhostVDPAState, as dataplane
> ones have two svqs.
>
> I think the best is to just inline the function in the caller, as
> there is only one, isn't it? If not, would it work to just replace
> _net_ by _cvq_ or similar?

Yes, there should be only one user of this function, so I will inline
the function in the caller.

> I think we should remove this assertion. By the end of the series
> there is an "if" that checks explicitly for the opposite condition and
> flushes the queue in that case, so the code can never reach it.

Yes, you are right. I will remove this assertion.

Thanks!
Next patches in this series will refactor vhost_vdpa_net_load_cmd()
to iterate through the control commands shadow buffers, allowing QEMU
to send CVQ state load commands in parallel at device startup.
Considering that QEMU always forwards CVQ commands serially
outside of vhost_vdpa_net_load(), it is cleaner to send these
CVQ commands directly without invoking the vhost_vdpa_net_load_*() helpers.
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
---
v4:
- pack CVQ command by iov_from_buf() instead of accessing
`out` directly suggested by Eugenio
v3: https://lore.kernel.org/all/428a8fac2a29b37757fa15ca747be93c0226cb1f.1689748694.git.yin31149@gmail.com/
net/vhost-vdpa.c | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index e6342b213f..7c67063469 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -1097,12 +1097,14 @@ static NetClientInfo net_vhost_vdpa_cvq_info = {
*/
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
VirtQueueElement *elem,
- struct iovec *out)
+ struct iovec *out,
+ const struct iovec *in)
{
struct virtio_net_ctrl_mac mac_data, *mac_ptr;
struct virtio_net_ctrl_hdr *hdr_ptr;
uint32_t cursor;
ssize_t r;
+ uint8_t on = 1;
/* parse the non-multicast MAC address entries from CVQ command */
cursor = sizeof(*hdr_ptr);
@@ -1150,7 +1152,15 @@ static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
* filter table to the vdpa device, it should send the
* VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
*/
- r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
+ cursor = 0;
+ hdr_ptr = out->iov_base;
+ out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
+ assert(out->iov_len < vhost_vdpa_net_cvq_cmd_page_len());
+
+ hdr_ptr->class = VIRTIO_NET_CTRL_RX;
+ hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
+ iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
+ r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
if (unlikely(r < 0)) {
return r;
}
@@ -1268,7 +1278,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
* the CVQ command direclty.
*/
dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
- &out);
+ &out, &vdpa_in);
if (unlikely(dev_written < 0)) {
goto out;
}
--
2.25.1
On Tue, Aug 29, 2023 at 7:55 AM Hawkins Jiawei <yin31149@gmail.com> wrote:
>
> Considering that QEMU always forwards the CVQ command serialized
> outside of vhost_vdpa_net_load(), it is more elegant to send the
> CVQ commands directly without invoking vhost_vdpa_net_load_*() helpers.
>
> -    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
> +    cursor = 0;

I think this is redundant, as "cursor" is not used by the new code and
it is already set to 0 before its usage, isn't it?

> +    hdr_ptr = out->iov_base;
> +    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
> +    assert(out->iov_len < vhost_vdpa_net_cvq_cmd_page_len());
> +

I think we can assume this assertion is never hit, as out->iov_len is
controlled by this function.

Apart from these nits,

Acked-by: Eugenio Pérez <eperezma@redhat.com>
On 2023/10/4 01:48, Eugenio Perez Martin wrote:
> I think this is redundant, as "cursor" is not used by the new code and
> it is already set to 0 before its usage, isn't it?

You are right, I will remove this code in the v5 patch.

> I think we can assume this assertion is never hit, as out->iov_len is
> controlled by this function.

Thanks for your suggestion, I will remove this assertion.

> Apart from these nits,
>
> Acked-by: Eugenio Pérez <eperezma@redhat.com>

Thanks!
Considering that vhost_vdpa_net_load_rx_mode() is now only called
from within vhost_vdpa_net_load_rx(), this patch refactors
vhost_vdpa_net_load_rx_mode() to check the device's ack itself,
simplifying the callers and improving maintainability.
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
---
net/vhost-vdpa.c | 76 ++++++++++++++++++++----------------------------
1 file changed, 31 insertions(+), 45 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 7c67063469..116a06cc45 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -814,14 +814,24 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
.iov_base = &on,
.iov_len = sizeof(on),
};
- return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
- cmd, &data, 1);
+ ssize_t dev_written;
+
+ dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
+ cmd, &data, 1);
+ if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+
+ return 0;
}
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
const VirtIONet *n)
{
- ssize_t dev_written;
+ ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
return 0;
@@ -846,13 +856,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (!n->mac_table.uni_overflow && !n->promisc) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_PROMISC, 0);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 0);
+ if (unlikely(r < 0)) {
+ return r;
}
}
@@ -874,13 +880,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->mac_table.multi_overflow || n->allmulti) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
@@ -899,13 +901,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->alluni) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_ALLUNI, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLUNI, 1);
+ if (r < 0) {
+ return r;
}
}
@@ -920,13 +918,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nomulti) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_NOMULTI, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOMULTI, 1);
+ if (r < 0) {
+ return r;
}
}
@@ -941,13 +935,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nouni) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_NOUNI, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOUNI, 1);
+ if (r < 0) {
+ return r;
}
}
@@ -962,13 +952,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nobcast) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_NOBCAST, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOBCAST, 1);
+ if (r < 0) {
+ return r;
}
}
--
2.25.1
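For readers skimming the hunks above, the net effect on each caller can be
condensed as below. This is a reconstruction from the diff for illustration
only, not a copy of the resulting tree:
```c
/*
 * Before this patch: every caller of vhost_vdpa_net_load_rx_mode()
 * had to check the device's ack itself.
 */
dev_written = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 0);
if (unlikely(dev_written < 0)) {
    return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
    return -EIO;
}

/*
 * After this patch: the ack check lives inside
 * vhost_vdpa_net_load_rx_mode(), so callers only propagate errors.
 */
r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 0);
if (unlikely(r < 0)) {
    return r;
}
```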
This patch moves vhost_svq_poll() to the callers of
vhost_vdpa_net_cvq_add() and introduces a helper function,
vhost_vdpa_net_svq_poll(), for that purpose.
With this change, the next patches in this series are able to
refactor vhost_vdpa_net_load_*() to delay the polling and
checking process until either the SVQ is full or the control
commands shadow buffers are full.
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
---
v4:
- always check the return value of vhost_vdpa_net_svq_poll()
suggested by Eugenio
v3: https://lore.kernel.org/all/152177c4e7082236fba9d31d535e40f8c2984349.1689748694.git.yin31149@gmail.com/
net/vhost-vdpa.c | 53 +++++++++++++++++++++++++++++++++++++++---------
1 file changed, 43 insertions(+), 10 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 116a06cc45..d9b8b3cf6c 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -609,15 +609,21 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
__func__);
}
- return r;
}
- /*
- * We can poll here since we've had BQL from the time we sent the
- * descriptor. Also, we need to take the answer before SVQ pulls by itself,
- * when BQL is released
- */
- return vhost_svq_poll(svq, 1);
+ return r;
+}
+
+/*
+ * Convenience wrapper to poll SVQ for multiple control commands.
+ *
+ * Caller should hold the BQL when invoking this function, and should take
+ * the answer before SVQ pulls by itself when BQL is released.
+ */
+static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
+{
+ VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
+ return vhost_svq_poll(svq, cmds_in_flight);
}
/* Convenience wrapper to get number of available SVQ descriptors */
@@ -645,6 +651,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
.iov_base = s->status,
.iov_len = sizeof(*s->status),
};
+ ssize_t r;
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
/* Each CVQ command has one out descriptor and one in descriptor */
@@ -657,7 +664,16 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
iov_to_buf(data_sg, data_num, 0,
s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
- return vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
+ r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+
+ /*
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ return vhost_vdpa_net_svq_poll(s, 1);
}
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
@@ -1150,6 +1166,15 @@ static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
if (unlikely(r < 0)) {
return r;
}
+
+ /*
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ r = vhost_vdpa_net_svq_poll(s, 1);
+ if (unlikely(r < sizeof(*s->status))) {
+ return r;
+ }
if (*s->status != VIRTIO_NET_OK) {
return sizeof(*s->status);
}
@@ -1269,10 +1294,18 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
goto out;
}
} else {
- dev_written = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
- if (unlikely(dev_written < 0)) {
+ ssize_t r;
+ r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
+ if (unlikely(r < 0)) {
+ dev_written = r;
goto out;
}
+
+ /*
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ dev_written = vhost_vdpa_net_svq_poll(s, 1);
}
if (unlikely(dev_written < sizeof(status))) {
--
2.25.1
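The resulting calling convention, reconstructed from the hunks above purely
for illustration: vhost_vdpa_net_cvq_add() now only exposes the buffers to
the device, and each caller polls for the used buffer itself through the new
helper:
```c
/* Enqueue only; no implicit polling inside vhost_vdpa_net_cvq_add(). */
r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
if (unlikely(r < 0)) {
    return r;
}

/*
 * Safe to poll here: the BQL has been held since the descriptor was made
 * available, so SVQ cannot take the answer by itself when BQL is released.
 */
dev_written = vhost_vdpa_net_svq_poll(s, 1);
```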
This patch introduces two new arguments, `out_cursor`
and `in_cursor`, to the vhost_vdpa_net_load_*() helpers.
Additionally, it includes a helper function,
vhost_vdpa_net_load_cursor_reset(), for resetting these
cursors.

Furthermore, this patch refactors vhost_vdpa_net_load_cmd()
so that it prepares the buffers for the device using the
cursor arguments, instead of directly accessing the
`s->cvq_cmd_out_buffer` and `s->status` fields.

With these changes, the next patches in this series can
refactor vhost_vdpa_net_load_cmd() to iterate through the
control commands shadow buffers, allowing QEMU to send CVQ
state load commands in parallel at device startup.
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
---
v4:
- use `struct iovec` instead of `void **` as cursor
suggested by Eugenio
- add vhost_vdpa_net_load_cursor_reset() helper function
to reset the cursors
- refactor vhost_vdpa_net_load_cmd() to prepare buffers
by iov_copy() instead of accessing `in` and `out` directly
suggested by Eugenio
v3: https://lore.kernel.org/all/bf390934673f2b613359ea9d7ac6c89199c31384.1689748694.git.yin31149@gmail.com/
net/vhost-vdpa.c | 114 ++++++++++++++++++++++++++++++++---------------
1 file changed, 77 insertions(+), 37 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index d9b8b3cf6c..a71e8c9090 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -633,7 +633,22 @@ static uint16_t vhost_vdpa_net_svq_available_slots(VhostVDPAState *s)
return vhost_svq_available_slots(svq);
}
-static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
+static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
+{
+ /* reset the cursor of the output buffer for the device */
+ out_cursor->iov_base = s->cvq_cmd_out_buffer;
+ out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
+
+ /* reset the cursor of the in buffer for the device */
+ in_cursor->iov_base = s->status;
+ in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
+}
+
+static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor, uint8_t class,
uint8_t cmd, const struct iovec *data_sg,
size_t data_num)
{
@@ -641,28 +656,25 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
.class = class,
.cmd = cmd,
};
- size_t data_size = iov_size(data_sg, data_num);
- /* Buffers for the device */
- const struct iovec out = {
- .iov_base = s->cvq_cmd_out_buffer,
- .iov_len = sizeof(ctrl) + data_size,
- };
- const struct iovec in = {
- .iov_base = s->status,
- .iov_len = sizeof(*s->status),
- };
+ size_t data_size = iov_size(data_sg, data_num),
+ cmd_size = sizeof(ctrl) + data_size;
+ struct iovec out, in;
ssize_t r;
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
/* Each CVQ command has one out descriptor and one in descriptor */
assert(vhost_vdpa_net_svq_available_slots(s) >= 2);
- /* pack the CVQ command header */
- memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
+ /* Prepare the buffer for out descriptor for the device */
+ iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
+ /* Prepare the buffer for in descriptor for the device. */
+ iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));
+ /* pack the CVQ command header */
+ iov_from_buf(&out, 1, 0, &ctrl, sizeof(ctrl));
/* pack the CVQ command command-specific-data */
iov_to_buf(data_sg, data_num, 0,
- s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
+ out.iov_base + sizeof(ctrl), data_size);
r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
if (unlikely(r < 0)) {
@@ -676,14 +688,17 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
return vhost_vdpa_net_svq_poll(s, 1);
}
-static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
+static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
const struct iovec data = {
.iov_base = (void *)n->mac,
.iov_len = sizeof(n->mac),
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
+ ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_ADDR_SET,
&data, 1);
if (unlikely(dev_written < 0)) {
@@ -735,7 +750,7 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
.iov_len = mul_macs_size,
},
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
+ ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
data, ARRAY_SIZE(data));
@@ -750,7 +765,9 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
}
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
- const VirtIONet *n)
+ const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
struct virtio_net_ctrl_mq mq;
ssize_t dev_written;
@@ -764,7 +781,8 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
.iov_base = &mq,
.iov_len = sizeof(mq),
};
- dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
+ dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
&data, 1);
if (unlikely(dev_written < 0)) {
@@ -778,7 +796,9 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
}
static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
- const VirtIONet *n)
+ const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
uint64_t offloads;
ssize_t dev_written;
@@ -809,7 +829,8 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
.iov_base = &offloads,
.iov_len = sizeof(offloads),
};
- dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+ dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
&data, 1);
if (unlikely(dev_written < 0)) {
@@ -823,6 +844,8 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
}
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor,
uint8_t cmd,
uint8_t on)
{
@@ -832,7 +855,8 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
};
ssize_t dev_written;
- dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
+ dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX,
cmd, &data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
@@ -845,7 +869,9 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
}
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
- const VirtIONet *n)
+ const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
ssize_t r;
@@ -872,7 +898,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (!n->mac_table.uni_overflow && !n->promisc) {
- r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 0);
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_PROMISC, 0);
if (unlikely(r < 0)) {
return r;
}
@@ -896,7 +923,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->mac_table.multi_overflow || n->allmulti) {
- r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
if (unlikely(r < 0)) {
return r;
}
@@ -917,7 +945,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->alluni) {
- r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLUNI, 1);
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_ALLUNI, 1);
if (r < 0) {
return r;
}
@@ -934,7 +963,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nomulti) {
- r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOMULTI, 1);
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_NOMULTI, 1);
if (r < 0) {
return r;
}
@@ -951,7 +981,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nouni) {
- r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOUNI, 1);
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_NOUNI, 1);
if (r < 0) {
return r;
}
@@ -968,7 +999,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nobcast) {
- r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOBCAST, 1);
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_NOBCAST, 1);
if (r < 0) {
return r;
}
@@ -979,13 +1011,16 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor,
uint16_t vid)
{
const struct iovec data = {
.iov_base = &vid,
.iov_len = sizeof(vid),
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
+ ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD,
&data, 1);
if (unlikely(dev_written < 0)) {
@@ -999,7 +1034,9 @@ static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
}
static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
- const VirtIONet *n)
+ const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
int r;
@@ -1010,7 +1047,8 @@ static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
for (int i = 0; i < MAX_VLAN >> 5; i++) {
for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
if (n->vlans[i] & (1U << j)) {
- r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
+ r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
+ in_cursor, (i << 5) + j);
if (unlikely(r != 0)) {
return r;
}
@@ -1027,6 +1065,7 @@ static int vhost_vdpa_net_load(NetClientState *nc)
struct vhost_vdpa *v = &s->vhost_vdpa;
const VirtIONet *n;
int r;
+ struct iovec out_cursor, in_cursor;
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
@@ -1035,23 +1074,24 @@ static int vhost_vdpa_net_load(NetClientState *nc)
}
n = VIRTIO_NET(v->dev->vdev);
- r = vhost_vdpa_net_load_mac(s, n);
+ vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
+ r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
if (unlikely(r < 0)) {
return r;
}
- r = vhost_vdpa_net_load_mq(s, n);
+ r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
- r = vhost_vdpa_net_load_offloads(s, n);
+ r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
- r = vhost_vdpa_net_load_rx(s, n);
+ r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
- r = vhost_vdpa_net_load_vlan(s, n);
+ r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
--
2.25.1
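Condensing the hunks above for illustration, buffer preparation inside
vhost_vdpa_net_load_cmd() now goes through the cursors rather than through
`s->cvq_cmd_out_buffer` and `s->status` directly. This is a reconstruction
of the relevant steps, with the surrounding variables (cmd_size, ctrl,
data_sg, data_num, data_size) taken from the function context in the diff:
```c
struct iovec out, in;

/* out spans cmd_size bytes starting at the current output cursor */
iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
/* in spans one status byte starting at the current input cursor */
iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

/* pack header and command-specific data into the carved-out buffer */
iov_from_buf(&out, 1, 0, &ctrl, sizeof(ctrl));
iov_to_buf(data_sg, data_num, 0, out.iov_base + sizeof(ctrl), data_size);

r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
```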
On Tue, Aug 29, 2023 at 7:55 AM Hawkins Jiawei <yin31149@gmail.com> wrote: > > This patch introduces two new arugments, `out_cursor` > and `in_cursor`, to vhost_vdpa_net_loadx(). Addtionally, > it includes a helper function > vhost_vdpa_net_load_cursor_reset() for resetting these > cursors. > > Furthermore, this patch refactors vhost_vdpa_net_load_cmd() > so that vhost_vdpa_net_load_cmd() prepares buffers > for the device using the cursors arguments, instead > of directly accesses `s->cvq_cmd_out_buffer` and > `s->status` fields. > > By making these change, next patches in this series > can refactor vhost_vdpa_net_load_cmd() directly to > iterate through the control commands shadow buffers, > allowing QEMU to send CVQ state load commands in parallel > at device startup. > > Signed-off-by: Hawkins Jiawei <yin31149@gmail.com> > --- > v4: > - use `struct iovec` instead of `void **` as cursor > suggested by Eugenio > - add vhost_vdpa_net_load_cursor_reset() helper function > to reset the cursors > - refactor vhost_vdpa_net_load_cmd() to prepare buffers > by iov_copy() instead of accessing `in` and `out` directly > suggested by Eugenio > > v3: https://lore.kernel.org/all/bf390934673f2b613359ea9d7ac6c89199c31384.1689748694.git.yin31149@gmail.com/ > > net/vhost-vdpa.c | 114 ++++++++++++++++++++++++++++++++--------------- > 1 file changed, 77 insertions(+), 37 deletions(-) > > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c > index d9b8b3cf6c..a71e8c9090 100644 > --- a/net/vhost-vdpa.c > +++ b/net/vhost-vdpa.c > @@ -633,7 +633,22 @@ static uint16_t vhost_vdpa_net_svq_available_slots(VhostVDPAState *s) > return vhost_svq_available_slots(svq); > } > > -static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class, > +static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s, > + struct iovec *out_cursor, > + struct iovec *in_cursor) > +{ > + /* reset the cursor of the output buffer for the device */ > + out_cursor->iov_base = s->cvq_cmd_out_buffer; > + out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); > + > + /* reset the cursor of the in buffer for the device */ > + in_cursor->iov_base = s->status; > + in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); > +} > + > +static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, > + struct iovec *out_cursor, > + struct iovec *in_cursor, uint8_t class, > uint8_t cmd, const struct iovec *data_sg, > size_t data_num) > { > @@ -641,28 +656,25 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class, > .class = class, > .cmd = cmd, > }; > - size_t data_size = iov_size(data_sg, data_num); > - /* Buffers for the device */ > - const struct iovec out = { > - .iov_base = s->cvq_cmd_out_buffer, > - .iov_len = sizeof(ctrl) + data_size, > - }; > - const struct iovec in = { > - .iov_base = s->status, > - .iov_len = sizeof(*s->status), > - }; > + size_t data_size = iov_size(data_sg, data_num), > + cmd_size = sizeof(ctrl) + data_size; > + struct iovec out, in; > ssize_t r; > > assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl)); > /* Each CVQ command has one out descriptor and one in descriptor */ > assert(vhost_vdpa_net_svq_available_slots(s) >= 2); > > - /* pack the CVQ command header */ > - memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl)); > + /* Prepare the buffer for out descriptor for the device */ > + iov_copy(&out, 1, out_cursor, 1, 0, cmd_size); I may be missing something here, but cmd_size should be the bytes available in "out", so we don't overrun it. 
> + /* Prepare the buffer for in descriptor for the device. */ > + iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status)); > Same here, although it is impossible for the moment to overrun it as all cmds only return one byte. > + /* pack the CVQ command header */ > + iov_from_buf(&out, 1, 0, &ctrl, sizeof(ctrl)); > /* pack the CVQ command command-specific-data */ > iov_to_buf(data_sg, data_num, 0, > - s->cvq_cmd_out_buffer + sizeof(ctrl), data_size); > + out.iov_base + sizeof(ctrl), data_size); Is it possible to replace this by: iov_to_buf(data_sg, data_num, sizeof(ctrl), out.iov_base, data_size) In other words, let iov_to_but handle the offset in the buffer instead of adding it manually. The rest looks good to me. > > r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1); > if (unlikely(r < 0)) { > @@ -676,14 +688,17 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class, > return vhost_vdpa_net_svq_poll(s, 1); > } > > -static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n) > +static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n, > + struct iovec *out_cursor, > + struct iovec *in_cursor) > { > if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) { > const struct iovec data = { > .iov_base = (void *)n->mac, > .iov_len = sizeof(n->mac), > }; > - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC, > + ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_MAC, > VIRTIO_NET_CTRL_MAC_ADDR_SET, > &data, 1); > if (unlikely(dev_written < 0)) { > @@ -735,7 +750,7 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n) > .iov_len = mul_macs_size, > }, > }; > - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, > + ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > VIRTIO_NET_CTRL_MAC, > VIRTIO_NET_CTRL_MAC_TABLE_SET, > data, ARRAY_SIZE(data)); > @@ -750,7 +765,9 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n) > } > > static int vhost_vdpa_net_load_mq(VhostVDPAState *s, > - const VirtIONet *n) > + const VirtIONet *n, > + struct iovec *out_cursor, > + struct iovec *in_cursor) > { > struct virtio_net_ctrl_mq mq; > ssize_t dev_written; > @@ -764,7 +781,8 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s, > .iov_base = &mq, > .iov_len = sizeof(mq), > }; > - dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ, > + dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_MQ, > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, > &data, 1); > if (unlikely(dev_written < 0)) { > @@ -778,7 +796,9 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s, > } > > static int vhost_vdpa_net_load_offloads(VhostVDPAState *s, > - const VirtIONet *n) > + const VirtIONet *n, > + struct iovec *out_cursor, > + struct iovec *in_cursor) > { > uint64_t offloads; > ssize_t dev_written; > @@ -809,7 +829,8 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s, > .iov_base = &offloads, > .iov_len = sizeof(offloads), > }; > - dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS, > + dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_GUEST_OFFLOADS, > VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, > &data, 1); > if (unlikely(dev_written < 0)) { > @@ -823,6 +844,8 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s, > } > > static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s, > + struct iovec *out_cursor, > + struct iovec *in_cursor, > uint8_t cmd, > 
uint8_t on) > { > @@ -832,7 +855,8 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s, > }; > ssize_t dev_written; > > - dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX, > + dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_RX, > cmd, &data, 1); > if (unlikely(dev_written < 0)) { > return dev_written; > @@ -845,7 +869,9 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s, > } > > static int vhost_vdpa_net_load_rx(VhostVDPAState *s, > - const VirtIONet *n) > + const VirtIONet *n, > + struct iovec *out_cursor, > + struct iovec *in_cursor) > { > ssize_t r; > > @@ -872,7 +898,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, > * configuration only at live migration. > */ > if (!n->mac_table.uni_overflow && !n->promisc) { > - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 0); > + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_RX_PROMISC, 0); > if (unlikely(r < 0)) { > return r; > } > @@ -896,7 +923,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, > * configuration only at live migration. > */ > if (n->mac_table.multi_overflow || n->allmulti) { > - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLMULTI, 1); > + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_RX_ALLMULTI, 1); > if (unlikely(r < 0)) { > return r; > } > @@ -917,7 +945,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, > * configuration only at live migration. > */ > if (n->alluni) { > - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLUNI, 1); > + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_RX_ALLUNI, 1); > if (r < 0) { > return r; > } > @@ -934,7 +963,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, > * configuration only at live migration. > */ > if (n->nomulti) { > - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOMULTI, 1); > + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_RX_NOMULTI, 1); > if (r < 0) { > return r; > } > @@ -951,7 +981,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, > * configuration only at live migration. > */ > if (n->nouni) { > - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOUNI, 1); > + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_RX_NOUNI, 1); > if (r < 0) { > return r; > } > @@ -968,7 +999,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, > * configuration only at live migration. 
> */ > if (n->nobcast) { > - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOBCAST, 1); > + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_RX_NOBCAST, 1); > if (r < 0) { > return r; > } > @@ -979,13 +1011,16 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, > > static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s, > const VirtIONet *n, > + struct iovec *out_cursor, > + struct iovec *in_cursor, > uint16_t vid) > { > const struct iovec data = { > .iov_base = &vid, > .iov_len = sizeof(vid), > }; > - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN, > + ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_VLAN, > VIRTIO_NET_CTRL_VLAN_ADD, > &data, 1); > if (unlikely(dev_written < 0)) { > @@ -999,7 +1034,9 @@ static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s, > } > > static int vhost_vdpa_net_load_vlan(VhostVDPAState *s, > - const VirtIONet *n) > + const VirtIONet *n, > + struct iovec *out_cursor, > + struct iovec *in_cursor) > { > int r; > > @@ -1010,7 +1047,8 @@ static int vhost_vdpa_net_load_vlan(VhostVDPAState *s, > for (int i = 0; i < MAX_VLAN >> 5; i++) { > for (int j = 0; n->vlans[i] && j <= 0x1f; j++) { > if (n->vlans[i] & (1U << j)) { > - r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j); > + r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor, > + in_cursor, (i << 5) + j); > if (unlikely(r != 0)) { > return r; > } > @@ -1027,6 +1065,7 @@ static int vhost_vdpa_net_load(NetClientState *nc) > struct vhost_vdpa *v = &s->vhost_vdpa; > const VirtIONet *n; > int r; > + struct iovec out_cursor, in_cursor; > > assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); > > @@ -1035,23 +1074,24 @@ static int vhost_vdpa_net_load(NetClientState *nc) > } > > n = VIRTIO_NET(v->dev->vdev); > - r = vhost_vdpa_net_load_mac(s, n); > + vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor); > + r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor); > if (unlikely(r < 0)) { > return r; > } > - r = vhost_vdpa_net_load_mq(s, n); > + r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor); > if (unlikely(r)) { > return r; > } > - r = vhost_vdpa_net_load_offloads(s, n); > + r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor); > if (unlikely(r)) { > return r; > } > - r = vhost_vdpa_net_load_rx(s, n); > + r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor); > if (unlikely(r)) { > return r; > } > - r = vhost_vdpa_net_load_vlan(s, n); > + r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor); > if (unlikely(r)) { > return r; > } > -- > 2.25.1 >
在 2023/10/4 15:21, Eugenio Perez Martin 写道: > On Tue, Aug 29, 2023 at 7:55 AM Hawkins Jiawei <yin31149@gmail.com> wrote: >> >> This patch introduces two new arugments, `out_cursor` >> and `in_cursor`, to vhost_vdpa_net_loadx(). Addtionally, >> it includes a helper function >> vhost_vdpa_net_load_cursor_reset() for resetting these >> cursors. >> >> Furthermore, this patch refactors vhost_vdpa_net_load_cmd() >> so that vhost_vdpa_net_load_cmd() prepares buffers >> for the device using the cursors arguments, instead >> of directly accesses `s->cvq_cmd_out_buffer` and >> `s->status` fields. >> >> By making these change, next patches in this series >> can refactor vhost_vdpa_net_load_cmd() directly to >> iterate through the control commands shadow buffers, >> allowing QEMU to send CVQ state load commands in parallel >> at device startup. >> >> Signed-off-by: Hawkins Jiawei <yin31149@gmail.com> >> --- >> v4: >> - use `struct iovec` instead of `void **` as cursor >> suggested by Eugenio >> - add vhost_vdpa_net_load_cursor_reset() helper function >> to reset the cursors >> - refactor vhost_vdpa_net_load_cmd() to prepare buffers >> by iov_copy() instead of accessing `in` and `out` directly >> suggested by Eugenio >> >> v3: https://lore.kernel.org/all/bf390934673f2b613359ea9d7ac6c89199c31384.1689748694.git.yin31149@gmail.com/ >> >> net/vhost-vdpa.c | 114 ++++++++++++++++++++++++++++++++--------------- >> 1 file changed, 77 insertions(+), 37 deletions(-) >> >> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c >> index d9b8b3cf6c..a71e8c9090 100644 >> --- a/net/vhost-vdpa.c >> +++ b/net/vhost-vdpa.c >> @@ -633,7 +633,22 @@ static uint16_t vhost_vdpa_net_svq_available_slots(VhostVDPAState *s) >> return vhost_svq_available_slots(svq); >> } >> >> -static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class, >> +static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s, >> + struct iovec *out_cursor, >> + struct iovec *in_cursor) >> +{ >> + /* reset the cursor of the output buffer for the device */ >> + out_cursor->iov_base = s->cvq_cmd_out_buffer; >> + out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); >> + >> + /* reset the cursor of the in buffer for the device */ >> + in_cursor->iov_base = s->status; >> + in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); >> +} >> + >> +static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, >> + struct iovec *out_cursor, >> + struct iovec *in_cursor, uint8_t class, >> uint8_t cmd, const struct iovec *data_sg, >> size_t data_num) >> { >> @@ -641,28 +656,25 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class, >> .class = class, >> .cmd = cmd, >> }; >> - size_t data_size = iov_size(data_sg, data_num); >> - /* Buffers for the device */ >> - const struct iovec out = { >> - .iov_base = s->cvq_cmd_out_buffer, >> - .iov_len = sizeof(ctrl) + data_size, >> - }; >> - const struct iovec in = { >> - .iov_base = s->status, >> - .iov_len = sizeof(*s->status), >> - }; >> + size_t data_size = iov_size(data_sg, data_num), >> + cmd_size = sizeof(ctrl) + data_size; >> + struct iovec out, in; >> ssize_t r; >> >> assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl)); >> /* Each CVQ command has one out descriptor and one in descriptor */ >> assert(vhost_vdpa_net_svq_available_slots(s) >= 2); >> >> - /* pack the CVQ command header */ >> - memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl)); >> + /* Prepare the buffer for out descriptor for the device */ >> + iov_copy(&out, 1, out_cursor, 1, 0, cmd_size); > > I may be missing 
something here, but cmd_size should be the bytes > available in "out", so we don't overrun it. > >> + /* Prepare the buffer for in descriptor for the device. */ >> + iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status)); >> > > Same here, although it is impossible for the moment to overrun it as > all cmds only return one byte. > Here we just use iov_copy() to initialize the `out` and `in` variables, something like: /* extract the required buffer from the cursor for output */ out.iov_len = cmd_size; out.iov_base = out_cursor->iov_base; /* extract the required buffer from the cursor for input */ in.iov_len = sizeof(*s->status); in.iov_base = in_cursor->iov_base; I think iov_copy() can improve readability, what do you think? >> + /* pack the CVQ command header */ >> + iov_from_buf(&out, 1, 0, &ctrl, sizeof(ctrl)); >> /* pack the CVQ command command-specific-data */ >> iov_to_buf(data_sg, data_num, 0, >> - s->cvq_cmd_out_buffer + sizeof(ctrl), data_size); >> + out.iov_base + sizeof(ctrl), data_size); > > Is it possible to replace this by: > iov_to_buf(data_sg, data_num, sizeof(ctrl), out.iov_base, data_size) > > In other words, let iov_to_but handle the offset in the buffer instead > of adding it manually. It seems that iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt, size_t offset, void *buf, size_t bytes) is copying data from `iov` starting at offset `offset` and with a length of `bytes`, to buf starting at offset `0`. Yet here we want to copy data from `data_sg` starting at offset `0` and with a length of `data_size`, to `out.iov_base` starting at offset `sizeof(ctrl)`, so I think we cannot replace this(Please correct me if I am wrong). Thanks! > > The rest looks good to me. > >> >> r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1); >> if (unlikely(r < 0)) { >> @@ -676,14 +688,17 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class, >> return vhost_vdpa_net_svq_poll(s, 1); >> } >> >> -static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n) >> +static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n, >> + struct iovec *out_cursor, >> + struct iovec *in_cursor) >> { >> if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) { >> const struct iovec data = { >> .iov_base = (void *)n->mac, >> .iov_len = sizeof(n->mac), >> }; >> - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC, >> + ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_MAC, >> VIRTIO_NET_CTRL_MAC_ADDR_SET, >> &data, 1); >> if (unlikely(dev_written < 0)) { >> @@ -735,7 +750,7 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n) >> .iov_len = mul_macs_size, >> }, >> }; >> - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, >> + ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, >> VIRTIO_NET_CTRL_MAC, >> VIRTIO_NET_CTRL_MAC_TABLE_SET, >> data, ARRAY_SIZE(data)); >> @@ -750,7 +765,9 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n) >> } >> >> static int vhost_vdpa_net_load_mq(VhostVDPAState *s, >> - const VirtIONet *n) >> + const VirtIONet *n, >> + struct iovec *out_cursor, >> + struct iovec *in_cursor) >> { >> struct virtio_net_ctrl_mq mq; >> ssize_t dev_written; >> @@ -764,7 +781,8 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s, >> .iov_base = &mq, >> .iov_len = sizeof(mq), >> }; >> - dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ, >> + dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, 
in_cursor, >> + VIRTIO_NET_CTRL_MQ, >> VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, >> &data, 1); >> if (unlikely(dev_written < 0)) { >> @@ -778,7 +796,9 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s, >> } >> >> static int vhost_vdpa_net_load_offloads(VhostVDPAState *s, >> - const VirtIONet *n) >> + const VirtIONet *n, >> + struct iovec *out_cursor, >> + struct iovec *in_cursor) >> { >> uint64_t offloads; >> ssize_t dev_written; >> @@ -809,7 +829,8 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s, >> .iov_base = &offloads, >> .iov_len = sizeof(offloads), >> }; >> - dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS, >> + dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_GUEST_OFFLOADS, >> VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, >> &data, 1); >> if (unlikely(dev_written < 0)) { >> @@ -823,6 +844,8 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s, >> } >> >> static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s, >> + struct iovec *out_cursor, >> + struct iovec *in_cursor, >> uint8_t cmd, >> uint8_t on) >> { >> @@ -832,7 +855,8 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s, >> }; >> ssize_t dev_written; >> >> - dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX, >> + dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_RX, >> cmd, &data, 1); >> if (unlikely(dev_written < 0)) { >> return dev_written; >> @@ -845,7 +869,9 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s, >> } >> >> static int vhost_vdpa_net_load_rx(VhostVDPAState *s, >> - const VirtIONet *n) >> + const VirtIONet *n, >> + struct iovec *out_cursor, >> + struct iovec *in_cursor) >> { >> ssize_t r; >> >> @@ -872,7 +898,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, >> * configuration only at live migration. >> */ >> if (!n->mac_table.uni_overflow && !n->promisc) { >> - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 0); >> + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_RX_PROMISC, 0); >> if (unlikely(r < 0)) { >> return r; >> } >> @@ -896,7 +923,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, >> * configuration only at live migration. >> */ >> if (n->mac_table.multi_overflow || n->allmulti) { >> - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLMULTI, 1); >> + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_RX_ALLMULTI, 1); >> if (unlikely(r < 0)) { >> return r; >> } >> @@ -917,7 +945,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, >> * configuration only at live migration. >> */ >> if (n->alluni) { >> - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLUNI, 1); >> + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_RX_ALLUNI, 1); >> if (r < 0) { >> return r; >> } >> @@ -934,7 +963,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, >> * configuration only at live migration. >> */ >> if (n->nomulti) { >> - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOMULTI, 1); >> + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_RX_NOMULTI, 1); >> if (r < 0) { >> return r; >> } >> @@ -951,7 +981,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, >> * configuration only at live migration. 
>> */ >> if (n->nouni) { >> - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOUNI, 1); >> + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_RX_NOUNI, 1); >> if (r < 0) { >> return r; >> } >> @@ -968,7 +999,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, >> * configuration only at live migration. >> */ >> if (n->nobcast) { >> - r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOBCAST, 1); >> + r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_RX_NOBCAST, 1); >> if (r < 0) { >> return r; >> } >> @@ -979,13 +1011,16 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s, >> >> static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s, >> const VirtIONet *n, >> + struct iovec *out_cursor, >> + struct iovec *in_cursor, >> uint16_t vid) >> { >> const struct iovec data = { >> .iov_base = &vid, >> .iov_len = sizeof(vid), >> }; >> - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN, >> + ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, >> + VIRTIO_NET_CTRL_VLAN, >> VIRTIO_NET_CTRL_VLAN_ADD, >> &data, 1); >> if (unlikely(dev_written < 0)) { >> @@ -999,7 +1034,9 @@ static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s, >> } >> >> static int vhost_vdpa_net_load_vlan(VhostVDPAState *s, >> - const VirtIONet *n) >> + const VirtIONet *n, >> + struct iovec *out_cursor, >> + struct iovec *in_cursor) >> { >> int r; >> >> @@ -1010,7 +1047,8 @@ static int vhost_vdpa_net_load_vlan(VhostVDPAState *s, >> for (int i = 0; i < MAX_VLAN >> 5; i++) { >> for (int j = 0; n->vlans[i] && j <= 0x1f; j++) { >> if (n->vlans[i] & (1U << j)) { >> - r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j); >> + r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor, >> + in_cursor, (i << 5) + j); >> if (unlikely(r != 0)) { >> return r; >> } >> @@ -1027,6 +1065,7 @@ static int vhost_vdpa_net_load(NetClientState *nc) >> struct vhost_vdpa *v = &s->vhost_vdpa; >> const VirtIONet *n; >> int r; >> + struct iovec out_cursor, in_cursor; >> >> assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); >> >> @@ -1035,23 +1074,24 @@ static int vhost_vdpa_net_load(NetClientState *nc) >> } >> >> n = VIRTIO_NET(v->dev->vdev); >> - r = vhost_vdpa_net_load_mac(s, n); >> + vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor); >> + r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor); >> if (unlikely(r < 0)) { >> return r; >> } >> - r = vhost_vdpa_net_load_mq(s, n); >> + r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor); >> if (unlikely(r)) { >> return r; >> } >> - r = vhost_vdpa_net_load_offloads(s, n); >> + r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor); >> if (unlikely(r)) { >> return r; >> } >> - r = vhost_vdpa_net_load_rx(s, n); >> + r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor); >> if (unlikely(r)) { >> return r; >> } >> - r = vhost_vdpa_net_load_vlan(s, n); >> + r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor); >> if (unlikely(r)) { >> return r; >> } >> -- >> 2.25.1 >> >
This patch enables sending CVQ state load commands
in parallel at device startup by the following steps:
* Refactor vhost_vdpa_net_load_cmd() to iterate through
the control commands shadow buffers. This allows different
CVQ state load commands to use their own unique buffers.
* Delay the polling and checking of buffers until either
the SVQ is full or the control commands shadow buffers are full.
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1578
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
---
v4:
- refactor argument `cmds_in_flight` to `len` for
vhost_vdpa_net_svq_full()
- check the return value of vhost_vdpa_net_svq_poll()
in vhost_vdpa_net_svq_flush() suggested by Eugenio
- use iov_size(), vhost_vdpa_net_load_cursor_reset()
and iov_discard_front() to update the cursors instead of
accessing them directly, as suggested by Eugenio
v3: https://lore.kernel.org/all/3a002790e6c880af928c6470ecbf03e7c65a68bb.1689748694.git.yin31149@gmail.com/
net/vhost-vdpa.c | 155 +++++++++++++++++++++++++++++------------------
1 file changed, 97 insertions(+), 58 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index a71e8c9090..818464b702 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -646,6 +646,31 @@ static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}
+/*
+ * Poll SVQ for multiple pending control commands and check the device's ack.
+ *
+ * Caller should hold the BQL when invoking this function.
+ *
+ * @s: The VhostVDPAState
+ * @len: The length of the pending status shadow buffer
+ */
+static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
+{
+ /* Device uses a one-byte length ack for each control command */
+ ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
+ if (unlikely(dev_written != len)) {
+ return -EIO;
+ }
+
+ /* check the device's ack */
+ for (int i = 0; i < len; ++i) {
+ if (s->status[i] != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
struct iovec *out_cursor,
struct iovec *in_cursor, uint8_t class,
@@ -660,10 +685,30 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
cmd_size = sizeof(ctrl) + data_size;
struct iovec out, in;
ssize_t r;
+ unsigned dummy_cursor_iov_cnt;
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
+ if (vhost_vdpa_net_svq_available_slots(s) < 2 ||
+ iov_size(out_cursor, 1) < cmd_size) {
+ /*
+ * It is time to flush all pending control commands if SVQ is full
+ * or control commands shadow buffers are full.
+ *
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
+ (void *)s->status);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+
+ vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
+ }
+
/* Each CVQ command has one out descriptor and one in descriptor */
assert(vhost_vdpa_net_svq_available_slots(s) >= 2);
+ assert(iov_size(out_cursor, 1) >= cmd_size);
/* Prepare the buffer for out descriptor for the device */
iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
@@ -681,11 +726,13 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
return r;
}
- /*
- * We can poll here since we've had BQL from the time
- * we sent the descriptor.
- */
- return vhost_vdpa_net_svq_poll(s, 1);
+ /* iterate the cursors */
+ dummy_cursor_iov_cnt = 1;
+ iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
+ dummy_cursor_iov_cnt = 1;
+ iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
+
+ return 0;
}
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
@@ -697,15 +744,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
.iov_base = (void *)n->mac,
.iov_len = sizeof(n->mac),
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_ADDR_SET,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
@@ -750,15 +794,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
.iov_len = mul_macs_size,
},
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
data, ARRAY_SIZE(data));
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -770,7 +811,7 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
struct iovec *in_cursor)
{
struct virtio_net_ctrl_mq mq;
- ssize_t dev_written;
+ ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
return 0;
@@ -781,15 +822,12 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
.iov_base = &mq,
.iov_len = sizeof(mq),
};
- dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_MQ,
- VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_MQ,
+ VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -801,7 +839,7 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
struct iovec *in_cursor)
{
uint64_t offloads;
- ssize_t dev_written;
+ ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
@@ -829,15 +867,12 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
.iov_base = &offloads,
.iov_len = sizeof(offloads),
};
- dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_GUEST_OFFLOADS,
- VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+ VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -853,16 +888,12 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
.iov_base = &on,
.iov_len = sizeof(on),
};
- ssize_t dev_written;
+ ssize_t r;
- dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_RX,
- cmd, &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX, cmd, &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -1019,15 +1050,12 @@ static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
.iov_base = &vid,
.iov_len = sizeof(vid),
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_ADD,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (unlikely(*s->status != VIRTIO_NET_OK)) {
- return -EIO;
+ ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_VLAN,
+ VIRTIO_NET_CTRL_VLAN_ADD,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -1096,6 +1124,17 @@ static int vhost_vdpa_net_load(NetClientState *nc)
return r;
}
+ /*
+ * We need to poll and check all pending device's used buffers.
+ *
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
+ if (unlikely(r)) {
+ return r;
+ }
+
return 0;
}
--
2.25.1
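To see the batching policy in isolation, here is a tiny standalone model in
plain C. Everything in it (PAGE_LEN, SVQ_SLOTS, flush(), the 4-byte command
size) is a stand-in chosen for illustration, not QEMU API; it only mimics
what vhost_vdpa_net_load_cmd() and vhost_vdpa_net_svq_flush() do with the
cursors in the patch above:
```c
#include <stdio.h>
#include <stddef.h>

/* Stand-ins, not QEMU API: */
#define PAGE_LEN  4096   /* vhost_vdpa_net_cvq_cmd_page_len()            */
#define SVQ_SLOTS 256    /* descriptors initially reported as available  */

/* Models vhost_vdpa_net_svq_flush(): poll the SVQ for `pending` used
 * buffers; the real code also fails if any status byte != VIRTIO_NET_OK. */
static void flush(size_t pending, size_t *slots)
{
    printf("flush %zu pending command(s)\n", pending);
    *slots = SVQ_SLOTS;          /* polling frees the used descriptors */
}

int main(void)
{
    const size_t cmd_size = 4;   /* ctrl header (2 bytes) + vid (2 bytes) */
    size_t out_used = 0;         /* bytes packed at the out cursor        */
    size_t pending  = 0;         /* status bytes expected at the in cursor */
    size_t slots    = SVQ_SLOTS;

    /* Pretend we load 4094 VLAN commands, as in the perf test above. */
    for (int vid = 1; vid <= 4094; vid++) {
        if (slots < 2 || PAGE_LEN - out_used < cmd_size) {
            /* SVQ or the out shadow buffer is full: flush everything sent. */
            flush(pending, &slots);
            out_used = pending = 0;  /* vhost_vdpa_net_load_cursor_reset() */
        }
        out_used += cmd_size;    /* advance out_cursor past this command   */
        pending  += 1;           /* advance in_cursor past one status byte */
        slots    -= 2;           /* one out descriptor + one in descriptor */
    }
    flush(pending, &slots);      /* final flush in vhost_vdpa_net_load()   */
    return 0;
}
```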
On Tue, Aug 29, 2023 at 7:55 AM Hawkins Jiawei <yin31149@gmail.com> wrote: > > This patch enables sending CVQ state load commands > in parallel at device startup by following steps: > > * Refactor vhost_vdpa_net_load_cmd() to iterate through > the control commands shadow buffers. This allows different > CVQ state load commands to use their own unique buffers. > > * Delay the polling and checking of buffers until either > the SVQ is full or control commands shadow buffers are full. > > Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1578 > Signed-off-by: Hawkins Jiawei <yin31149@gmail.com> > --- > v4: > - refactor argument `cmds_in_flight` to `len` for > vhost_vdpa_net_svq_full() > - check the return value of vhost_vdpa_net_svq_poll() > in vhost_vdpa_net_svq_flush() suggested by Eugenio > - use iov_size(), vhost_vdpa_net_load_cursor_reset() > and iov_discard_front() to update the cursors instead of > accessing it directly according to Eugenio > > v3: https://lore.kernel.org/all/3a002790e6c880af928c6470ecbf03e7c65a68bb.1689748694.git.yin31149@gmail.com/ > > net/vhost-vdpa.c | 155 +++++++++++++++++++++++++++++------------------ > 1 file changed, 97 insertions(+), 58 deletions(-) > > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c > index a71e8c9090..818464b702 100644 > --- a/net/vhost-vdpa.c > +++ b/net/vhost-vdpa.c > @@ -646,6 +646,31 @@ static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s, > in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); > } > > +/* > + * Poll SVQ for multiple pending control commands and check the device's ack. > + * > + * Caller should hold the BQL when invoking this function. > + * > + * @s: The VhostVDPAState > + * @len: The length of the pending status shadow buffer > + */ > +static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len) > +{ > + /* Device uses a one-byte length ack for each control command */ > + ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len); > + if (unlikely(dev_written != len)) { > + return -EIO; > + } > + > + /* check the device's ack */ > + for (int i = 0; i < len; ++i) { > + if (s->status[i] != VIRTIO_NET_OK) { > + return -EIO; > + } > + } > + return 0; > +} > + > static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, > struct iovec *out_cursor, > struct iovec *in_cursor, uint8_t class, > @@ -660,10 +685,30 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, > cmd_size = sizeof(ctrl) + data_size; > struct iovec out, in; > ssize_t r; > + unsigned dummy_cursor_iov_cnt; > > assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl)); > + if (vhost_vdpa_net_svq_available_slots(s) < 2 || > + iov_size(out_cursor, 1) < cmd_size) { > + /* > + * It is time to flush all pending control commands if SVQ is full > + * or control commands shadow buffers are full. > + * > + * We can poll here since we've had BQL from the time > + * we sent the descriptor. > + */ > + r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base - > + (void *)s->status); > + if (unlikely(r < 0)) { > + return r; > + } > + > + vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor); > + } > + It would be great to merge this flush with the one at vhost_vdpa_net_load. We would need to return ENOSPC or similar and handle it there. But it would make it more difficult to iterate through the loading of the different parameters, so I think it can be done on top. 
> /* Each CVQ command has one out descriptor and one in descriptor */ > assert(vhost_vdpa_net_svq_available_slots(s) >= 2); > + assert(iov_size(out_cursor, 1) >= cmd_size); > Same here, I think we can avoid the assertion, right? Apart from that, Acked-by: Eugenio Pérez <eperezma@redhat.com> > /* Prepare the buffer for out descriptor for the device */ > iov_copy(&out, 1, out_cursor, 1, 0, cmd_size); > @@ -681,11 +726,13 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, > return r; > } > > - /* > - * We can poll here since we've had BQL from the time > - * we sent the descriptor. > - */ > - return vhost_vdpa_net_svq_poll(s, 1); > + /* iterate the cursors */ > + dummy_cursor_iov_cnt = 1; > + iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size); > + dummy_cursor_iov_cnt = 1; > + iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status)); > + > + return 0; > } > > static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n, > @@ -697,15 +744,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n, > .iov_base = (void *)n->mac, > .iov_len = sizeof(n->mac), > }; > - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > - VIRTIO_NET_CTRL_MAC, > - VIRTIO_NET_CTRL_MAC_ADDR_SET, > - &data, 1); > - if (unlikely(dev_written < 0)) { > - return dev_written; > - } > - if (*s->status != VIRTIO_NET_OK) { > - return -EIO; > + ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_MAC, > + VIRTIO_NET_CTRL_MAC_ADDR_SET, > + &data, 1); > + if (unlikely(r < 0)) { > + return r; > } > } > > @@ -750,15 +794,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n, > .iov_len = mul_macs_size, > }, > }; > - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > VIRTIO_NET_CTRL_MAC, > VIRTIO_NET_CTRL_MAC_TABLE_SET, > data, ARRAY_SIZE(data)); > - if (unlikely(dev_written < 0)) { > - return dev_written; > - } > - if (*s->status != VIRTIO_NET_OK) { > - return -EIO; > + if (unlikely(r < 0)) { > + return r; > } > > return 0; > @@ -770,7 +811,7 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s, > struct iovec *in_cursor) > { > struct virtio_net_ctrl_mq mq; > - ssize_t dev_written; > + ssize_t r; > > if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) { > return 0; > @@ -781,15 +822,12 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s, > .iov_base = &mq, > .iov_len = sizeof(mq), > }; > - dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > - VIRTIO_NET_CTRL_MQ, > - VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, > - &data, 1); > - if (unlikely(dev_written < 0)) { > - return dev_written; > - } > - if (*s->status != VIRTIO_NET_OK) { > - return -EIO; > + r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_MQ, > + VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, > + &data, 1); > + if (unlikely(r < 0)) { > + return r; > } > > return 0; > @@ -801,7 +839,7 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s, > struct iovec *in_cursor) > { > uint64_t offloads; > - ssize_t dev_written; > + ssize_t r; > > if (!virtio_vdev_has_feature(&n->parent_obj, > VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { > @@ -829,15 +867,12 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s, > .iov_base = &offloads, > .iov_len = sizeof(offloads), > }; > - dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > - VIRTIO_NET_CTRL_GUEST_OFFLOADS, > - VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, > - 
&data, 1); > - if (unlikely(dev_written < 0)) { > - return dev_written; > - } > - if (*s->status != VIRTIO_NET_OK) { > - return -EIO; > + r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_GUEST_OFFLOADS, > + VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, > + &data, 1); > + if (unlikely(r < 0)) { > + return r; > } > > return 0; > @@ -853,16 +888,12 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s, > .iov_base = &on, > .iov_len = sizeof(on), > }; > - ssize_t dev_written; > + ssize_t r; > > - dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > - VIRTIO_NET_CTRL_RX, > - cmd, &data, 1); > - if (unlikely(dev_written < 0)) { > - return dev_written; > - } > - if (*s->status != VIRTIO_NET_OK) { > - return -EIO; > + r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_RX, cmd, &data, 1); > + if (unlikely(r < 0)) { > + return r; > } > > return 0; > @@ -1019,15 +1050,12 @@ static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s, > .iov_base = &vid, > .iov_len = sizeof(vid), > }; > - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > - VIRTIO_NET_CTRL_VLAN, > - VIRTIO_NET_CTRL_VLAN_ADD, > - &data, 1); > - if (unlikely(dev_written < 0)) { > - return dev_written; > - } > - if (unlikely(*s->status != VIRTIO_NET_OK)) { > - return -EIO; > + ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, > + VIRTIO_NET_CTRL_VLAN, > + VIRTIO_NET_CTRL_VLAN_ADD, > + &data, 1); > + if (unlikely(r < 0)) { > + return r; > } > > return 0; > @@ -1096,6 +1124,17 @@ static int vhost_vdpa_net_load(NetClientState *nc) > return r; > } > > + /* > + * We need to poll and check all pending device's used buffers. > + * > + * We can poll here since we've had BQL from the time > + * we sent the descriptor. > + */ > + r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status); > + if (unlikely(r)) { > + return r; > + } > + > return 0; > } > > -- > 2.25.1 >
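As a concrete reference for the alternative sketched in the review above (merging the per-command flush into the single one in vhost_vdpa_net_load()), the caller side might look roughly like the snippet below. This is only an illustration of the idea from the discussion, not code from the series: it assumes vhost_vdpa_net_load_cmd() is changed to return -ENOSPC instead of flushing when the SVQ or the shadow buffers are full, and the wrapper name is made up.

```c
/*
 * Hypothetical wrapper for the -ENOSPC approach discussed above: when
 * vhost_vdpa_net_load_cmd() reports that the SVQ or the shadow buffers are
 * full, flush everything queued so far, reset the cursors and retry once.
 * All names other than the wrapper itself come from the patch.
 */
static ssize_t vhost_vdpa_net_load_cmd_flushed(VhostVDPAState *s,
                                               struct iovec *out_cursor,
                                               struct iovec *in_cursor,
                                               uint8_t class, uint8_t cmd,
                                               const struct iovec *data_sg,
                                               size_t data_num)
{
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        class, cmd, data_sg, data_num);
    if (r != -ENOSPC) {
        return r;
    }

    /* Each pending command owns exactly one byte in the status buffer */
    r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base - (void *)s->status);
    if (unlikely(r < 0)) {
        return r;
    }

    /* Start over with empty shadow buffers and retry the command once */
    vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    return vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                   class, cmd, data_sg, data_num);
}
```

The trade-off noted in the review still applies: every vhost_vdpa_net_load_*() helper would have to go through such a wrapper, which is why the thread leaves this refactor for a follow-up on top of the series.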
On 2023/10/4 15:33, Eugenio Perez Martin wrote:
> On Tue, Aug 29, 2023 at 7:55 AM Hawkins Jiawei <yin31149@gmail.com> wrote:
>>
>> [...]
>>
>> +        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
>> +                                     (void *)s->status);
>> +        if (unlikely(r < 0)) {
>> +            return r;
>> +        }
>> +
>> +        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
>> +    }
>> +
>
> It would be great to merge this flush with the one at
> vhost_vdpa_net_load. We would need to return ENOSPC or similar and
> handle it there.
>
> But it would make it more difficult to iterate through the loading of
> the different parameters, so I think it can be done on top.

Hi Eugenio,

Please forgive my poor English. Do you mean that the approach in my
patch is acceptable to you?

>> /* Each CVQ command has one out descriptor and one in descriptor */
>> assert(vhost_vdpa_net_svq_available_slots(s) >= 2);
>> + assert(iov_size(out_cursor, 1) >= cmd_size);
>>
>
> Same here, I think we can avoid the assertion, right?

You are right, I will remove this assertion. Thanks!

>
> Apart from that,
>
> Acked-by: Eugenio Pérez <eperezma@redhat.com>
>
>> [...]
>>
>> --
>> 2.25.1
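A detail that may help when reading the flush calls in the patch: the in cursor starts at s->status and each queued command reserves exactly one status byte, so the pointer difference handed to vhost_vdpa_net_svq_flush() is simply the number of commands still waiting for an ack. The standalone model below mirrors only that control flow; the buffer size, the poll stub and the helper names are simplified stand-ins, not QEMU code.

```c
/*
 * Minimal standalone model of the delayed-ack pattern used by the series:
 * commands are queued without polling, each one reserving a single byte in
 * a status buffer, and one flush at the end polls for all pending acks and
 * checks them.  Everything here is simplified and hypothetical.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define VIRTIO_NET_OK 0

static uint8_t status[64];              /* stands in for s->status */

/* Stub for the SVQ poll: pretend the device acked every pending command */
static ssize_t svq_poll(size_t len)
{
    memset(status, VIRTIO_NET_OK, len);
    return (ssize_t)len;
}

/* Same shape as vhost_vdpa_net_svq_flush() in the patch */
static ssize_t svq_flush(size_t len)
{
    ssize_t dev_written = svq_poll(len);
    if (dev_written != (ssize_t)len) {
        return -EIO;
    }
    for (size_t i = 0; i < len; i++) {
        if (status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}

int main(void)
{
    uint8_t *in_cursor = status;

    /* Queue three commands; each reserves one status byte, nothing is polled */
    for (int i = 0; i < 3; i++) {
        in_cursor += 1;
    }

    /* The pointer difference is the number of in-flight commands to flush */
    size_t pending = (size_t)(in_cursor - status);
    printf("flushing %zu pending commands: %s\n", pending,
           svq_flush(pending) == 0 ? "ok" : "error");
    return 0;
}
```

Built as an ordinary C program this prints "flushing 3 pending commands: ok", which is the same invariant the patch relies on both for the mid-load flush (when the SVQ or the shadow buffers fill up) and for the final flush in vhost_vdpa_net_load().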