From: "Michael S. Tsirkin"
To: qemu-devel@nongnu.org
Cc: Peter Maydell, Jason Wang, Stefan Hajnoczi, Paolo Bonzini
Date: Fri, 17 Feb 2017 21:54:35 +0200
Subject: [Qemu-devel] [PULL 12/23] virtio: use VRingMemoryRegionCaches for avail and used rings
Message-ID: <1487361200-29966-13-git-send-email-mst@redhat.com>
In-Reply-To: <1487361200-29966-1-git-send-email-mst@redhat.com>
References: <1487361200-29966-1-git-send-email-mst@redhat.com>

From: Paolo Bonzini

The virtio-net change is necessary because it uses virtqueue_fill
and virtqueue_flush instead of the more convenient virtqueue_push.

Reviewed-by: Stefan Hajnoczi
Signed-off-by: Paolo Bonzini
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
Tested-by: Alex Williamson
---
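A note for reviewers, not for the commit log: the sketch below shows the
shape of the RCU pattern this patch threads through the vring accessors.
Every helper that dereferences vq->vring.caches with atomic_rcu_read()
must run inside an RCU read-side critical section, which is why entry
points such as virtqueue_push() and the new virtio_net_receive() wrapper
take the lock themselves. The example is a stand-alone illustration
written against userspace liburcu rather than QEMU's qemu/rcu.h, and the
names ring_cache, current_cache, read_used_idx and replace_cache are
invented for the example.

    /* Stand-alone sketch of the pattern, using userspace liburcu
     * (the legacy unprefixed API; link with -lurcu).  All names here
     * are invented for the example and are not QEMU APIs. */
    #include <urcu.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for VRingMemoryRegionCaches: an object that readers
     * reach through a shared pointer and that a writer may replace. */
    struct ring_cache {
        uint16_t used_idx;
    };

    static struct ring_cache *current_cache;

    /* Reader, analogous to vring_used_idx() in the patch: the
     * dereferenced pointer is only valid until rcu_read_unlock(). */
    static uint16_t read_used_idx(void)
    {
        struct ring_cache *c;
        uint16_t idx;

        rcu_read_lock();
        c = rcu_dereference(current_cache);  /* like atomic_rcu_read() */
        idx = c->used_idx;
        rcu_read_unlock();
        return idx;
    }

    /* Writer, analogous to rebuilding the caches when the guest moves
     * the rings: publish the new cache, then free the old one only
     * after every pre-existing reader has left its critical section. */
    static void replace_cache(uint16_t new_idx)
    {
        struct ring_cache *fresh = malloc(sizeof(*fresh));
        struct ring_cache *old = current_cache;

        fresh->used_idx = new_idx;
        rcu_assign_pointer(current_cache, fresh);
        synchronize_rcu();   /* wait for readers that may hold 'old' */
        free(old);           /* free(NULL) is a no-op on the first call */
    }

    int main(void)
    {
        rcu_register_thread();   /* liburcu readers must register */
        replace_cache(42);
        printf("used_idx = %u\n", read_used_idx());
        rcu_unregister_thread();
        return 0;
    }

The writer side is what makes the read lock load-bearing: replace_cache()
may free the old cache as soon as synchronize_rcu() returns, so a reader
that picked up the pointer outside rcu_read_lock()/rcu_read_unlock()
could be left dereferencing freed memory. That is the failure mode the
lock/unlock pairs added below guard against.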
 hw/net/virtio-net.c |  14 +++++-
 hw/virtio/virtio.c  | 132 ++++++++++++++++++++++++++++++++++++++--------------
 2 files changed, 109 insertions(+), 37 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 354a19e..c321680 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1130,7 +1130,8 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
     return 0;
 }
 
-static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+                                      size_t size)
 {
     VirtIONet *n = qemu_get_nic_opaque(nc);
     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
@@ -1233,6 +1234,17 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
     return size;
 }
 
+static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
+                                  size_t size)
+{
+    ssize_t r;
+
+    rcu_read_lock();
+    r = virtio_net_receive_rcu(nc, buf, size);
+    rcu_read_unlock();
+    return r;
+}
+
 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
 
 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index cdafcec..c08e50f 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -173,6 +173,7 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
     virtio_init_region_cache(vdev, n);
 }
 
+/* Called within rcu_read_lock(). */
 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                             MemoryRegionCache *cache, int i)
 {
@@ -184,88 +185,110 @@ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
     virtio_tswap16s(vdev, &desc->next);
 }
 
+/* Called within rcu_read_lock(). */
 static inline uint16_t vring_avail_flags(VirtQueue *vq)
 {
-    hwaddr pa;
-    pa = vq->vring.avail + offsetof(VRingAvail, flags);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingAvail, flags);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }
 
+/* Called within rcu_read_lock(). */
 static inline uint16_t vring_avail_idx(VirtQueue *vq)
 {
-    hwaddr pa;
-    pa = vq->vring.avail + offsetof(VRingAvail, idx);
-    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingAvail, idx);
+    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
     return vq->shadow_avail_idx;
 }
 
+/* Called within rcu_read_lock(). */
 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
 {
-    hwaddr pa;
-    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingAvail, ring[i]);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }
 
+/* Called within rcu_read_lock(). */
 static inline uint16_t vring_get_used_event(VirtQueue *vq)
 {
     return vring_avail_ring(vq, vq->vring.num);
 }
 
+/* Called within rcu_read_lock(). */
 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                     int i)
 {
-    hwaddr pa;
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingUsed, ring[i]);
     virtio_tswap32s(vq->vdev, &uelem->id);
     virtio_tswap32s(vq->vdev, &uelem->len);
-    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
-    address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED,
-                        (void *)uelem, sizeof(VRingUsedElem));
+    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
+    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
 }
 
+/* Called within rcu_read_lock(). */
 static uint16_t vring_used_idx(VirtQueue *vq)
 {
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, idx);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingUsed, idx);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
 }
 
+/* Called within rcu_read_lock(). */
 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
 {
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, idx);
-    virtio_stw_phys(vq->vdev, pa, val);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingUsed, idx);
+    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
     vq->used_idx = val;
 }
 
+/* Called within rcu_read_lock(). */
 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
 {
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
     VirtIODevice *vdev = vq->vdev;
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, flags);
-    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
+    hwaddr pa = offsetof(VRingUsed, flags);
+    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 }
 
+/* Called within rcu_read_lock(). */
 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
 {
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
     VirtIODevice *vdev = vq->vdev;
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, flags);
-    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
+    hwaddr pa = offsetof(VRingUsed, flags);
+    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 }
 
+/* Called within rcu_read_lock(). */
 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
 {
+    VRingMemoryRegionCaches *caches;
     hwaddr pa;
     if (!vq->notification) {
         return;
     }
-    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
-    virtio_stw_phys(vq->vdev, pa, val);
+
+    caches = atomic_rcu_read(&vq->vring.caches);
+    pa = offsetof(VRingUsed, ring[vq->vring.num]);
+    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
 }
 
 void virtio_queue_set_notification(VirtQueue *vq, int enable)
 {
     vq->notification = enable;
+
+    rcu_read_lock();
     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vring_avail_idx(vq));
     } else if (enable) {
@@ -277,6 +300,7 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable)
         /* Expose avail event/used flags before caller checks the avail idx. */
         smp_mb();
     }
+    rcu_read_unlock();
 }
 
 int virtio_queue_ready(VirtQueue *vq)
@@ -285,8 +309,9 @@ int virtio_queue_ready(VirtQueue *vq)
 }
 
 /* Fetch avail_idx from VQ memory only when we really need to know if
- * guest has added some buffers. */
-int virtio_queue_empty(VirtQueue *vq)
+ * guest has added some buffers.
+ * Called within rcu_read_lock(). */
+static int virtio_queue_empty_rcu(VirtQueue *vq)
 {
     if (vq->shadow_avail_idx != vq->last_avail_idx) {
         return 0;
@@ -295,6 +320,20 @@ int virtio_queue_empty(VirtQueue *vq)
     return vring_avail_idx(vq) == vq->last_avail_idx;
 }
 
+int virtio_queue_empty(VirtQueue *vq)
+{
+    bool empty;
+
+    if (vq->shadow_avail_idx != vq->last_avail_idx) {
+        return 0;
+    }
+
+    rcu_read_lock();
+    empty = vring_avail_idx(vq) == vq->last_avail_idx;
+    rcu_read_unlock();
+    return empty;
+}
+
 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                                unsigned int len)
 {
@@ -373,6 +412,7 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
     return true;
 }
 
+/* Called within rcu_read_lock(). */
 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len, unsigned int idx)
 {
@@ -393,6 +433,7 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
     vring_used_write(vq, &uelem, idx);
 }
 
+/* Called within rcu_read_lock(). */
 void virtqueue_flush(VirtQueue *vq, unsigned int count)
 {
     uint16_t old, new;
@@ -416,10 +457,13 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
 {
+    rcu_read_lock();
     virtqueue_fill(vq, elem, len, 0);
     virtqueue_flush(vq, 1);
+    rcu_read_unlock();
 }
 
+/* Called within rcu_read_lock(). */
 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
 {
     uint16_t num_heads = vring_avail_idx(vq) - idx;
@@ -439,6 +483,7 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
     return num_heads;
 }
 
+/* Called within rcu_read_lock(). */
 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                                unsigned int *head)
 {
@@ -740,8 +785,9 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
     if (unlikely(vdev->broken)) {
         return NULL;
     }
-    if (virtio_queue_empty(vq)) {
-        return NULL;
+    rcu_read_lock();
+    if (virtio_queue_empty_rcu(vq)) {
+        goto done;
     }
     /* Needed after virtio_queue_empty(), see comment in
      * virtqueue_num_heads(). */
@@ -754,11 +800,11 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
 
     if (vq->inuse >= vq->vring.num) {
         virtio_error(vdev, "Virtqueue size exceeded");
-        return NULL;
+        goto done;
     }
 
     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
-        return NULL;
+        goto done;
     }
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
@@ -767,7 +813,6 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
 
     i = head;
 
-    rcu_read_lock();
     caches = atomic_rcu_read(&vq->vring.caches);
     if (caches->desc.len < max * sizeof(VRingDesc)) {
         virtio_error(vdev, "Cannot map descriptor ring");
@@ -1483,6 +1528,7 @@ static void virtio_set_isr(VirtIODevice *vdev, int value)
     }
 }
 
+/* Called within rcu_read_lock(). */
 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
     uint16_t old, new;
@@ -1508,7 +1554,12 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
 
 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
 {
-    if (!virtio_should_notify(vdev, vq)) {
+    bool should_notify;
+    rcu_read_lock();
+    should_notify = virtio_should_notify(vdev, vq);
+    rcu_read_unlock();
+
+    if (!should_notify) {
         return;
     }
 
@@ -1535,7 +1586,12 @@ void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
 
 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
-    if (!virtio_should_notify(vdev, vq)) {
+    bool should_notify;
+    rcu_read_lock();
+    should_notify = virtio_should_notify(vdev, vq);
+    rcu_read_unlock();
+
+    if (!should_notify) {
         return;
     }
 
@@ -1996,6 +2052,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
         }
     }
 
+    rcu_read_lock();
     for (i = 0; i < num; i++) {
         if (vdev->vq[i].vring.desc) {
             uint16_t nheads;
@@ -2030,6 +2087,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
             }
         }
     }
+    rcu_read_unlock();
 
     return 0;
 }
@@ -2156,9 +2214,11 @@ void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
 
 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
 {
+    rcu_read_lock();
     if (vdev->vq[n].vring.desc) {
         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
     }
+    rcu_read_unlock();
 }
 
 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
-- 
MST