From: Robin Murphy
To: joro@8bytes.org
Cc: will@kernel.org, iommu@lists.linux.dev, linux-kernel@vger.kernel.org,
    zhangzekun11@huawei.com, john.g.garry@oracle.com,
    dheerajkumar.srivastava@amd.com, jsnitsel@redhat.com
Subject: [PATCH v2 1/2] iommu/iova: Make the rcache depot scale better
Date: Mon, 21 Aug 2023 19:22:51 +0100

The algorithm in the original paper specifies the storage of full
magazines in the depot as an unbounded list rather than a fixed-size
array. It turns out to be pretty straightforward to do this in our
implementation with no significant loss of efficiency. This allows
the depot to scale up to the working set sizes of larger systems,
while also potentially saving some memory on smaller ones too.

Since this involves touching struct iova_magazine with the requisite
care, we may as well reinforce the comment with a proper assertion too.

Reviewed-by: John Garry
Signed-off-by: Robin Murphy
Reviewed-by: Jerry Snitselaar
---
v2: Fix freeing loops, improve comment and add matching assertion

 drivers/iommu/iova.c | 65 ++++++++++++++++++++++++--------------------
 1 file changed, 35 insertions(+), 30 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 10b964600948..dd2309e9a6c5 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -622,15 +622,19 @@ EXPORT_SYMBOL_GPL(reserve_iova);
 /*
  * As kmalloc's buffer size is fixed to power of 2, 127 is chosen to
  * assure size of 'iova_magazine' to be 1024 bytes, so that no memory
- * will be wasted.
+ * will be wasted. Since only full magazines are inserted into the depot,
+ * we don't need to waste PFN capacity on a separate list head either.
  */
 #define IOVA_MAG_SIZE 127
-#define MAX_GLOBAL_MAGS 32      /* magazines per bin */
 
 struct iova_magazine {
-        unsigned long size;
+        union {
+                unsigned long size;
+                struct iova_magazine *next;
+        };
         unsigned long pfns[IOVA_MAG_SIZE];
 };
+static_assert(!(sizeof(struct iova_magazine) & (sizeof(struct iova_magazine) - 1)));
 
 struct iova_cpu_rcache {
         spinlock_t lock;
@@ -640,8 +644,7 @@ struct iova_cpu_rcache {
 
 struct iova_rcache {
         spinlock_t lock;
-        unsigned long depot_size;
-        struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+        struct iova_magazine *depot;
         struct iova_cpu_rcache __percpu *cpu_rcaches;
 };
 
@@ -717,6 +720,21 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
         mag->pfns[mag->size++] = pfn;
 }
 
+static struct iova_magazine *iova_depot_pop(struct iova_rcache *rcache)
+{
+        struct iova_magazine *mag = rcache->depot;
+
+        rcache->depot = mag->next;
+        mag->size = IOVA_MAG_SIZE;
+        return mag;
+}
+
+static void iova_depot_push(struct iova_rcache *rcache, struct iova_magazine *mag)
+{
+        mag->next = rcache->depot;
+        rcache->depot = mag;
+}
+
 int iova_domain_init_rcaches(struct iova_domain *iovad)
 {
         unsigned int cpu;
@@ -734,7 +752,6 @@ int iova_domain_init_rcaches(struct iova_domain *iovad)
 
                 rcache = &iovad->rcaches[i];
                 spin_lock_init(&rcache->lock);
-                rcache->depot_size = 0;
                 rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
                                                      cache_line_size());
                 if (!rcache->cpu_rcaches) {
@@ -776,7 +793,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
                                  struct iova_rcache *rcache,
                                  unsigned long iova_pfn)
 {
-        struct iova_magazine *mag_to_free = NULL;
         struct iova_cpu_rcache *cpu_rcache;
         bool can_insert = false;
         unsigned long flags;
@@ -794,12 +810,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
 
                 if (new_mag) {
                         spin_lock(&rcache->lock);
-                        if (rcache->depot_size < MAX_GLOBAL_MAGS) {
-                                rcache->depot[rcache->depot_size++] =
-                                                cpu_rcache->loaded;
-                        } else {
-                                mag_to_free = cpu_rcache->loaded;
-                        }
+                        iova_depot_push(rcache, cpu_rcache->loaded);
                         spin_unlock(&rcache->lock);
 
                         cpu_rcache->loaded = new_mag;
@@ -812,11 +823,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
 
         spin_unlock_irqrestore(&cpu_rcache->lock, flags);
 
-        if (mag_to_free) {
-                iova_magazine_free_pfns(mag_to_free, iovad);
-                iova_magazine_free(mag_to_free);
-        }
-
         return can_insert;
 }
 
@@ -854,9 +860,9 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
                 has_pfn = true;
         } else {
                 spin_lock(&rcache->lock);
-                if (rcache->depot_size > 0) {
+                if (rcache->depot) {
                         iova_magazine_free(cpu_rcache->loaded);
-                        cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
+                        cpu_rcache->loaded = iova_depot_pop(rcache);
                         has_pfn = true;
                 }
                 spin_unlock(&rcache->lock);
@@ -895,9 +901,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)
         struct iova_rcache *rcache;
         struct iova_cpu_rcache *cpu_rcache;
         unsigned int cpu;
-        int i, j;
 
-        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+        for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                 rcache = &iovad->rcaches[i];
                 if (!rcache->cpu_rcaches)
                         break;
@@ -907,8 +912,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)
                         iova_magazine_free(cpu_rcache->prev);
                 }
                 free_percpu(rcache->cpu_rcaches);
-                for (j = 0; j < rcache->depot_size; ++j)
-                        iova_magazine_free(rcache->depot[j]);
+                while (rcache->depot)
+                        iova_magazine_free(iova_depot_pop(rcache));
         }
 
         kfree(iovad->rcaches);
@@ -942,16 +947,16 @@ static void free_global_cached_iovas(struct iova_domain *iovad)
 {
         struct iova_rcache *rcache;
         unsigned long flags;
-        int i, j;
 
-        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+        for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                 rcache = &iovad->rcaches[i];
                 spin_lock_irqsave(&rcache->lock, flags);
-                for (j = 0; j < rcache->depot_size; ++j) {
-                        iova_magazine_free_pfns(rcache->depot[j], iovad);
-                        iova_magazine_free(rcache->depot[j]);
+                while (rcache->depot) {
+                        struct iova_magazine *mag = iova_depot_pop(rcache);
+
+                        iova_magazine_free_pfns(mag, iovad);
+                        iova_magazine_free(mag);
                 }
-                rcache->depot_size = 0;
                 spin_unlock_irqrestore(&rcache->lock, flags);
         }
 }
-- 
2.39.2.101.g768bb238c484.dirty
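
For anyone who wants to play with the idea outside the kernel tree, below is a
minimal standalone userspace sketch of the depot-as-unbounded-list technique
the patch applies: full magazines are chained through a "next" pointer that
shares storage with "size", since a magazine in the depot is by definition
full. The struct magazine / depot_push() / depot_pop() names, the small
MAG_SIZE and the calloc()-based harness are illustrative stand-ins, not the
kernel API; the real implementation is the iova_depot_push()/iova_depot_pop()
pair in the diff above.

/*
 * Toy model of an intrusive, unbounded depot of full magazines.
 * A magazine's "size" field is only meaningful while it is in per-CPU use;
 * once it sits in the depot it is known to be full, so the same storage
 * can hold the list link instead.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MAG_SIZE 7      /* the kernel uses 127 to land on a 1024-byte allocation */

struct magazine {
        union {
                unsigned long size;     /* valid while the magazine is per-CPU */
                struct magazine *next;  /* valid while the magazine is in the depot */
        };
        unsigned long pfns[MAG_SIZE];
};

struct depot {
        struct magazine *head;          /* unbounded LIFO list of full magazines */
};

/* Push a full magazine; O(1), no capacity limit, no extra allocation. */
static void depot_push(struct depot *d, struct magazine *mag)
{
        assert(mag->size == MAG_SIZE);
        mag->next = d->head;
        d->head = mag;
}

/* Pop a magazine and restore its (implicitly full) size field. */
static struct magazine *depot_pop(struct depot *d)
{
        struct magazine *mag = d->head;

        if (!mag)
                return NULL;
        d->head = mag->next;
        mag->size = MAG_SIZE;
        return mag;
}

int main(void)
{
        struct depot depot = { .head = NULL };
        struct magazine *mag;

        for (int i = 0; i < 3; i++) {
                mag = calloc(1, sizeof(*mag));
                mag->size = MAG_SIZE;   /* pretend it was filled with PFNs */
                depot_push(&depot, mag);
        }

        while ((mag = depot_pop(&depot))) {
                printf("popped magazine with %lu slots\n", mag->size);
                free(mag);
        }
        return 0;
}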