From: Leon Romanovsky
To: Marek Szyprowski
Cc: Jason Gunthorpe, iommu@lists.linux.dev, Juergen Gross,
    linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
    Russell King, Stefano Stabellini, xen-devel@lists.xenproject.org
Subject: [PATCH v4 1/6] dma-mapping: prepare dma_map_ops for conversion to physical address
Date: Thu, 18 Sep 2025 17:09:24 +0300
Message-ID: <24d324344913170315f66cb43ac6692b3132a145.1758203802.git.leon@kernel.org>

Add new .map_phys() and .unmap_phys() callbacks to dma_map_ops in
preparation for replacing .map_page() and .unmap_page() respectively.
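To illustrate the intended shape of the new hooks, here is a minimal
sketch of a dma_map_ops provider; the foo_ prefix and the identity
translation are illustrative only, not part of this patch:

	#include <linux/dma-map-ops.h>

	static dma_addr_t foo_map_phys(struct device *dev, phys_addr_t phys,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
	{
		/* no bus translation on this hypothetical platform */
		return (dma_addr_t)phys;
	}

	static void foo_unmap_phys(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
	{
		/* identity mapping: nothing to tear down */
	}

	static const struct dma_map_ops foo_dma_ops = {
		.map_phys	= foo_map_phys,
		.unmap_phys	= foo_unmap_phys,
	};

Unlike .map_page(), the callback receives the full physical address, so
implementations no longer need the page plus offset arithmetic.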
Reviewed-by: Jason Gunthorpe
Signed-off-by: Leon Romanovsky
---
 include/linux/dma-map-ops.h |  7 +++++++
 kernel/dma/mapping.c        |  4 ++++
 kernel/dma/ops_helpers.c    | 12 ++++++++++--
 3 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 71f5b30254159..25603cb273769 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -37,6 +37,13 @@ struct dma_map_ops {
 	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
 			size_t size, enum dma_data_direction dir,
 			unsigned long attrs);
+
+	dma_addr_t (*map_phys)(struct device *dev, phys_addr_t phys,
+			size_t size, enum dma_data_direction dir,
+			unsigned long attrs);
+	void (*unmap_phys)(struct device *dev, dma_addr_t dma_handle,
+			size_t size, enum dma_data_direction dir,
+			unsigned long attrs);
 	/*
 	 * map_sg should return a negative error code on error. See
 	 * dma_map_sgtable() for a list of appropriate error codes
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index fe7472f13b106..4080aebe5debb 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -169,6 +169,8 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
 		addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
 	else if (use_dma_iommu(dev))
 		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
+	else if (ops->map_phys)
+		addr = ops->map_phys(dev, phys, size, dir, attrs);
 	else if (is_mmio) {
 		if (!ops->map_resource)
 			return DMA_MAPPING_ERROR;
@@ -223,6 +225,8 @@ void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
 		dma_direct_unmap_phys(dev, addr, size, dir, attrs);
 	else if (use_dma_iommu(dev))
 		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
+	else if (ops->unmap_phys)
+		ops->unmap_phys(dev, addr, size, dir, attrs);
 	else if (is_mmio) {
 		if (ops->unmap_resource)
 			ops->unmap_resource(dev, addr, size, dir, attrs);
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
index 6f9d604d9d406..1eccbdbc99c1e 100644
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -64,6 +64,7 @@ struct page *dma_common_alloc_pages(struct device *dev, size_t size,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct page *page;
+	phys_addr_t phys;
 
 	page = dma_alloc_contiguous(dev, size, gfp);
 	if (!page)
@@ -71,9 +72,13 @@ struct page *dma_common_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
+	phys = page_to_phys(page);
 	if (use_dma_iommu(dev))
-		*dma_handle = iommu_dma_map_phys(dev, page_to_phys(page), size,
-						 dir, DMA_ATTR_SKIP_CPU_SYNC);
+		*dma_handle = iommu_dma_map_phys(dev, phys, size, dir,
+						 DMA_ATTR_SKIP_CPU_SYNC);
+	else if (ops->map_phys)
+		*dma_handle = ops->map_phys(dev, phys, size, dir,
+					    DMA_ATTR_SKIP_CPU_SYNC);
 	else
 		*dma_handle = ops->map_page(dev, page, 0, size, dir,
 					    DMA_ATTR_SKIP_CPU_SYNC);
@@ -94,6 +99,9 @@ void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
 	if (use_dma_iommu(dev))
 		iommu_dma_unmap_phys(dev, dma_handle, size, dir,
 				     DMA_ATTR_SKIP_CPU_SYNC);
+	else if (ops->unmap_phys)
+		ops->unmap_phys(dev, dma_handle, size, dir,
+				DMA_ATTR_SKIP_CPU_SYNC);
 	else if (ops->unmap_page)
 		ops->unmap_page(dev, dma_handle, size, dir,
 				DMA_ATTR_SKIP_CPU_SYNC);
-- 
2.51.0
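With this patch applied, dma_map_phys() tries the new callback before
the legacy ones. A condensed (not verbatim) view of the resulting
fallback order, with the dma-direct condition abbreviated:

	if (dma_map_direct(dev, ops))			/* abbreviated */
		addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
	else if (use_dma_iommu(dev))
		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
	else if (ops->map_phys)
		addr = ops->map_phys(dev, phys, size, dir, attrs);
	else if (is_mmio)				/* DMA_ATTR_MMIO */
		addr = ops->map_resource ?
		       ops->map_resource(dev, phys, size, dir, attrs) :
		       DMA_MAPPING_ERROR;
	else
		addr = ops->map_page(dev, phys_to_page(phys),
				     offset_in_page(phys), size, dir, attrs);

A provider that implements .map_phys therefore shadows its .map_page
and .map_resource callbacks, which is what lets the later patches in
this series delete those.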
From: Leon Romanovsky
To: Marek Szyprowski
Cc: Jason Gunthorpe, iommu@lists.linux.dev, Juergen Gross,
    linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
    Russell King, Stefano Stabellini, xen-devel@lists.xenproject.org
Subject: [PATCH v4 2/6] dma-mapping: convert dummy ops to physical address mapping
Date: Thu, 18 Sep 2025 17:09:25 +0300
Message-ID: <9a1d5ba5f4e5c4ac1216253e35a4a6a7cb941802.1758203802.git.leon@kernel.org>

Change the dma_dummy_map_page() and dma_dummy_unmap_page() routines to
accept a physical address and rename them accordingly.
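For callers nothing changes: the dummy ops still fail every mapping
attempt, and the failure is still observed through dma_mapping_error().
A caller sketch (the device and buffer are illustrative):

	dma_addr_t addr;

	addr = dma_map_phys(dev, phys, size, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, addr))
		return -EIO;	/* always taken with dma_dummy_ops */

The only difference is which callback reports the error.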
Reviewed-by: Jason Gunthorpe
Signed-off-by: Leon Romanovsky
---
 kernel/dma/dummy.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/kernel/dma/dummy.c b/kernel/dma/dummy.c
index 92de80e5b057e..16a51736a2a39 100644
--- a/kernel/dma/dummy.c
+++ b/kernel/dma/dummy.c
@@ -11,17 +11,16 @@ static int dma_dummy_mmap(struct device *dev, struct vm_area_struct *vma,
 	return -ENXIO;
 }
 
-static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
+static dma_addr_t dma_dummy_map_phys(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return DMA_MAPPING_ERROR;
 }
-static void dma_dummy_unmap_page(struct device *dev, dma_addr_t dma_handle,
+static void dma_dummy_unmap_phys(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	/*
-	 * Dummy ops doesn't support map_page, so unmap_page should never be
+	 * Dummy ops doesn't support map_phys, so unmap_phys should never be
 	 * called.
 	 */
 	WARN_ON_ONCE(true);
@@ -51,8 +50,8 @@ static int dma_dummy_supported(struct device *hwdev, u64 mask)
 
 const struct dma_map_ops dma_dummy_ops = {
 	.mmap = dma_dummy_mmap,
-	.map_page = dma_dummy_map_page,
-	.unmap_page = dma_dummy_unmap_page,
+	.map_phys = dma_dummy_map_phys,
+	.unmap_phys = dma_dummy_unmap_phys,
 	.map_sg = dma_dummy_map_sg,
 	.unmap_sg = dma_dummy_unmap_sg,
 	.dma_supported = dma_dummy_supported,
-- 
2.51.0
From: Leon Romanovsky
To: Marek Szyprowski
Cc: Jason Gunthorpe, iommu@lists.linux.dev, Juergen Gross,
    linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
    Russell King, Stefano Stabellini, xen-devel@lists.xenproject.org
Subject: [PATCH v4 3/6] ARM: dma-mapping: Reduce struct page exposure in arch_sync_dma*()
Date: Thu, 18 Sep 2025 17:09:26 +0300
Message-ID: <2f20069c2b616808c034ba4e75905820b94e0e22.1758203802.git.leon@kernel.org>

In preparation for changing from the .map_page to the .map_phys DMA
callback, convert the arch_sync_dma*() functions to take a physical
address instead of a struct page plus offset.

Signed-off-by: Leon Romanovsky
Reviewed-by: Jason Gunthorpe
---
 arch/arm/mm/dma-mapping.c | 82 +++++++++++++++------------------------
 1 file changed, 31 insertions(+), 51 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 88c2d68a69c9e..449fe6bf525e5 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -624,16 +624,14 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	kfree(buf);
 }
 
-static void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, enum dma_data_direction dir,
+static void dma_cache_maint_page(phys_addr_t phys, size_t size,
+	enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
-	unsigned long pfn;
+	unsigned long offset = offset_in_page(phys);
+	unsigned long pfn = __phys_to_pfn(phys);
 	size_t left = size;
 
-	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
-
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages. But we still need to process highmem pages individually.
@@ -644,17 +642,18 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 		size_t len = left;
 		void *vaddr;
 
-		page = pfn_to_page(pfn);
-
-		if (PageHighMem(page)) {
+		phys = __pfn_to_phys(pfn);
+		if (PhysHighMem(phys)) {
 			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
 
 			if (cache_is_vipt_nonaliasing()) {
-				vaddr = kmap_atomic(page);
+				vaddr = kmap_atomic_pfn(pfn);
 				op(vaddr + offset, len, dir);
 				kunmap_atomic(vaddr);
 			} else {
+				struct page *page = phys_to_page(phys);
+
 				vaddr = kmap_high_get(page);
 				if (vaddr) {
 					op(vaddr + offset, len, dir);
@@ -662,7 +661,8 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				}
 			}
 		} else {
-			vaddr = page_address(page) + offset;
+			phys += offset;
+			vaddr = phys_to_virt(phys);
 			op(vaddr, len, dir);
 		}
 		offset = 0;
@@ -676,14 +676,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
  * Note: Drivers should NOT use this function directly.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
-static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+	enum dma_data_direction dir)
 {
-	phys_addr_t paddr;
-
-	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+	dma_cache_maint_page(paddr, size, dir, dmac_map_area);
 
-	paddr = page_to_phys(page) + off;
 	if (dir == DMA_FROM_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 	} else {
@@ -692,17 +689,15 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 
-static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+	enum dma_data_direction dir)
 {
-	phys_addr_t paddr = page_to_phys(page) + off;
-
 	/* FIXME: non-speculating: not required */
 	/* in any case, don't bother invalidating if DMA to device */
 	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 
-		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+		dma_cache_maint_page(paddr, size, dir, dmac_unmap_area);
 	}
 
 	/*
@@ -1205,7 +1200,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_device(sg_phys(s), s->length, dir);
 
 	prot = __dma_info_to_prot(dir, attrs);
 
@@ -1307,8 +1302,7 @@ static void arm_iommu_unmap_sg(struct device *dev,
 		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			__dma_page_dev_to_cpu(sg_page(s), s->offset,
-					      s->length, dir);
+			arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
 	}
 }
 
@@ -1330,7 +1324,7 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
 		return;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
 
 }
 
@@ -1352,7 +1346,7 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 		return;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_device(sg_phys(s), s->length, dir);
 }
 
 /**
@@ -1374,7 +1368,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	int ret, prot, len = PAGE_ALIGN(size + offset);
 
 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arch_sync_dma_for_device(page_to_phys(page) + offset, size, dir);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
@@ -1407,7 +1401,6 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
@@ -1415,8 +1408,9 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 		return;
 
 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-		__dma_page_dev_to_cpu(page, offset, size, dir);
+		phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
+
+		arch_sync_dma_for_cpu(phys + offset, size, dir);
 	}
 
 	iommu_unmap(mapping->domain, iova, len);
@@ -1485,14 +1479,14 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;
 
 	if (dev->dma_coherent || !iova)
 		return;
 
-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, iova);
+	arch_sync_dma_for_cpu(phys + offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1500,14 +1494,14 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;
 
 	if (dev->dma_coherent || !iova)
 		return;
 
-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, iova);
+	arch_sync_dma_for_device(phys + offset, size, dir);
 }
 
 static const struct dma_map_ops iommu_ops = {
@@ -1794,20 +1788,6 @@ void arch_teardown_dma_ops(struct device *dev)
 	set_dma_ops(dev, NULL);
 }
 
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-	enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			      size, dir);
-}
-
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-	enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			      size, dir);
-}
-
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	gfp_t gfp, unsigned long attrs)
 {
-- 
2.51.0
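A note on what this conversion buys: arch_sync_dma_for_device() and
arch_sync_dma_for_cpu() are now the single phys-based entry points for
ARM cache maintenance, so a caller holding only a physical address no
longer round-trips through struct page. A hypothetical streaming-map
fragment (translate_to_bus() is illustrative, not a real helper):

	static dma_addr_t example_map(struct device *dev, phys_addr_t phys,
			size_t size, enum dma_data_direction dir)
	{
		if (!dev->dma_coherent)
			/* write back / invalidate CPU caches for the device */
			arch_sync_dma_for_device(phys, size, dir);
		return translate_to_bus(dev, phys);
	}

Highmem still works because dma_cache_maint_page() continues to kmap
each page individually when needed.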
From: Leon Romanovsky
To: Marek Szyprowski
Cc: Jason Gunthorpe, iommu@lists.linux.dev, Juergen Gross,
    linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
    Russell King, Stefano Stabellini, xen-devel@lists.xenproject.org
Subject: [PATCH v4 4/6] ARM: dma-mapping: Switch to physical address mapping callbacks
Date: Thu, 18 Sep 2025 17:09:27 +0300
Message-ID: <110f3bd0ad179b6a26cf236c3214818dbec6a723.1758203802.git.leon@kernel.org>

Combine the resource and page mapping routines into one function that
handles both flows in the same manner. This conversion allows us to
remove the .map_resource/.unmap_resource callbacks completely.

Reviewed-by: Jason Gunthorpe
Signed-off-by: Leon Romanovsky
---
 arch/arm/mm/dma-mapping.c | 100 ++++++++++---------------------------
 1 file changed, 23 insertions(+), 77 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 449fe6bf525e5..a6606ba0584f4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -732,6 +732,9 @@ static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
 	if (attrs & DMA_ATTR_PRIVILEGED)
 		prot |= IOMMU_PRIV;
 
+	if (attrs & DMA_ATTR_MMIO)
+		prot |= IOMMU_MMIO;
+
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 		return prot | IOMMU_READ | IOMMU_WRITE;
@@ -1350,25 +1353,27 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 }
 
 /**
- * arm_iommu_map_page
+ * arm_iommu_map_phys
  * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
+ * @phys: physical address that buffer resides in
 * @size: size of buffer to map
 * @dir: DMA transfer direction
+ * @attrs: DMA mapping attributes
 *
 * IOMMU aware version of arm_dma_map_page()
 */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     unsigned long attrs)
+static dma_addr_t arm_iommu_map_phys(struct device *dev, phys_addr_t phys,
+	     size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	int len = PAGE_ALIGN(size + offset_in_page(phys));
+	phys_addr_t addr = phys & PAGE_MASK;
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot;
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(page_to_phys(page) + offset, size, dir);
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+		arch_sync_dma_for_device(phys, size, dir);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
@@ -1376,12 +1381,11 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 
 	prot = __dma_info_to_prot(dir, attrs);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-			prot, GFP_KERNEL);
+	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + offset_in_page(phys);
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_MAPPING_ERROR;
@@ -1393,10 +1397,11 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
+ * @attrs: DMA mapping attributes
 *
- * IOMMU aware version of arm_dma_unmap_page()
+ * IOMMU aware version of arm_dma_unmap_phys()
 */
-static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_iommu_unmap_phys(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
@@ -1407,7 +1412,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
 		phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
 
 		arch_sync_dma_for_cpu(phys + offset, size, dir);
@@ -1417,63 +1423,6 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	__free_iova(mapping, iova, len);
 }
 
-/**
- * arm_iommu_map_resource - map a device resource for DMA
- * @dev: valid struct device pointer
- * @phys_addr: physical address of resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static dma_addr_t arm_iommu_map_resource(struct device *dev,
-		phys_addr_t phys_addr, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t dma_addr;
-	int ret, prot;
-	phys_addr_t addr = phys_addr & PAGE_MASK;
-	unsigned int offset = phys_addr & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_MAPPING_ERROR)
-		return dma_addr;
-
-	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
-
-	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
-	if (ret < 0)
-		goto fail;
-
-	return dma_addr + offset;
-fail:
-	__free_iova(mapping, dma_addr, len);
-	return DMA_MAPPING_ERROR;
-}
-
-/**
- * arm_iommu_unmap_resource - unmap a device DMA resource
- * @dev: valid struct device pointer
- * @dma_handle: DMA address to resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova = dma_handle & PAGE_MASK;
-	unsigned int offset = dma_handle & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	if (!iova)
-		return;
-
-	iommu_unmap(mapping->domain, iova, len);
-	__free_iova(mapping, iova, len);
-}
-
 static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
@@ -1510,8 +1459,8 @@ static const struct dma_map_ops iommu_ops = {
 	.mmap		= arm_iommu_mmap_attrs,
 	.get_sgtable	= arm_iommu_get_sgtable,
 
-	.map_page		= arm_iommu_map_page,
-	.unmap_page		= arm_iommu_unmap_page,
+	.map_phys		= arm_iommu_map_phys,
+	.unmap_phys		= arm_iommu_unmap_phys,
 	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
 	.sync_single_for_device	= arm_iommu_sync_single_for_device,
 
@@ -1519,9 +1468,6 @@ static const struct dma_map_ops iommu_ops = {
 	.unmap_sg		= arm_iommu_unmap_sg,
 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
-
-	.map_resource		= arm_iommu_map_resource,
-	.unmap_resource		= arm_iommu_unmap_resource,
 };
 
 /**
-- 
2.51.0

From: Leon Romanovsky
To: Marek Szyprowski
Cc: Jason Gunthorpe, iommu@lists.linux.dev, Juergen Gross,
    linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
    Russell King, Stefano Stabellini, xen-devel@lists.xenproject.org
Subject: [PATCH v4 5/6] xen: swiotlb: Switch to physical address mapping callbacks
Date: Thu, 18 Sep 2025 17:09:28 +0300
Message-ID: <997c0122a24c355b4d7ee353902041a7617f4c9e.1758203802.git.leon@kernel.org>

Combine the resource and page mapping routines into one function and
remove the .map_resource/.unmap_resource callbacks completely.
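The behavioral crux is the new DMA_ATTR_MMIO branch: MMIO targets are
never bounced through swiotlb and never translated with
xen_phys_to_dma(); they are only checked for addressability. Condensed
from the hunk below:

	if (attrs & DMA_ATTR_MMIO) {
		/* MMIO is never bounced; just validate addressability */
		if (unlikely(!dma_capable(dev, phys, size, false)))
			return DMA_MAPPING_ERROR;	/* after warning once */
		return phys;
	}

This mirrors what the removed xen_swiotlb_direct_map_resource() used
to do.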
Signed-off-by: Leon Romanovsky
---
 drivers/xen/swiotlb-xen.c | 63 ++++++++++++++++++---------------------
 1 file changed, 29 insertions(+), 34 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index dd7747a2de879..48936179c940b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -200,17 +200,32 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
- * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
+ * either xen_swiotlb_unmap_phys or xen_swiotlb_dma_sync_single is performed.
 */
-static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
-				       unsigned long offset, size_t size,
-				       enum dma_data_direction dir,
+static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
+				       size_t size, enum dma_data_direction dir,
 				       unsigned long attrs)
 {
-	phys_addr_t map, phys = page_to_phys(page) + offset;
-	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
+	dma_addr_t dev_addr;
+	phys_addr_t map;
 
 	BUG_ON(dir == DMA_NONE);
+
+	if (attrs & DMA_ATTR_MMIO) {
+		if (unlikely(!dma_capable(dev, phys, size, false))) {
+			dev_err_once(dev,
+				     "DMA addr %pap+%zu overflow (mask %llx, bus limit %llx).\n",
+				     &phys, size, *dev->dma_mask,
+				     dev->bus_dma_limit);
+			WARN_ON_ONCE(1);
+			return DMA_MAPPING_ERROR;
+		}
+		return phys;
+	}
+
+	dev_addr = xen_phys_to_dma(dev, phys);
+
 	/*
 	 * If the address happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
@@ -257,13 +272,13 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 /*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
- * match what was provided for in a previous xen_swiotlb_map_page call.  All
+ * match what was provided for in a previous xen_swiotlb_map_phys call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
@@ -325,7 +340,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 
 /*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
+ * concerning calls here are the same as for swiotlb_unmap_phys() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
@@ -337,7 +352,7 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+		xen_swiotlb_unmap_phys(hwdev, sg->dma_address, sg_dma_len(sg),
 				       dir, attrs);
 
 }
@@ -352,8 +367,8 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
-				sg->offset, sg->length, dir, attrs);
+		sg->dma_address = xen_swiotlb_map_phys(dev, sg_phys(sg),
+				sg->length, dir, attrs);
 		if (sg->dma_address == DMA_MAPPING_ERROR)
 			goto out_unmap;
 		sg_dma_len(sg) = sg->length;
@@ -392,25 +407,6 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 	}
 }
 
-static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
-						  phys_addr_t paddr,
-						  size_t size,
-						  enum dma_data_direction dir,
-						  unsigned long attrs)
-{
-	dma_addr_t dma_addr = paddr;
-
-	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
-		dev_err_once(dev,
-			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
-			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
-		WARN_ON_ONCE(1);
-		return DMA_MAPPING_ERROR;
-	}
-
-	return dma_addr;
-}
-
 /*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
@@ -437,13 +433,12 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
 	.map_sg = xen_swiotlb_map_sg,
 	.unmap_sg = xen_swiotlb_unmap_sg,
-	.map_page = xen_swiotlb_map_page,
-	.unmap_page = xen_swiotlb_unmap_page,
+	.map_phys = xen_swiotlb_map_phys,
+	.unmap_phys = xen_swiotlb_unmap_phys,
 	.dma_supported = xen_swiotlb_dma_supported,
 	.mmap = dma_common_mmap,
 	.get_sgtable = dma_common_get_sgtable,
 	.alloc_pages_op = dma_common_alloc_pages,
 	.free_pages = dma_common_free_pages,
 	.max_mapping_size = swiotlb_max_mapping_size,
-	.map_resource = xen_swiotlb_direct_map_resource,
 };
-- 
2.51.0
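For orientation before the final cleanup patch: resource mappings are
expected to reach these .map_phys implementations via the
dma_map_phys() entry point with DMA_ATTR_MMIO set. Assuming the wrapper
shape used elsewhere in this conversion effort (a sketch, not quoted
from this series):

	static inline dma_addr_t dma_map_resource(struct device *dev,
			phys_addr_t phys_addr, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		/* MMIO attribute routes the request down the old
		 * map_resource semantics */
		return dma_map_phys(dev, phys_addr, size, dir,
				    attrs | DMA_ATTR_MMIO);
	}

With that in place, per-ops .map_resource callbacks are redundant,
which is what the next patch removes.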
From: Leon Romanovsky
To: Marek Szyprowski
Cc: Jason Gunthorpe, iommu@lists.linux.dev, Juergen Gross,
    linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
    Russell King, Stefano Stabellini, xen-devel@lists.xenproject.org
Subject: [PATCH v4 6/6] dma-mapping: remove unused mapping resource callbacks
Date: Thu, 18 Sep 2025 17:09:29 +0300

After the ARM and Xen conversions to physical address mapping, there
are no in-kernel users of the map_resource/unmap_resource callbacks,
so remove them.

Reviewed-by: Jason Gunthorpe
Signed-off-by: Leon Romanovsky
---
 include/linux/dma-map-ops.h |  6 ------
 kernel/dma/mapping.c        | 16 ++++------------
 2 files changed, 4 insertions(+), 18 deletions(-)

diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 25603cb273769..a2ec1566aa270 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -53,12 +53,6 @@ struct dma_map_ops {
 			enum dma_data_direction dir, unsigned long attrs);
 	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
 			enum dma_data_direction dir, unsigned long attrs);
-	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
-			size_t size, enum dma_data_direction dir,
-			unsigned long attrs);
-	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
-			size_t size, enum dma_data_direction dir,
-			unsigned long attrs);
 	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
 			size_t size, enum dma_data_direction dir);
 	void (*sync_single_for_device)(struct device *dev,
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 4080aebe5debb..32a85bfdf873a 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -157,7 +157,7 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	bool is_mmio = attrs & DMA_ATTR_MMIO;
-	dma_addr_t addr;
+	dma_addr_t addr = DMA_MAPPING_ERROR;
 
 	BUG_ON(!valid_dma_direction(dir));
 
@@ -171,18 +171,13 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
 		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
 	else if (ops->map_phys)
 		addr = ops->map_phys(dev, phys, size, dir, attrs);
-	else if (is_mmio) {
-		if (!ops->map_resource)
-			return DMA_MAPPING_ERROR;
-
-		addr = ops->map_resource(dev, phys, size, dir, attrs);
-	} else {
+	else if (!is_mmio && ops->map_page) {
 		struct page *page = phys_to_page(phys);
 		size_t offset = offset_in_page(phys);
 
 		/*
 		 * The dma_ops API contract for ops->map_page() requires
-		 * kmappable memory, while ops->map_resource() does not.
+		 * kmappable memory.
 		 */
 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
 	}
@@ -227,10 +222,7 @@ void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
 		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
 	else if (ops->unmap_phys)
 		ops->unmap_phys(dev, addr, size, dir, attrs);
-	else if (is_mmio) {
-		if (ops->unmap_resource)
-			ops->unmap_resource(dev, addr, size, dir, attrs);
-	} else
+	else
 		ops->unmap_page(dev, addr, size, dir, attrs);
 	trace_dma_unmap_phys(dev, addr, size, dir, attrs);
 	debug_dma_unmap_phys(dev, addr, size, dir);
-- 
2.51.0
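End-state usage, for completeness: RAM and MMIO now share one pair of
entry points, distinguished only by DMA_ATTR_MMIO. A hypothetical
caller mapping a peer device's BAR (bar_phys and len are illustrative):

	dma_addr_t bus;

	bus = dma_map_phys(dev, bar_phys, len, DMA_BIDIRECTIONAL,
			   DMA_ATTR_MMIO);
	if (dma_mapping_error(dev, bus))
		return -EIO;

	/* ... device performs DMA ... */

	dma_unmap_phys(dev, bus, len, DMA_BIDIRECTIONAL, DMA_ATTR_MMIO);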