From nobody Thu Oct 2 09:16:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 0FDB52D2397; Thu, 18 Sep 2025 18:45:34 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221135; cv=none; b=YFnjoAxjkry49ChjInvKV/i+HgwiH5OkpNDcMmJB/k8qiTtXxkIuvi9an9c2EbhyEHzTKG85U696/Z6t9HYh/xbA8zuMFJI6/E4XfVG9ts7Z46il/Za4g3HKPKS86leDjDKEoy0EcmJ3ccxXcIna9q3/oAag+oAYGucsEXaYbCQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221135; c=relaxed/simple; bh=mRrCuK2cRKam5wDdOYPzkMc4IzLVg72Ycd5tZAIZOwA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=E+b01tLaVLXWOqghx5pS0GWXWZnlwBoNAoV8/siV/dt7jVMeQe1SCKtm2KTiPcCP3y6LHjyc5qOyZB3s4P5KuePJpGeTHelA/HqE9niOBFyTfLpUECQWeL+ZLL0FvSjMT0hfbhVr70Kvo+LXqXZdeW0136bX2eUsMcWP+z3PDnI= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=BrezbUs7; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="BrezbUs7" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 76C21C4CEF0; Thu, 18 Sep 2025 18:45:33 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1758221134; bh=mRrCuK2cRKam5wDdOYPzkMc4IzLVg72Ycd5tZAIZOwA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=BrezbUs7YQQoFiEnq4O37fij9yQqGwhFFKxGtk7W85Qc/47lo4WDrX08x8pBhOMBl x3C+EqTXn/J9a/3WVpKBvLTRlkB+do/ZfwaAE2Mb7IopCTXi7LrOLKMJkkNTJfvDUp YqF2kAB3MxM4nlAlg21f/GeFvZw8zg11jBWISdL9T8Q0n5qtbkyadIU6XtD1/orTr1 7w2ouYNTNdHOHzAOrJP4usoDUr3cv4AUvmi2rODsSOE2yxEfZKK0CI1huASdVmWoDj 
bBSTZ3whPx0z/NsNtq26Y7bbhfgNlmo/ggZU7pwAyS1i89RmTF6XqFtx+J2G8mxe/e /R99gASdkRTZg== From: Leon Romanovsky To: Marek Szyprowski Cc: Leon Romanovsky , Jason Gunthorpe , Andreas Larsson , Borislav Petkov , Dave Hansen , "David S. Miller" , Geoff Levand , Helge Deller , Ingo Molnar , iommu@lists.linux.dev, "James E.J. Bottomley" , Jason Wang , Juergen Gross , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Madhavan Srinivasan , Matt Turner , Michael Ellerman , "Michael S. Tsirkin" , Richard Henderson , sparclinux@vger.kernel.org, Stefano Stabellini , Thomas Bogendoerfer , Thomas Gleixner , virtualization@lists.linux.dev, x86@kernel.org, xen-devel@lists.xenproject.org Subject: [PATCH 1/9] alpha: Convert mapping routine to rely on physical address Date: Thu, 18 Sep 2025 21:45:01 +0300 Message-ID: <0c64474985af55b1aa934b857808068a0e609c6e.1758219787.git.leon@kernel.org> X-Mailer: git-send-email 2.51.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky Alpha doesn't need struct *page and can perform mapping based on physical addresses. So convert it to implement new .map_phys callback. As part of this change, remove useless BUG_ON() as DMA mapping layer ensures that right direction is provided. 
Signed-off-by: Leon Romanovsky --- arch/alpha/kernel/pci_iommu.c | 47 +++++++++++++++-------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index dc91de50f906d..b62d9937d1d3a 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -224,28 +224,25 @@ static int pci_dac_dma_supported(struct pci_dev *dev,= u64 mask) until either pci_unmap_single or pci_dma_sync_single is performed. */ =20 static dma_addr_t -pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, +pci_map_single_1(struct pci_dev *pdev, phys_addr_t paddr, size_t size, int dac_allowed) { struct pci_controller *hose =3D pdev ? pdev->sysdata : pci_isa_hose; dma_addr_t max_dma =3D pdev ? pdev->dma_mask : ISA_DMA_MASK; struct pci_iommu_arena *arena; long npages, dma_ofs, i; - unsigned long paddr; dma_addr_t ret; unsigned int align =3D 0; struct device *dev =3D pdev ? &pdev->dev : NULL; =20 - paddr =3D __pa(cpu_addr); - #if !DEBUG_NODIRECT /* First check to see if we can use the direct map window. 
*/ if (paddr + size + __direct_map_base - 1 <=3D max_dma && paddr + size <=3D __direct_map_size) { ret =3D paddr + __direct_map_base; =20 - DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n", - cpu_addr, size, ret, __builtin_return_address(0)); + DBGA2("pci_map_single: [%pa,%zx] -> direct %llx from %ps\n", + &paddr, size, ret, __builtin_return_address(0)); =20 return ret; } @@ -255,8 +252,8 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, = size_t size, if (dac_allowed) { ret =3D paddr + alpha_mv.pci_dac_offset; =20 - DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n", - cpu_addr, size, ret, __builtin_return_address(0)); + DBGA2("pci_map_single: [%pa,%zx] -> DAC %llx from %ps\n", + &paddr, size, ret, __builtin_return_address(0)); =20 return ret; } @@ -290,10 +287,10 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr= , size_t size, arena->ptes[i + dma_ofs] =3D mk_iommu_pte(paddr); =20 ret =3D arena->dma_base + dma_ofs * PAGE_SIZE; - ret +=3D (unsigned long)cpu_addr & ~PAGE_MASK; + ret +=3D offset_in_page(paddr); =20 - DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n", - cpu_addr, size, npages, ret, __builtin_return_address(0)); + DBGA2("pci_map_single: [%pa,%zx] np %ld -> sg %llx from %ps\n", + &paddr, size, npages, ret, __builtin_return_address(0)); =20 return ret; } @@ -322,19 +319,18 @@ static struct pci_dev *alpha_gendev_to_pci(struct dev= ice *dev) return NULL; } =20 -static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, +static dma_addr_t alpha_pci_map_phys(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs) { struct pci_dev *pdev =3D alpha_gendev_to_pci(dev); int dac_allowed; =20 - BUG_ON(dir =3D=3D DMA_NONE); + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; =20 - dac_allowed =3D pdev ? 
pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;=20 - return pci_map_single_1(pdev, (char *)page_address(page) + offset,=20 - size, dac_allowed); + dac_allowed =3D pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; + return pci_map_single_1(pdev, phys, size, dac_allowed); } =20 /* Unmap a single streaming mode DMA translation. The DMA_ADDR and @@ -343,7 +339,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev= , struct page *page, the cpu to the buffer are guaranteed to see whatever the device wrote there. */ =20 -static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, +static void alpha_pci_unmap_phys(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { @@ -353,8 +349,6 @@ static void alpha_pci_unmap_page(struct device *dev, dm= a_addr_t dma_addr, struct pci_iommu_arena *arena; long dma_ofs, npages; =20 - BUG_ON(dir =3D=3D DMA_NONE); - if (dma_addr >=3D __direct_map_base && dma_addr < __direct_map_base + __direct_map_size) { /* Nothing to do. */ @@ -429,7 +423,7 @@ static void *alpha_pci_alloc_coherent(struct device *de= v, size_t size, } memset(cpu_addr, 0, size); =20 - *dma_addrp =3D pci_map_single_1(pdev, cpu_addr, size, 0); + *dma_addrp =3D pci_map_single_1(pdev, virt_to_phys(cpu_addr), size, 0); if (*dma_addrp =3D=3D DMA_MAPPING_ERROR) { free_pages((unsigned long)cpu_addr, order); if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA)) @@ -643,9 +637,8 @@ static int alpha_pci_map_sg(struct device *dev, struct = scatterlist *sg, /* Fast path single entry scatterlists. 
*/ if (nents =3D=3D 1) { sg->dma_length =3D sg->length; - sg->dma_address - =3D pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg), - sg->length, dac_allowed); + sg->dma_address =3D pci_map_single_1(pdev, sg_phys(sg), + sg->length, dac_allowed); if (sg->dma_address =3D=3D DMA_MAPPING_ERROR) return -EIO; return 1; @@ -917,8 +910,8 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_sta= rt, long pg_count) const struct dma_map_ops alpha_pci_ops =3D { .alloc =3D alpha_pci_alloc_coherent, .free =3D alpha_pci_free_coherent, - .map_page =3D alpha_pci_map_page, - .unmap_page =3D alpha_pci_unmap_page, + .map_phys =3D alpha_pci_map_phys, + .unmap_phys =3D alpha_pci_unmap_phys, .map_sg =3D alpha_pci_map_sg, .unmap_sg =3D alpha_pci_unmap_sg, .dma_supported =3D alpha_pci_supported, --=20 2.51.0 From nobody Thu Oct 2 09:16:56 2025 Delivered-To: importer@patchew.org Received-SPF: pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) client-ip=192.237.175.120; envelope-from=xen-devel-bounces@lists.xenproject.org; helo=lists.xenproject.org; Authentication-Results: mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) smtp.mailfrom=xen-devel-bounces@lists.xenproject.org; dmarc=pass(p=quarantine dis=none) header.from=kernel.org ARC-Seal: i=1; a=rsa-sha256; t=1758221164; cv=none; d=zohomail.com; s=zohoarc; b=Gugc2BXwzQZPeHiDF0aWqZL2u83yYj4+6PTRTrddU504zz8bfKA7V6tMFkeYWE6GfB9uZqx8xxNcJ9KjblAi/lU4nhMBbSWoomMCKbIZzny9W7YnPNQXjXxRA0Vtk/7oKazqRF7KiYjmiQUfHjmaIYXLE7UrM20k5DGU9djmWGM= ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zohomail.com; s=zohoarc; t=1758221164; h=Content-Transfer-Encoding:Cc:Cc:Date:Date:From:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:Subject:To:To:Message-Id:Reply-To; bh=bg8xqpMBUtdlqTI2ykkShdQnC07dLGsTGDMSd10qgbo=; 
b=nbxHJOuPm0J2LtKCu5H7IgULqXQJz4P8Ts0xWBRwGuhQGxBQgkoZuDY2oVqpjgVjCD0aPM9fQL1YUAUeGGm0hJkiRnd6XJ4+zpp0o83/UhWhaISEu5UZnNee11iJMcnQzmD/GVNUoU+IyX/fMFe4lLe/MA0zK4ZQ5K1PA0z60JA= ARC-Authentication-Results: i=1; mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) smtp.mailfrom=xen-devel-bounces@lists.xenproject.org; dmarc=pass header.from= (p=quarantine dis=none) Return-Path: Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) by mx.zohomail.com with SMTPS id 1758221164547310.1758620754556; Thu, 18 Sep 2025 11:46:04 -0700 (PDT) Received: from list by lists.xenproject.org with outflank-mailman.1126438.1468033 (Exim 4.92) (envelope-from ) id 1uzJdg-0004PD-H9; Thu, 18 Sep 2025 18:45:52 +0000 Received: by outflank-mailman (output) from mailman id 1126438.1468033; Thu, 18 Sep 2025 18:45:52 +0000 Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1uzJdg-0004P2-EU; Thu, 18 Sep 2025 18:45:52 +0000 Received: by outflank-mailman (input) for mailman id 1126438; Thu, 18 Sep 2025 18:45:50 +0000 Received: from se1-gles-flk1-in.inumbo.com ([94.247.172.50] helo=se1-gles-flk1.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1uzJde-0003Vt-Rq for xen-devel@lists.xenproject.org; Thu, 18 Sep 2025 18:45:50 +0000 Received: from tor.source.kernel.org (tor.source.kernel.org [172.105.4.254]) by se1-gles-flk1.inumbo.com (Halon) with ESMTPS id b1703eb6-94bf-11f0-9809-7dc792cee155; Thu, 18 Sep 2025 20:45:49 +0200 (CEST) Received: from smtp.kernel.org (transwarp.subspace.kernel.org [100.75.92.58]) by tor.source.kernel.org (Postfix) with ESMTP id 441A56020F; Thu, 18 Sep 2025 18:45:48 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 160A2C4CEE7; Thu, 18 Sep 2025 18:45:47 +0000 (UTC) X-Outflank-Mailman: Message body and most headers restored to incoming version 
X-BeenThere: xen-devel@lists.xenproject.org List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Errors-To: xen-devel-bounces@lists.xenproject.org Precedence: list Sender: "Xen-devel" X-Inumbo-ID: b1703eb6-94bf-11f0-9809-7dc792cee155 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1758221147; bh=cZbejEi6nkNmkzheeCBAGk92h84S9bA1MI/LRocXjWE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=sNr2edIlUVPpwBqbOV9yBQP1/FOhaJ6YeDv3YF3kmR8I+TMxL1QjnGjSU1FxrbpQ/ lQ6VcMvO1t0Fx87meU0SneChvQ6BiwlfAXAVkQw/+mZLpALEBWt/7dbuxqQajOetpg E0UpfldbzcJ5ehVg/st5XBRiSC6U7x11vRJ6y4GDfv5SOH4qS/egi0PrkYatzt2/al IaK4aFx2ASuaQdtIxto4VB/hGINa/kGkbBcnJ0j1gtF3HkRNcWWSOO8gn40Leemglo 4dtHyTyMQB3UcRW4eJch2wTwtgCcaQp2MoPH5kDS581Z+XWWHAWeK4xod4u549Jg4N Sqf5FMk/Rnwhw== From: Leon Romanovsky To: Marek Szyprowski Cc: Leon Romanovsky , Jason Gunthorpe , Andreas Larsson , Borislav Petkov , Dave Hansen , "David S. Miller" , Geoff Levand , Helge Deller , Ingo Molnar , iommu@lists.linux.dev, "James E.J. Bottomley" , Jason Wang , Juergen Gross , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Madhavan Srinivasan , Matt Turner , Michael Ellerman , "Michael S. 
Tsirkin" , Richard Henderson , sparclinux@vger.kernel.org, Stefano Stabellini , Thomas Bogendoerfer , Thomas Gleixner , virtualization@lists.linux.dev, x86@kernel.org, xen-devel@lists.xenproject.org Subject: [PATCH 2/9] MIPS/jazzdma: Provide physical address directly Date: Thu, 18 Sep 2025 21:45:02 +0300 Message-ID: X-Mailer: git-send-email 2.51.0 In-Reply-To: References: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-ZohoMail-DKIM: pass (identity @kernel.org) X-ZM-MESSAGEID: 1758221165069116601 Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky MIPS jazz uses physical addresses for mapping pages, so convert it to get them directly from DMA mapping routine. Signed-off-by: Leon Romanovsky --- arch/mips/jazz/jazzdma.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index c97b089b99029..45fe71aa454b7 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -521,18 +521,24 @@ static void jazz_dma_free(struct device *dev, size_t = size, void *vaddr, __free_pages(virt_to_page(vaddr), get_order(size)); } =20 -static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) +static dma_addr_t jazz_dma_map_phys(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs) { - phys_addr_t phys =3D page_to_phys(page) + offset; + if (attrs & DMA_ATTR_MMIO) + /* + * This check is included because older versions of the code lacked + * MMIO path support, and my ability to test this path is limited. + * However, from a software technical standpoint, there is no restrictio= n, + * as the following code operates solely on physical addresses. 
+ */ + return DMA_MAPPING_ERROR; =20 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) arch_sync_dma_for_device(phys, size, dir); return vdma_alloc(phys, size); } =20 -static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, +static void jazz_dma_unmap_phys(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) @@ -607,8 +613,8 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev, const struct dma_map_ops jazz_dma_ops =3D { .alloc =3D jazz_dma_alloc, .free =3D jazz_dma_free, - .map_page =3D jazz_dma_map_page, - .unmap_page =3D jazz_dma_unmap_page, + .map_phys =3D jazz_dma_map_phys, + .unmap_phys =3D jazz_dma_unmap_phys, .map_sg =3D jazz_dma_map_sg, .unmap_sg =3D jazz_dma_unmap_sg, .sync_single_for_cpu =3D jazz_dma_sync_single_for_cpu, --=20 2.51.0 From nobody Thu Oct 2 09:16:56 2025 Delivered-To: importer@patchew.org Received-SPF: pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) client-ip=192.237.175.120; envelope-from=xen-devel-bounces@lists.xenproject.org; helo=lists.xenproject.org; Authentication-Results: mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) smtp.mailfrom=xen-devel-bounces@lists.xenproject.org; dmarc=pass(p=quarantine dis=none) header.from=kernel.org ARC-Seal: i=1; a=rsa-sha256; t=1758221153; cv=none; d=zohomail.com; s=zohoarc; b=WhWGvdZaIldnXWXJAv6LKyKLpGWssp7CF6bAXRqkv5eoWwqaYC2P1MDSqnDtE/TgjiAHayAM2uzaWysji6MZTaY41I84IInw8RDFtHJtnEhcmd9YsG9RBGdElZtFLPR6LmxjN2fQ3YIPL52RGpb5kfqELNcVsetNy/aXEda3BaI= ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zohomail.com; s=zohoarc; t=1758221153; h=Content-Transfer-Encoding:Cc:Cc:Date:Date:From:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:Subject:To:To:Message-Id:Reply-To; 
bh=zuA2/EbjAjP+IDiVRg8tCpvb0l3w5/9bBHRhhMxW4Wg=; b=nhmbdOvELwjkJM7ZQk74OTAdnDFFAUg1M3qVG325c7VuI7FrWtSuv9niqDTm4gkD3G9R34hUl/m7WUo3sO9gSU8C0zTRB+EfMOmOhaHhNAbc4WMvrHeaL5JsqAbwKoDa2hXPt7EGot0OpTjer26eEeMVUdeZJvsDJbn4UfWYW40= ARC-Authentication-Results: i=1; mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) smtp.mailfrom=xen-devel-bounces@lists.xenproject.org; dmarc=pass header.from= (p=quarantine dis=none) Return-Path: Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) by mx.zohomail.com with SMTPS id 1758221153941421.91591826067247; Thu, 18 Sep 2025 11:45:53 -0700 (PDT) Received: from list by lists.xenproject.org with outflank-mailman.1126434.1468009 (Exim 4.92) (envelope-from ) id 1uzJdX-0003a2-0J; Thu, 18 Sep 2025 18:45:43 +0000 Received: by outflank-mailman (output) from mailman id 1126434.1468009; Thu, 18 Sep 2025 18:45:42 +0000 Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1uzJdW-0003Yy-Oz; Thu, 18 Sep 2025 18:45:42 +0000 Received: by outflank-mailman (input) for mailman id 1126434; Thu, 18 Sep 2025 18:45:42 +0000 Received: from se1-gles-flk1-in.inumbo.com ([94.247.172.50] helo=se1-gles-flk1.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1uzJdW-0003Vt-66 for xen-devel@lists.xenproject.org; Thu, 18 Sep 2025 18:45:42 +0000 Received: from tor.source.kernel.org (tor.source.kernel.org [172.105.4.254]) by se1-gles-flk1.inumbo.com (Halon) with ESMTPS id ac2cde88-94bf-11f0-9809-7dc792cee155; Thu, 18 Sep 2025 20:45:40 +0200 (CEST) Received: from smtp.kernel.org (transwarp.subspace.kernel.org [100.75.92.58]) by tor.source.kernel.org (Postfix) with ESMTP id 6D84660211; Thu, 18 Sep 2025 18:45:39 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 4E3E1C4CEE7; Thu, 18 Sep 2025 18:45:38 +0000 (UTC) X-Outflank-Mailman: Message body and 
most headers restored to incoming version X-BeenThere: xen-devel@lists.xenproject.org List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Errors-To: xen-devel-bounces@lists.xenproject.org Precedence: list Sender: "Xen-devel" X-Inumbo-ID: ac2cde88-94bf-11f0-9809-7dc792cee155 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1758221139; bh=lYina26sWVeUcMXQiYVzGEsG3s2F3WRjYogRL1Hc1Kg=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=nmeOm+sqpgOPQj79lggvc1K6wpDOki3X4yJuxCb2vpWB7Yrit2zqRh0RiDIvOfhbE evc12i0uWXAtIGkWhu78jwz295HLWPOAaZO6IZ/cviymzjBAJS/z9YITfLbhvXtCFj ky8D8HIF4+CT3b196kV7uzpo4mZHe5HQ3Dm4EG7bvT2H0SCDQYzaGQ0o23uNJgkbi+ 6aZi+d9itiPvwVg22aSegOmezzgMa5H8zXDrUjIenHJYjuH2xplO3ZAhthDLRLlTV2 U0DeJS9SxdDkBapfGs1dH8YVifrYc3iq4bbDeAIVs7BBOR9OCRgcGcxYPk4A43PYIU SJ9lnFSV8Evzw== From: Leon Romanovsky To: Marek Szyprowski Cc: Leon Romanovsky , Jason Gunthorpe , Andreas Larsson , Borislav Petkov , Dave Hansen , "David S. Miller" , Geoff Levand , Helge Deller , Ingo Molnar , iommu@lists.linux.dev, "James E.J. Bottomley" , Jason Wang , Juergen Gross , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Madhavan Srinivasan , Matt Turner , Michael Ellerman , "Michael S. 
Tsirkin" , Richard Henderson , sparclinux@vger.kernel.org, Stefano Stabellini , Thomas Bogendoerfer , Thomas Gleixner , virtualization@lists.linux.dev, x86@kernel.org, xen-devel@lists.xenproject.org Subject: [PATCH 3/9] parisc: Convert DMA map_page to map_phys interface Date: Thu, 18 Sep 2025 21:45:03 +0300 Message-ID: <56c4c3b14f46c0a785f196315b673b0b1bcfb3b1.1758219787.git.leon@kernel.org> X-Mailer: git-send-email 2.51.0 In-Reply-To: References: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-ZohoMail-DKIM: pass (identity @kernel.org) X-ZM-MESSAGEID: 1758221155273116600 Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky Perform mechanical conversion from .map_page to .map_phys callback. Signed-off-by: Leon Romanovsky --- drivers/parisc/ccio-dma.c | 25 +++++++++++++------------ drivers/parisc/sba_iommu.c | 23 ++++++++++++----------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index feef537257d05..d45f3634f8270 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -773,17 +773,18 @@ ccio_map_single(struct device *dev, void *addr, size_= t size, =20 =20 static dma_addr_t -ccio_map_page(struct device *dev, struct page *page, unsigned long offset, - size_t size, enum dma_data_direction direction, - unsigned long attrs) +ccio_map_phys(struct device *dev, phys_addr_t phys, size_t size, + enum dma_data_direction direction, unsigned long attrs) { - return ccio_map_single(dev, page_address(page) + offset, size, - direction); + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; + + return ccio_map_single(dev, phys_to_virt(phys), size, direction); } =20 =20 /** - * ccio_unmap_page - Unmap an address range from the IOMMU. + * ccio_unmap_phys - Unmap an address range from the IOMMU. * @dev: The PCI device. * @iova: The start address of the DMA region. * @size: The length of the DMA region. 
@@ -791,7 +792,7 @@ ccio_map_page(struct device *dev, struct page *page, un= signed long offset, * @attrs: attributes */ static void=20 -ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, +ccio_unmap_phys(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; @@ -873,7 +874,7 @@ static void ccio_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) { - ccio_unmap_page(dev, dma_handle, size, 0, 0); + ccio_unmap_phys(dev, dma_handle, size, 0, 0); free_pages((unsigned long)cpu_addr, get_order(size)); } =20 @@ -1004,7 +1005,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist = *sglist, int nents, #ifdef CCIO_COLLECT_STATS ioc->usg_pages +=3D sg_dma_len(sglist) >> PAGE_SHIFT; #endif - ccio_unmap_page(dev, sg_dma_address(sglist), + ccio_unmap_phys(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); ++sglist; nents--; @@ -1017,8 +1018,8 @@ static const struct dma_map_ops ccio_ops =3D { .dma_supported =3D ccio_dma_supported, .alloc =3D ccio_alloc, .free =3D ccio_free, - .map_page =3D ccio_map_page, - .unmap_page =3D ccio_unmap_page, + .map_phys =3D ccio_map_phys, + .unmap_phys =3D ccio_unmap_phys, .map_sg =3D ccio_map_sg, .unmap_sg =3D ccio_unmap_sg, .get_sgtable =3D dma_common_get_sgtable, @@ -1072,7 +1073,7 @@ static int ccio_proc_info(struct seq_file *m, void *p) ioc->msingle_calls, ioc->msingle_pages, (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); =20 - /* KLUGE - unmap_sg calls unmap_page for each mapped page */ + /* KLUGE - unmap_sg calls unmap_phys for each mapped page */ min =3D ioc->usingle_calls - ioc->usg_calls; max =3D ioc->usingle_pages - ioc->usg_pages; seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n", diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index fc3863c09f83d..8040aa4e6ff42 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ 
-778,17 +778,18 @@ sba_map_single(struct device *dev, void *addr, size_t= size, =20 =20 static dma_addr_t -sba_map_page(struct device *dev, struct page *page, unsigned long offset, - size_t size, enum dma_data_direction direction, - unsigned long attrs) +sba_map_phys(struct device *dev, phys_addr_t phys, size_t size, + enum dma_data_direction direction, unsigned long attrs) { - return sba_map_single(dev, page_address(page) + offset, size, - direction); + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; + + return sba_map_single(dev, phys_to_virt(phys), size, direction); } =20 =20 /** - * sba_unmap_page - unmap one IOVA and free resources + * sba_unmap_phys - unmap one IOVA and free resources * @dev: instance of PCI owned by the driver that's asking. * @iova: IOVA of driver buffer previously mapped. * @size: number of bytes mapped in driver buffer. @@ -798,7 +799,7 @@ sba_map_page(struct device *dev, struct page *page, uns= igned long offset, * See Documentation/core-api/dma-api-howto.rst */ static void -sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, +sba_unmap_phys(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; @@ -914,7 +915,7 @@ static void sba_free(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { - sba_unmap_page(hwdev, dma_handle, size, 0, 0); + sba_unmap_phys(hwdev, dma_handle, size, 0, 0); free_pages((unsigned long) vaddr, get_order(size)); } =20 @@ -1061,7 +1062,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *= sglist, int nents, =20 while (nents && sg_dma_len(sglist)) { =20 - sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), + sba_unmap_phys(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); #ifdef SBA_COLLECT_STATS ioc->usg_pages +=3D ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(= sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; @@ -1085,8 +1086,8 @@ static const struct 
dma_map_ops sba_ops =3D { .dma_supported =3D sba_dma_supported, .alloc =3D sba_alloc, .free =3D sba_free, - .map_page =3D sba_map_page, - .unmap_page =3D sba_unmap_page, + .map_phys =3D sba_map_phys, + .unmap_phys =3D sba_unmap_phys, .map_sg =3D sba_map_sg, .unmap_sg =3D sba_unmap_sg, .get_sgtable =3D dma_common_get_sgtable, --=20 2.51.0 From nobody Thu Oct 2 09:16:56 2025 Delivered-To: importer@patchew.org Received-SPF: pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) client-ip=192.237.175.120; envelope-from=xen-devel-bounces@lists.xenproject.org; helo=lists.xenproject.org; Authentication-Results: mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) smtp.mailfrom=xen-devel-bounces@lists.xenproject.org; dmarc=pass(p=quarantine dis=none) header.from=kernel.org ARC-Seal: i=1; a=rsa-sha256; t=1758221162; cv=none; d=zohomail.com; s=zohoarc; b=Si/e2V1xMkgSsRxbnwka2PbPWY5dlInq8sGlAc+31Rj/4hkQzjEpiCPkgMi5TLibqe5hreDmg7s3D0b7gZdLTQa6reP/rhe74B3e9O3/JTy9qypLhDknETphOnF+rjew9Tjn9qOaOP1Nztr3Iqc9PtvKmUEwRBKoneCVSE4xkUQ= ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zohomail.com; s=zohoarc; t=1758221162; h=Content-Transfer-Encoding:Cc:Cc:Date:Date:From:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:Subject:To:To:Message-Id:Reply-To; bh=sJMQvlFc+h6C27T9m+Zl3NcEAk3Mru4+yLmcb43eL/o=; b=oFEt31h4+2P0m4puu3imNx+7hm9++QDm0UkesRFil2x1g8K9aY61nU7PXn1v5n2ONRTmSkhn/InJ8+bSNhuHt7AkmGGqAGBC83+eInlOwJHiT4N9G1ZTa7O17SjzCWf4qvpFf80yzhTWXe8RG4Ti2tpKfgXKctAagJg6BQodY1Y= ARC-Authentication-Results: i=1; mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) smtp.mailfrom=xen-devel-bounces@lists.xenproject.org; dmarc=pass header.from= (p=quarantine dis=none) Return-Path: Received: from 
lists.xenproject.org (lists.xenproject.org [192.237.175.120]) by mx.zohomail.com with SMTPS id 1758221162607497.0556142359152; Thu, 18 Sep 2025 11:46:02 -0700 (PDT) Received: from list by lists.xenproject.org with outflank-mailman.1126435.1468022 (Exim 4.92) (envelope-from ) id 1uzJdb-000434-AF; Thu, 18 Sep 2025 18:45:47 +0000 Received: by outflank-mailman (output) from mailman id 1126435.1468022; Thu, 18 Sep 2025 18:45:47 +0000 Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1uzJdb-00042x-74; Thu, 18 Sep 2025 18:45:47 +0000 Received: by outflank-mailman (input) for mailman id 1126435; Thu, 18 Sep 2025 18:45:45 +0000 Received: from se1-gles-sth1-in.inumbo.com ([159.253.27.254] helo=se1-gles-sth1.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1uzJdZ-0003Gi-Gt for xen-devel@lists.xenproject.org; Thu, 18 Sep 2025 18:45:45 +0000 Received: from tor.source.kernel.org (tor.source.kernel.org [172.105.4.254]) by se1-gles-sth1.inumbo.com (Halon) with ESMTPS id aed91f74-94bf-11f0-9d14-b5c5bf9af7f9; Thu, 18 Sep 2025 20:45:44 +0200 (CEST) Received: from smtp.kernel.org (transwarp.subspace.kernel.org [100.75.92.58]) by tor.source.kernel.org (Postfix) with ESMTP id BB6316021B; Thu, 18 Sep 2025 18:45:43 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 92CC2C4CEF0; Thu, 18 Sep 2025 18:45:42 +0000 (UTC) X-Outflank-Mailman: Message body and most headers restored to incoming version X-BeenThere: xen-devel@lists.xenproject.org List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Errors-To: xen-devel-bounces@lists.xenproject.org Precedence: list Sender: "Xen-devel" X-Inumbo-ID: aed91f74-94bf-11f0-9d14-b5c5bf9af7f9 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1758221143; bh=A5MYBsSBE9JCUM3+eZAexfkVHCWWAKynqqD0ex2RShQ=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; 
b=VFTsFBtZuq3RVObanJ1b8fM2UGDGA4Zn3T9RhLMeI1McT3DbXl46onA5ciizSBNyt e1Vw09lscJuL0hnn6ctfrD5eU+i7hLzqF3jIQGA/SfsG96PEvQDpJF6yBiUKZzSiiN RNAJGFUHjhdtEDlComVmJQGBIYwgOkRmRy2rLupZxtPiW5SJcaI9uhtProb48+4fSX JbKVzymUDqJfmrrPQcihptQS9xWeAWhDEYX4vllaFcywrHZ5boMeAplPClyw3NxKlp Bs9mimCO1/mzRXV4Y1CUgSLgIAKGxCQi6jAm1nL7j5KU5pU+q0+gPCmFWy+umttOfe UFfFNv7ET5k2w== From: Leon Romanovsky To: Marek Szyprowski Cc: Leon Romanovsky , Jason Gunthorpe , Andreas Larsson , Borislav Petkov , Dave Hansen , "David S. Miller" , Geoff Levand , Helge Deller , Ingo Molnar , iommu@lists.linux.dev, "James E.J. Bottomley" , Jason Wang , Juergen Gross , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Madhavan Srinivasan , Matt Turner , Michael Ellerman , "Michael S. Tsirkin" , Richard Henderson , sparclinux@vger.kernel.org, Stefano Stabellini , Thomas Bogendoerfer , Thomas Gleixner , virtualization@lists.linux.dev, x86@kernel.org, xen-devel@lists.xenproject.org Subject: [PATCH 4/9] powerpc: Convert to physical address DMA mapping Date: Thu, 18 Sep 2025 21:45:04 +0300 Message-ID: <6fd5222ca5eb2e6cba0d28e5cdab8ca3e6152ee5.1758219787.git.leon@kernel.org> X-Mailer: git-send-email 2.51.0 In-Reply-To: References: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-ZohoMail-DKIM: pass (identity @kernel.org) X-ZM-MESSAGEID: 1758221163282116600 Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky Adapt PowerPC DMA to use physical addresses in order to prepare code to removal .map_page and .unmap_page. 
Signed-off-by: Leon Romanovsky --- arch/powerpc/include/asm/iommu.h | 8 +++--- arch/powerpc/kernel/dma-iommu.c | 22 +++++++--------- arch/powerpc/kernel/iommu.c | 14 +++++----- arch/powerpc/platforms/ps3/system-bus.c | 33 ++++++++++++++---------- arch/powerpc/platforms/pseries/ibmebus.c | 15 ++++++----- arch/powerpc/platforms/pseries/vio.c | 21 ++++++++------- 6 files changed, 60 insertions(+), 53 deletions(-) diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/io= mmu.h index b410021ad4c69..eafdd63cd6c4f 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -274,12 +274,12 @@ extern void *iommu_alloc_coherent(struct device *dev,= struct iommu_table *tbl, unsigned long mask, gfp_t flag, int node); extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, void *vaddr, dma_addr_t dma_handle); -extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *t= bl, - struct page *page, unsigned long offset, - size_t size, unsigned long mask, +extern dma_addr_t iommu_map_phys(struct device *dev, struct iommu_table *t= bl, + phys_addr_t phys, size_t size, + unsigned long mask, enum dma_data_direction direction, unsigned long attrs); -extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handl= e, +extern void iommu_unmap_phys(struct iommu_table *tbl, dma_addr_t dma_handl= e, size_t size, enum dma_data_direction direction, unsigned long attrs); =20 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iomm= u.c index 0359ab72cd3ba..aa3689d619179 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -93,28 +93,26 @@ static void dma_iommu_free_coherent(struct device *dev,= size_t size, =20 /* Creates TCEs for a user provided buffer. The user buffer must be * contiguous real kernel storage (not vmalloc). The address passed here - * comprises a page address and offset into that page. 
The dma_addr_t - * returned will point to the same byte within the page as was passed in. + * is a physical address to that page. The dma_addr_t returned will point + * to the same byte within the page as was passed in. */ -static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, +static dma_addr_t dma_iommu_map_phys(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction direction, unsigned long attrs) { - return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, - size, dma_get_mask(dev), direction, attrs); + return iommu_map_phys(dev, get_iommu_table_base(dev), phys, size, + dma_get_mask(dev), direction, attrs); } =20 - -static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, +static void dma_iommu_unmap_phys(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, unsigned long attrs) { - iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, + iommu_unmap_phys(get_iommu_table_base(dev), dma_handle, size, direction, attrs); } =20 - static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, unsigned long attrs) @@ -211,8 +209,8 @@ const struct dma_map_ops dma_iommu_ops =3D { .map_sg =3D dma_iommu_map_sg, .unmap_sg =3D dma_iommu_unmap_sg, .dma_supported =3D dma_iommu_dma_supported, - .map_page =3D dma_iommu_map_page, - .unmap_page =3D dma_iommu_unmap_page, + .map_phys =3D dma_iommu_map_phys, + .unmap_phys =3D dma_iommu_unmap_phys, .get_required_mask =3D dma_iommu_get_required_mask, .mmap =3D dma_common_mmap, .get_sgtable =3D dma_common_get_sgtable, diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 244eb4857e7f4..6b5f4b72ce97f 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -848,12 +848,12 @@ EXPORT_SYMBOL_GPL(iommu_tce_table_put); =20 /* Creates TCEs for a user provided buffer. 
The user buffer must be * contiguous real kernel storage (not vmalloc). The address passed here - * comprises a page address and offset into that page. The dma_addr_t - * returned will point to the same byte within the page as was passed in. + * is physical address into that page. The dma_addr_t returned will point + * to the same byte within the page as was passed in. */ -dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, - struct page *page, unsigned long offset, size_t size, - unsigned long mask, enum dma_data_direction direction, +dma_addr_t iommu_map_phys(struct device *dev, struct iommu_table *tbl, + phys_addr_t phys, size_t size, unsigned long mask, + enum dma_data_direction direction, unsigned long attrs) { dma_addr_t dma_handle =3D DMA_MAPPING_ERROR; @@ -863,7 +863,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct io= mmu_table *tbl, =20 BUG_ON(direction =3D=3D DMA_NONE); =20 - vaddr =3D page_address(page) + offset; + vaddr =3D phys_to_virt(phys); uaddr =3D (unsigned long)vaddr; =20 if (tbl) { @@ -890,7 +890,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct io= mmu_table *tbl, return dma_handle; } =20 -void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, +void iommu_unmap_phys(struct iommu_table *tbl, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, unsigned long attrs) { diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platfor= ms/ps3/system-bus.c index afbaabf182d01..6adcf76c70219 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -551,18 +551,20 @@ static void ps3_free_coherent(struct device *_dev, si= ze_t size, void *vaddr, =20 /* Creates TCEs for a user provided buffer. The user buffer must be * contiguous real kernel storage (not vmalloc). The address passed here - * comprises a page address and offset into that page. The dma_addr_t - * returned will point to the same byte within the page as was passed in. 
+ * is physical address to that page. The dma_addr_t returned will point + * to the same byte within the page as was passed in. */ =20 -static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction direction, - unsigned long attrs) +static dma_addr_t ps3_sb_map_phys(struct device *_dev, phys_addr_t phys, + size_t size, enum dma_data_direction direction, unsigned long attrs) { struct ps3_system_bus_device *dev =3D ps3_dev_to_system_bus_dev(_dev); int result; dma_addr_t bus_addr; - void *ptr =3D page_address(page) + offset; + void *ptr =3D phys_to_virt(phys); + + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; =20 result =3D ps3_dma_map(dev->d_region, (unsigned long)ptr, size, &bus_addr, @@ -577,8 +579,8 @@ static dma_addr_t ps3_sb_map_page(struct device *_dev, = struct page *page, return bus_addr; } =20 -static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, - unsigned long offset, size_t size, +static dma_addr_t ps3_ioc0_map_phys(struct device *_dev, phys_addr_t phys, + size_t size, enum dma_data_direction direction, unsigned long attrs) { @@ -586,7 +588,10 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_de= v, struct page *page, int result; dma_addr_t bus_addr; u64 iopte_flag; - void *ptr =3D page_address(page) + offset; + void *ptr =3D phys_to_virt(phys); + + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; =20 iopte_flag =3D CBE_IOPTE_M; switch (direction) { @@ -613,7 +618,7 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev= , struct page *page, return bus_addr; } =20 -static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, +static void ps3_unmap_phys(struct device *_dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, unsigned long attrs) { struct ps3_system_bus_device *dev =3D ps3_dev_to_system_bus_dev(_dev); @@ -690,8 +695,8 @@ static const struct dma_map_ops ps3_sb_dma_ops =3D { .map_sg =3D ps3_sb_map_sg, .unmap_sg
=3D ps3_sb_unmap_sg, .dma_supported =3D ps3_dma_supported, - .map_page =3D ps3_sb_map_page, - .unmap_page =3D ps3_unmap_page, + .map_phys =3D ps3_sb_map_phys, + .unmap_phys =3D ps3_unmap_phys, .mmap =3D dma_common_mmap, .get_sgtable =3D dma_common_get_sgtable, .alloc_pages_op =3D dma_common_alloc_pages, @@ -704,8 +709,8 @@ static const struct dma_map_ops ps3_ioc0_dma_ops =3D { .map_sg =3D ps3_ioc0_map_sg, .unmap_sg =3D ps3_ioc0_unmap_sg, .dma_supported =3D ps3_dma_supported, - .map_page =3D ps3_ioc0_map_page, - .unmap_page =3D ps3_unmap_page, + .map_phys =3D ps3_ioc0_map_phys, + .unmap_phys =3D ps3_unmap_phys, .mmap =3D dma_common_mmap, .get_sgtable =3D dma_common_get_sgtable, .alloc_pages_op =3D dma_common_alloc_pages, diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platfo= rms/pseries/ibmebus.c index 3436b0af795e2..cad2deb7e70d9 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -86,17 +86,18 @@ static void ibmebus_free_coherent(struct device *dev, kfree(vaddr); } =20 -static dma_addr_t ibmebus_map_page(struct device *dev, - struct page *page, - unsigned long offset, +static dma_addr_t ibmebus_map_phys(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction direction, unsigned long attrs) { - return (dma_addr_t)(page_address(page) + offset); + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; + + return (dma_addr_t)(phys_to_virt(phys)); } =20 -static void ibmebus_unmap_page(struct device *dev, +static void ibmebus_unmap_phys(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, @@ -146,8 +147,8 @@ static const struct dma_map_ops ibmebus_dma_ops =3D { .unmap_sg =3D ibmebus_unmap_sg, .dma_supported =3D ibmebus_dma_supported, .get_required_mask =3D ibmebus_dma_get_required_mask, - .map_page =3D ibmebus_map_page, - .unmap_page =3D ibmebus_unmap_page, + .map_phys =3D ibmebus_map_phys, + .unmap_phys =3D ibmebus_unmap_phys, }; =20 static 
int ibmebus_match_path(struct device *dev, const void *data) diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/= pseries/vio.c index ac1d2d2c9a88a..838e29d473785 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -512,18 +512,21 @@ static void vio_dma_iommu_free_coherent(struct device= *dev, size_t size, vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); } =20 -static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *= page, - unsigned long offset, size_t size, - enum dma_data_direction direction, - unsigned long attrs) +static dma_addr_t vio_dma_iommu_map_phys(struct device *dev, phys_addr_t p= hys, + size_t size, + enum dma_data_direction direction, + unsigned long attrs) { struct vio_dev *viodev =3D to_vio_dev(dev); struct iommu_table *tbl =3D get_iommu_table_base(dev); dma_addr_t ret =3D DMA_MAPPING_ERROR; =20 + if (attrs & DMA_ATTR_MMIO) + return ret; + if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) goto out_fail; - ret =3D iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev), + ret =3D iommu_map_phys(dev, tbl, phys, size, dma_get_mask(dev), direction, attrs); if (unlikely(ret =3D=3D DMA_MAPPING_ERROR)) goto out_deallocate; @@ -536,7 +539,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device = *dev, struct page *page, return DMA_MAPPING_ERROR; } =20 -static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_ha= ndle, +static void vio_dma_iommu_unmap_phys(struct device *dev, dma_addr_t dma_ha= ndle, size_t size, enum dma_data_direction direction, unsigned long attrs) @@ -544,7 +547,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev= , dma_addr_t dma_handle, struct vio_dev *viodev =3D to_vio_dev(dev); struct iommu_table *tbl =3D get_iommu_table_base(dev); =20 - iommu_unmap_page(tbl, dma_handle, size, direction, attrs); + iommu_unmap_phys(tbl, dma_handle, size, direction, attrs); vio_cmo_dealloc(viodev, roundup(size, 
IOMMU_PAGE_SIZE(tbl))); } =20 @@ -605,8 +608,8 @@ static const struct dma_map_ops vio_dma_mapping_ops =3D= { .free =3D vio_dma_iommu_free_coherent, .map_sg =3D vio_dma_iommu_map_sg, .unmap_sg =3D vio_dma_iommu_unmap_sg, - .map_page =3D vio_dma_iommu_map_page, - .unmap_page =3D vio_dma_iommu_unmap_page, + .map_phys =3D vio_dma_iommu_map_phys, + .unmap_phys =3D vio_dma_iommu_unmap_phys, .dma_supported =3D dma_iommu_dma_supported, .get_required_mask =3D dma_iommu_get_required_mask, .mmap =3D dma_common_mmap, --=20 2.51.0 From nobody Thu Oct 2 09:16:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 706862D838A; Thu, 18 Sep 2025 18:46:02 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221163; cv=none; b=IIMNJW6yy39jtaMf17VPk/WOvcXIAPlnkY3jxqGPyk4zW4rjo3esNv1fSzdKEqjVSJV2wybNxC4uVdQFXugKZtFluVj2yJdfJezhttaCawlF5PHOUxIUyLd+wcCtM7YXsDK0Lf+At61qaSQ4Utg1cG9f0a8dS5Yjkb7Y56ALqMY= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221163; c=relaxed/simple; bh=RuMQ11goh2JNImFK4PcuJ9yLKsyHcn6/phLR164pkzU=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=S6ZEVNbkgtpf5bRdBOfevocA+xcPdL9ugCE96YI3Tm6y9lXPOmcdTMLaJb3lcYvI7/3qIMTVpFi/foAeVkEeNjhdxPAC7sZNA8RtTfs977G9z+rbF8726xKJdXLQU9uzD5QwsjUgxlZ56kbPNxnYAFPFjPBzOc6y3U0ze+cBkxo= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=Q5B3soPW; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="Q5B3soPW" Received: by smtp.kernel.org (Postfix) with ESMTPSA 
id AF400C4CEE7; Thu, 18 Sep 2025 18:46:00 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1758221162; bh=RuMQ11goh2JNImFK4PcuJ9yLKsyHcn6/phLR164pkzU=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Q5B3soPWe59i7AumuQAtzGCpEErxVeTC7wi+Di8G6eOVT1MVZTt/JgzUnlVjGRPoW ErCqMMeGsidHG1N+knFqgXjPEsOdPPiFQ0rIz4DZLI0HbYVEM3hq9hxORsFYl5H2fx J60n2vb7xaJx4ibgjmaY8IKcwdgAFKEXlRg60rf39+CjCdf5WsqUpjL/TGOC7MGeDk X/Ja5mBKKeU7UABgCctHcfJDW3VdXWr07H5WLSQu0NCMdcZtRlu/szIqbspA2f3jrE yZJ63F0xUTPrPP7BvjqshrVIrvOkK4Vw3omPkJrYVm7ziJdzMMMM8Sk6cOqpJu0RSX bLwC9O22I7FmA== From: Leon Romanovsky To: Marek Szyprowski Cc: Leon Romanovsky , Jason Gunthorpe , Andreas Larsson , Borislav Petkov , Dave Hansen , "David S. Miller" , Geoff Levand , Helge Deller , Ingo Molnar , iommu@lists.linux.dev, "James E.J. Bottomley" , Jason Wang , Juergen Gross , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Madhavan Srinivasan , Matt Turner , Michael Ellerman , "Michael S. Tsirkin" , Richard Henderson , sparclinux@vger.kernel.org, Stefano Stabellini , Thomas Bogendoerfer , Thomas Gleixner , virtualization@lists.linux.dev, x86@kernel.org, xen-devel@lists.xenproject.org Subject: [PATCH 5/9] sparc64: Use physical address DMA mapping Date: Thu, 18 Sep 2025 21:45:05 +0300 Message-ID: X-Mailer: git-send-email 2.51.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky Convert sparc architecture DMA code to use .map_phys callback. 
Signed-off-by: Leon Romanovsky --- arch/sparc/kernel/iommu.c | 16 ++++++------ arch/sparc/kernel/pci_sun4v.c | 16 ++++++------ arch/sparc/mm/io-unit.c | 13 +++++----- arch/sparc/mm/iommu.c | 46 ++++++++++++++++++----------------- 4 files changed, 48 insertions(+), 43 deletions(-) diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index da03636925283..72bd7519ac2ab 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -260,9 +260,8 @@ static void dma_4u_free_coherent(struct device *dev, si= ze_t size, free_pages((unsigned long)cpu, order); } =20 -static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t sz, - enum dma_data_direction direction, +static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys, + size_t sz, enum dma_data_direction direction, unsigned long attrs) { struct iommu *iommu; @@ -273,13 +272,16 @@ static dma_addr_t dma_4u_map_page(struct device *dev,= struct page *page, u32 bus_addr, ret; unsigned long iopte_protection; =20 + if (attrs & DMA_ATTR_MMIO) + goto bad; + iommu =3D dev->archdata.iommu; strbuf =3D dev->archdata.stc; =20 if (unlikely(direction =3D=3D DMA_NONE)) goto bad_no_ctx; =20 - oaddr =3D (unsigned long)(page_address(page) + offset); + oaddr =3D (unsigned long)(phys_to_virt(phys)); npages =3D IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>=3D IO_PAGE_SHIFT; =20 @@ -383,7 +385,7 @@ static void strbuf_flush(struct strbuf *strbuf, struct = iommu *iommu, vaddr, ctx, npages); } =20 -static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, +static void dma_4u_unmap_phys(struct device *dev, dma_addr_t bus_addr, size_t sz, enum dma_data_direction direction, unsigned long attrs) { @@ -753,8 +755,8 @@ static int dma_4u_supported(struct device *dev, u64 dev= ice_mask) static const struct dma_map_ops sun4u_dma_ops =3D { .alloc =3D dma_4u_alloc_coherent, .free =3D dma_4u_free_coherent, - .map_page =3D dma_4u_map_page, - 
.unmap_page =3D dma_4u_unmap_page, + .map_phys =3D dma_4u_map_phys, + .unmap_phys =3D dma_4u_unmap_phys, .map_sg =3D dma_4u_map_sg, .unmap_sg =3D dma_4u_unmap_sg, .sync_single_for_cpu =3D dma_4u_sync_single_for_cpu, diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index b720b21ccfbd8..d9d2464a948c9 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -352,9 +352,8 @@ static void dma_4v_free_coherent(struct device *dev, si= ze_t size, void *cpu, free_pages((unsigned long)cpu, order); } =20 -static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t sz, - enum dma_data_direction direction, +static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys, + size_t sz, enum dma_data_direction direction, unsigned long attrs) { struct iommu *iommu; @@ -367,13 +366,16 @@ static dma_addr_t dma_4v_map_page(struct device *dev,= struct page *page, dma_addr_t bus_addr, ret; long entry; =20 + if (attrs & DMA_ATTR_MMIO) + goto bad; + iommu =3D dev->archdata.iommu; atu =3D iommu->atu; =20 if (unlikely(direction =3D=3D DMA_NONE)) goto bad; =20 - oaddr =3D (unsigned long)(page_address(page) + offset); + oaddr =3D (unsigned long)(phys_to_virt(phys)); npages =3D IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>=3D IO_PAGE_SHIFT; =20 @@ -426,7 +428,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, s= truct page *page, return DMA_MAPPING_ERROR; } =20 -static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, +static void dma_4v_unmap_phys(struct device *dev, dma_addr_t bus_addr, size_t sz, enum dma_data_direction direction, unsigned long attrs) { @@ -686,8 +688,8 @@ static int dma_4v_supported(struct device *dev, u64 dev= ice_mask) static const struct dma_map_ops sun4v_dma_ops =3D { .alloc =3D dma_4v_alloc_coherent, .free =3D dma_4v_free_coherent, - .map_page =3D dma_4v_map_page, - .unmap_page =3D dma_4v_unmap_page, + .map_phys =3D 
dma_4v_map_phys, + .unmap_phys =3D dma_4v_unmap_phys, .map_sg =3D dma_4v_map_sg, .unmap_sg =3D dma_4v_unmap_sg, .dma_supported =3D dma_4v_supported, diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index d8376f61b4d08..fab303cc33700 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -142,11 +142,10 @@ nexti: scan =3D find_next_zero_bit(iounit->bmap, limi= t, scan); return vaddr; } =20 -static dma_addr_t iounit_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t len, enum dma_data_direction dir, - unsigned long attrs) +static dma_addr_t iounit_map_phys(struct device *dev, phys_addr_t phys, + size_t len, enum dma_data_direction dir, unsigned long attrs) { - void *vaddr =3D page_address(page) + offset; + void *vaddr =3D phys_to_virt(phys); struct iounit_struct *iounit =3D dev->archdata.iommu; unsigned long ret, flags; =09 @@ -178,7 +177,7 @@ static int iounit_map_sg(struct device *dev, struct sca= tterlist *sgl, int nents, return nents; } =20 -static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t= len, +static void iounit_unmap_phys(struct device *dev, dma_addr_t vaddr, size_t= len, enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit =3D dev->archdata.iommu; @@ -279,8 +278,8 @@ static const struct dma_map_ops iounit_dma_ops =3D { .alloc =3D iounit_alloc, .free =3D iounit_free, #endif - .map_page =3D iounit_map_page, - .unmap_page =3D iounit_unmap_page, + .map_phys =3D iounit_map_phys, + .unmap_phys =3D iounit_unmap_phys, .map_sg =3D iounit_map_sg, .unmap_sg =3D iounit_unmap_sg, }; diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 5a5080db800f5..dfcd981fa7efc 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -181,18 +181,20 @@ static void iommu_flush_iotlb(iopte_t *iopte, unsigne= d int niopte) } } =20 -static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *p= age, - unsigned long offset, size_t len, bool 
per_page_flush) +static dma_addr_t __sbus_iommu_map_phys(struct device *dev, phys_addr_t pa= ddr, + size_t len, bool per_page_flush, unsigned long attrs) { struct iommu_struct *iommu =3D dev->archdata.iommu; - phys_addr_t paddr =3D page_to_phys(page) + offset; - unsigned long off =3D paddr & ~PAGE_MASK; + unsigned long off =3D offset_in_page(paddr); unsigned long npages =3D (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long pfn =3D __phys_to_pfn(paddr); unsigned int busa, busa0; iopte_t *iopte, *iopte0; int ioptex, i; =20 + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; + /* XXX So what is maxphys for us and how do drivers know it? */ if (!len || len > 256 * 1024) return DMA_MAPPING_ERROR; @@ -202,10 +204,10 @@ static dma_addr_t __sbus_iommu_map_page(struct device= *dev, struct page *page, * XXX Is this a good assumption? * XXX What if someone else unmaps it here and races us? */ - if (per_page_flush && !PageHighMem(page)) { + if (per_page_flush && !PhysHighMem(paddr)) { unsigned long vaddr, p; =20 - vaddr =3D (unsigned long)page_address(page) + offset; + vaddr =3D (unsigned long)phys_to_virt(paddr); for (p =3D vaddr & PAGE_MASK; p < vaddr + len; p +=3D PAGE_SIZE) flush_page_for_dma(p); } @@ -231,19 +233,19 @@ static dma_addr_t __sbus_iommu_map_page(struct device= *dev, struct page *page, return busa0 + off; } =20 -static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev, - struct page *page, unsigned long offset, size_t len, - enum dma_data_direction dir, unsigned long attrs) +static dma_addr_t sbus_iommu_map_phys_gflush(struct device *dev, + phys_addr_t phys, size_t len, enum dma_data_direction dir, + unsigned long attrs) { flush_page_for_dma(0); - return __sbus_iommu_map_page(dev, page, offset, len, false); + return __sbus_iommu_map_phys(dev, phys, len, false, attrs); } =20 -static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev, - struct page *page, unsigned long offset, size_t len, - enum dma_data_direction dir, unsigned long 
attrs) +static dma_addr_t sbus_iommu_map_phys_pflush(struct device *dev, + phys_addr_t phys, size_t len, enum dma_data_direction dir, + unsigned long attrs) { - return __sbus_iommu_map_page(dev, page, offset, len, true); + return __sbus_iommu_map_phys(dev, phys, len, true, attrs); } =20 static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl, @@ -254,8 +256,8 @@ static int __sbus_iommu_map_sg(struct device *dev, stru= ct scatterlist *sgl, int j; =20 for_each_sg(sgl, sg, nents, j) { - sg->dma_address =3D__sbus_iommu_map_page(dev, sg_page(sg), - sg->offset, sg->length, per_page_flush); + sg->dma_address =3D __sbus_iommu_map_phys(dev, sg_phys(sg), + sg->length, per_page_flush, attrs); if (sg->dma_address =3D=3D DMA_MAPPING_ERROR) return -EIO; sg->dma_length =3D sg->length; @@ -277,7 +279,7 @@ static int sbus_iommu_map_sg_pflush(struct device *dev,= struct scatterlist *sgl, return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true); } =20 -static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr, +static void sbus_iommu_unmap_phys(struct device *dev, dma_addr_t dma_addr, size_t len, enum dma_data_direction dir, unsigned long attrs) { struct iommu_struct *iommu =3D dev->archdata.iommu; @@ -303,7 +305,7 @@ static void sbus_iommu_unmap_sg(struct device *dev, str= uct scatterlist *sgl, int i; =20 for_each_sg(sgl, sg, nents, i) { - sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir, + sbus_iommu_unmap_phys(dev, sg->dma_address, sg->length, dir, attrs); sg->dma_address =3D 0x21212121; } @@ -426,8 +428,8 @@ static const struct dma_map_ops sbus_iommu_dma_gflush_o= ps =3D { .alloc =3D sbus_iommu_alloc, .free =3D sbus_iommu_free, #endif - .map_page =3D sbus_iommu_map_page_gflush, - .unmap_page =3D sbus_iommu_unmap_page, + .map_phys =3D sbus_iommu_map_phys_gflush, + .unmap_phys =3D sbus_iommu_unmap_phys, .map_sg =3D sbus_iommu_map_sg_gflush, .unmap_sg =3D sbus_iommu_unmap_sg, }; @@ -437,8 +439,8 @@ static const struct dma_map_ops 
sbus_iommu_dma_pflush_o= ps =3D { .alloc =3D sbus_iommu_alloc, .free =3D sbus_iommu_free, #endif - .map_page =3D sbus_iommu_map_page_pflush, - .unmap_page =3D sbus_iommu_unmap_page, + .map_phys =3D sbus_iommu_map_phys_pflush, + .unmap_phys =3D sbus_iommu_unmap_phys, .map_sg =3D sbus_iommu_map_sg_pflush, .unmap_sg =3D sbus_iommu_unmap_sg, }; --=20 2.51.0 From nobody Thu Oct 2 09:16:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 4BE232DC333; Thu, 18 Sep 2025 18:45:52 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221153; cv=none; b=NiStBup9HDDFyWX9N/ImeV6WiaBZszEGEldwQQIfjU8BUICZHjheX0q9QfO3sTdftoAAIsYDQc8ATKLTaZ0vUh4czXp2D9+Apr6IgOb5JiARgd0nrHQ7YVlQWkBskO/Mo/OwnPos+xzoQIAJ5UgZ/x0b2PsaxTenWKB7NiylBDU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221153; c=relaxed/simple; bh=t2K4sW07xLmohMbEHNKuCoco8SWkCKi7+Pye3QSpGNA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=dDe0TN/GPF+yZNGpU8OHbRcDpyrbptmyspe9pvc9EH02BiqjKmumd3n8pXyCNFyk5sLwfkTMqSP71A6Xr0dL/xRBdTkCgfaM3ECSE4bQOOQ3R7/NAOqnRucYrYynPCyPbSVnTvvUrSZSq1Z8TG8cfvpr69Ev3CbZJCplUUHGRwg= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=ApQAftbQ; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="ApQAftbQ" Received: by smtp.kernel.org (Postfix) with ESMTPSA id BED33C4CEF1; Thu, 18 Sep 2025 18:45:51 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1758221152; 
bh=t2K4sW07xLmohMbEHNKuCoco8SWkCKi7+Pye3QSpGNA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=ApQAftbQR/jnJU2HzvT/hfBSzMBH8l9oVxYJvIJILBwOkF6XJJC/bSesqHAbmBP6u P4kPqKENM/py7u+cYWOMrhH3+pE/5jyp4qqyE4UDzVb5l/1iNNb4ImC6JoUa2UrQ9g HoDSVJ65VO2VyCLe7wSh33JQyAtAUnbol0LakPZkT5VHxvJDQfwyo07wdmIvS+pi4k JMnu0sgJrSZKHOTVtah/XqsI+nbf7OeODzaRmr6BIlvfuaSaKWkBbjh/9OLnt/fQR+ lKPYEZvVCBIvFLx87aT6KZk9bOv12pyvRFyBIive/3aZYoD0fFeZBsrH9oOTiFNFcj 4zp+LosB4q3Vw== From: Leon Romanovsky To: Marek Szyprowski Cc: Leon Romanovsky , Jason Gunthorpe , Andreas Larsson , Borislav Petkov , Dave Hansen , "David S. Miller" , Geoff Levand , Helge Deller , Ingo Molnar , iommu@lists.linux.dev, "James E.J. Bottomley" , Jason Wang , Juergen Gross , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Madhavan Srinivasan , Matt Turner , Michael Ellerman , "Michael S. Tsirkin" , Richard Henderson , sparclinux@vger.kernel.org, Stefano Stabellini , Thomas Bogendoerfer , Thomas Gleixner , virtualization@lists.linux.dev, x86@kernel.org, xen-devel@lists.xenproject.org Subject: [PATCH 6/9] x86: Use physical address for DMA mapping Date: Thu, 18 Sep 2025 21:45:06 +0300 Message-ID: X-Mailer: git-send-email 2.51.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky Perform mechanical conversion from DMA .map_page to .map_phys. 
Signed-off-by: Leon Romanovsky --- arch/x86/kernel/amd_gart_64.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 3485d419c2f5e..f1ffdc0e4a3ab 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -222,13 +222,14 @@ static dma_addr_t dma_map_area(struct device *dev, dm= a_addr_t phys_mem, } =20 /* Map a single area into the IOMMU */ -static dma_addr_t gart_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, +static dma_addr_t gart_map_phys(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir, unsigned long attrs) { unsigned long bus; - phys_addr_t paddr =3D page_to_phys(page) + offset; + + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; =20 if (!need_iommu(dev, paddr, size)) return paddr; @@ -242,7 +243,7 @@ static dma_addr_t gart_map_page(struct device *dev, str= uct page *page, /* * Free a DMA mapping. 
*/ -static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, +static void gart_unmap_phys(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { @@ -282,7 +283,7 @@ static void gart_unmap_sg(struct device *dev, struct sc= atterlist *sg, int nents, for_each_sg(sg, s, nents, i) { if (!s->dma_length || !s->length) break; - gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0); + gart_unmap_phys(dev, s->dma_address, s->dma_length, dir, 0); } } =20 @@ -487,7 +488,7 @@ static void gart_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, unsigned long attrs) { - gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); + gart_unmap_phys(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); dma_direct_free(dev, size, vaddr, dma_addr, attrs); } =20 @@ -668,8 +669,8 @@ static __init int init_amd_gatt(struct agp_kern_info *i= nfo) static const struct dma_map_ops gart_dma_ops =3D { .map_sg =3D gart_map_sg, .unmap_sg =3D gart_unmap_sg, - .map_page =3D gart_map_page, - .unmap_page =3D gart_unmap_page, + .map_phys =3D gart_map_phys, + .unmap_phys =3D gart_unmap_phys, .alloc =3D gart_alloc_coherent, .free =3D gart_free_coherent, .mmap =3D dma_common_mmap, --=20 2.51.0 From nobody Thu Oct 2 09:16:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id B3D5C2D6E4E; Thu, 18 Sep 2025 18:45:57 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221157; cv=none; b=XuOClYioD6m6mepIptDJOcZyPAsU/tLD3q5vSdbJRWoNLoixOhfuiI+jpnQekfXL/2EdyErJfsz0t0qXuZUAK//pRz724NQAGtt1tU+yFhb5QM96fBnok9yNKTzMY7NJRu33DW10Iukdt3RcWU+cZGES3azs6h5fjsyJN9m78gk= ARC-Message-Signature: i=1; a=rsa-sha256; 
d=subspace.kernel.org; s=arc-20240116; t=1758221157; c=relaxed/simple; bh=1mDXfVNbsYLo4a+duzDpY85ZUnJWos79Vyrao8w+xkA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=i5qJjcg8coYAJH7x2wDOI2GfVznXabH9S6BUf0ERrrE2PcT/JnAmeY+u1Ipn5MtN4HExAPs73FycFYo1y/jNAe381EweoWANzQsRyKw7edZgdeItd5EPjidi8lUNOFN7EskZIC91Ibjkjk5QPetgu9lcUpmuFiGt2y22pC68KUk= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=uJjcqkji; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="uJjcqkji" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 65B01C4CEE7; Thu, 18 Sep 2025 18:45:56 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1758221157; bh=1mDXfVNbsYLo4a+duzDpY85ZUnJWos79Vyrao8w+xkA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=uJjcqkjioeaPHR3oTtovXk7XnK/pd4Dpvt6tqBF2mLVNs5Q4naZBBRjxEELRfi2+Y D4kIQlN6hJhac0eiwI3sASKq7B/5bGRMKu7BFRE0t7mA5u6NXWj4T2DNxp00i+Dz8P CDKvCCa9nHcj0qbtseSusPltdxhIdxKZ4Yl5D4KaZQNkpzvLKwIkvugBIZkuUi+n41 sRDjxFx3/OcIdrXVx9M+pcPfgc0XKte5yCfdgAtR79e5ju1JRnVXErNA0tRs33R8Nz 1UIUOZkvr968XteLjAHpFkZyPWqxmq1DajIw+0WZAlLFornxw8ZulfpFSWmmyNnZ69 l7OvidjZ6KebQ== From: Leon Romanovsky To: Marek Szyprowski Cc: Leon Romanovsky , Jason Gunthorpe , Andreas Larsson , Borislav Petkov , Dave Hansen , "David S. Miller" , Geoff Levand , Helge Deller , Ingo Molnar , iommu@lists.linux.dev, "James E.J. Bottomley" , Jason Wang , Juergen Gross , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Madhavan Srinivasan , Matt Turner , Michael Ellerman , "Michael S. 
Tsirkin" , Richard Henderson , sparclinux@vger.kernel.org, Stefano Stabellini , Thomas Bogendoerfer , Thomas Gleixner , virtualization@lists.linux.dev, x86@kernel.org, xen-devel@lists.xenproject.org Subject: [PATCH 7/9] vdpa: Convert to physical address DMA mapping Date: Thu, 18 Sep 2025 21:45:07 +0300 Message-ID: <517785de56c12927a782b6bc51cc84e06493958d.1758219787.git.leon@kernel.org> X-Mailer: git-send-email 2.51.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky Use physical address directly in DMA mapping flow. Signed-off-by: Leon Romanovsky --- drivers/vdpa/vdpa_user/iova_domain.c | 11 +++++------ drivers/vdpa/vdpa_user/iova_domain.h | 8 ++++---- drivers/vdpa/vdpa_user/vduse_dev.c | 18 ++++++++++-------- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/= iova_domain.c index 58116f89d8dae..c0ecf01003cd3 100644 --- a/drivers/vdpa/vdpa_user/iova_domain.c +++ b/drivers/vdpa/vdpa_user/iova_domain.c @@ -396,17 +396,16 @@ void vduse_domain_sync_single_for_cpu(struct vduse_io= va_domain *domain, read_unlock(&domain->bounce_lock); } =20 -dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain, - struct page *page, unsigned long offset, - size_t size, enum dma_data_direction dir, +dma_addr_t vduse_domain_map_phys(struct vduse_iova_domain *domain, + phys_addr_t pa, size_t size, + enum dma_data_direction dir, unsigned long attrs) { struct iova_domain *iovad =3D &domain->stream_iovad; unsigned long limit =3D domain->bounce_size - 1; - phys_addr_t pa =3D page_to_phys(page) + offset; dma_addr_t iova =3D vduse_domain_alloc_iova(iovad, size, limit); =20 - if (!iova) + if (!iova || (attrs & DMA_ATTR_MMIO)) return DMA_MAPPING_ERROR; =20 if (vduse_domain_init_bounce_map(domain)) @@ -430,7 
+429,7 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_doma= in *domain, return DMA_MAPPING_ERROR; } =20 -void vduse_domain_unmap_page(struct vduse_iova_domain *domain, +void vduse_domain_unmap_phys(struct vduse_iova_domain *domain, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/= iova_domain.h index 7f3f0928ec781..7c4546fd856ab 100644 --- a/drivers/vdpa/vdpa_user/iova_domain.h +++ b/drivers/vdpa/vdpa_user/iova_domain.h @@ -53,12 +53,12 @@ void vduse_domain_sync_single_for_cpu(struct vduse_iova= _domain *domain, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir); =20 -dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain, - struct page *page, unsigned long offset, - size_t size, enum dma_data_direction dir, +dma_addr_t vduse_domain_map_phys(struct vduse_iova_domain *domain, + phys_addr_t phys, size_t size, + enum dma_data_direction dir, unsigned long attrs); =20 -void vduse_domain_unmap_page(struct vduse_iova_domain *domain, +void vduse_domain_unmap_phys(struct vduse_iova_domain *domain, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, unsigned long attrs); =20 diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vd= use_dev.c index 04620bb77203d..75aa3c9f83fb5 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -834,25 +834,27 @@ static void vduse_dev_sync_single_for_cpu(struct devi= ce *dev, vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir); } =20 -static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, +static dma_addr_t vduse_dev_map_phys(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs) { struct vduse_dev *vdev =3D dev_to_vduse(dev); struct vduse_iova_domain *domain =3D vdev->domain; =20 - return 
vduse_domain_map_page(domain, page, offset, size, dir, attrs); + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; + + return vduse_domain_map_phys(domain, phys, size, dir, attrs); } =20 -static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr, +static void vduse_dev_unmap_phys(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct vduse_dev *vdev =3D dev_to_vduse(dev); struct vduse_iova_domain *domain =3D vdev->domain; =20 - return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs); + return vduse_domain_unmap_phys(domain, dma_addr, size, dir, attrs); } =20 static void *vduse_dev_alloc_coherent(struct device *dev, size_t size, @@ -896,8 +898,8 @@ static size_t vduse_dev_max_mapping_size(struct device = *dev) static const struct dma_map_ops vduse_dev_dma_ops =3D { .sync_single_for_device =3D vduse_dev_sync_single_for_device, .sync_single_for_cpu =3D vduse_dev_sync_single_for_cpu, - .map_page =3D vduse_dev_map_page, - .unmap_page =3D vduse_dev_unmap_page, + .map_phys =3D vduse_dev_map_phys, + .unmap_phys =3D vduse_dev_unmap_phys, .alloc =3D vduse_dev_alloc_coherent, .free =3D vduse_dev_free_coherent, .max_mapping_size =3D vduse_dev_max_mapping_size, --=20 2.51.0 From nobody Thu Oct 2 09:16:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 5D91D2D9494; Thu, 18 Sep 2025 18:46:11 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221171; cv=none; b=RRO4UmSfwNtdMVnDMSYyeSwPO8M7F2pD1rwZH++X0UOvwgCNBRarBfdDrvMJLrCp0CGHxCOFd3MNCEnUCzNQx5zg2cqQiRu9XHPtLtUc+A0A4BVJ0oIXIKOftgnwUzvMnCW5irp17hl2Z9oYhOHKT2mHONa3A214Tg08ipgGMx0= ARC-Message-Signature: i=1; 
a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221171; c=relaxed/simple; bh=uXtCKxAn4lJIk0uyzU0s1tMehbPq4hSpIpuFU2JE2Qo=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=BjrdRT7dN9FDKOFmQqYcJCPKgOSHKgovMLjku/ki78qdj78ijm4e1lnxBqdy9ALtfXhkydNKWwcR0kW/rMLkQw4JdnpnJpk9N0ssejrAFRU6Tipb6aGYXcR3DA8j/LJlGy7oApD1q9uDv/CtVkSJ3Vv8KcDOJaUWL0oRk+xwD+Y= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=XcDUWoml; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="XcDUWoml" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 41BE7C4CEE7; Thu, 18 Sep 2025 18:46:10 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1758221171; bh=uXtCKxAn4lJIk0uyzU0s1tMehbPq4hSpIpuFU2JE2Qo=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=XcDUWomlStMMeFZ3/TJvShNKVI2ne4pT3ymiWQxIQ5cB6pCz+OArBrbwKpELnS63X AsFRL1spAgwyeCLnIS2Br262lbJnqBbpCVd4ULOZVZtnj2C10kT6CvJ8n+8JUEAfYP jFqxdtfM+opMXomXA3Ct9jKZuU3nvXXXih1anUIUmeYA2+NA0FUjI4Yl436qNKhCCW Aa/Vm8u8X4+psfEgiqdpsNqbGjvUEPS8JA0bNbKSpeXk0dd7H1N/gfHZ8mh5pUso/C fhMyzuZ9ej7kREv+eyKj/qcU15OhLAX2+3kVxzRJlcJjo/s2nJbBu1ygM503fdcjcp lkA9Tjnlkw/og== From: Leon Romanovsky To: Marek Szyprowski Cc: Leon Romanovsky , Jason Gunthorpe , Andreas Larsson , Borislav Petkov , Dave Hansen , "David S. Miller" , Geoff Levand , Helge Deller , Ingo Molnar , iommu@lists.linux.dev, "James E.J. Bottomley" , Jason Wang , Juergen Gross , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Madhavan Srinivasan , Matt Turner , Michael Ellerman , "Michael S. 
Tsirkin" , Richard Henderson , sparclinux@vger.kernel.org, Stefano Stabellini , Thomas Bogendoerfer , Thomas Gleixner , virtualization@lists.linux.dev, x86@kernel.org, xen-devel@lists.xenproject.org Subject: [PATCH 8/9] xen: swiotlb: Convert mapping routine to rely on physical address Date: Thu, 18 Sep 2025 21:45:08 +0300 Message-ID: X-Mailer: git-send-email 2.51.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky Switch to .map_phys callback instead of .map_page. Signed-off-by: Leon Romanovsky --- drivers/xen/grant-dma-ops.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c index 29257d2639dbf..7f76e516fe24c 100644 --- a/drivers/xen/grant-dma-ops.c +++ b/drivers/xen/grant-dma-ops.c @@ -163,18 +163,22 @@ static void xen_grant_dma_free_pages(struct device *d= ev, size_t size, xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0); } =20 -static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *= page, - unsigned long offset, size_t size, +static dma_addr_t xen_grant_dma_map_phys(struct device *dev, phys_addr_t p= hys, + size_t size, enum dma_data_direction dir, unsigned long attrs) { struct xen_grant_dma_data *data; + unsigned long offset =3D offset_in_page(phys); unsigned long dma_offset =3D xen_offset_in_page(offset), pfn_offset =3D XEN_PFN_DOWN(offset); unsigned int i, n_pages =3D XEN_PFN_UP(dma_offset + size); grant_ref_t grant; dma_addr_t dma_handle; =20 + if (attrs & DMA_ATTR_MMIO) + return DMA_MAPPING_ERROR; + if (WARN_ON(dir =3D=3D DMA_NONE)) return DMA_MAPPING_ERROR; =20 @@ -190,7 +194,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device = *dev, struct page *page, =20 for (i =3D 0; i < n_pages; i++) { 
gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, - pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset), + pfn_to_gfn(page_to_xen_pfn(phys_to_page(phys)) + i + pfn_offset), dir =3D=3D DMA_TO_DEVICE); } =20 @@ -199,7 +203,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device = *dev, struct page *page, return dma_handle; } =20 -static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_ha= ndle, +static void xen_grant_dma_unmap_phys(struct device *dev, dma_addr_t dma_ha= ndle, size_t size, enum dma_data_direction dir, unsigned long attrs) { @@ -242,7 +246,7 @@ static void xen_grant_dma_unmap_sg(struct device *dev, = struct scatterlist *sg, return; =20 for_each_sg(sg, s, nents, i) - xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir, + xen_grant_dma_unmap_phys(dev, s->dma_address, sg_dma_len(s), dir, attrs); } =20 @@ -257,7 +261,7 @@ static int xen_grant_dma_map_sg(struct device *dev, str= uct scatterlist *sg, return -EINVAL; =20 for_each_sg(sg, s, nents, i) { - s->dma_address =3D xen_grant_dma_map_page(dev, sg_page(s), s->offset, + s->dma_address =3D xen_grant_dma_map_phys(dev, sg_phys(s), s->length, dir, attrs); if (s->dma_address =3D=3D DMA_MAPPING_ERROR) goto out; @@ -286,8 +290,8 @@ static const struct dma_map_ops xen_grant_dma_ops =3D { .free_pages =3D xen_grant_dma_free_pages, .mmap =3D dma_common_mmap, .get_sgtable =3D dma_common_get_sgtable, - .map_page =3D xen_grant_dma_map_page, - .unmap_page =3D xen_grant_dma_unmap_page, + .map_phys =3D xen_grant_dma_map_phys, + .unmap_phys =3D xen_grant_dma_unmap_phys, .map_sg =3D xen_grant_dma_map_sg, .unmap_sg =3D xen_grant_dma_unmap_sg, .dma_supported =3D xen_grant_dma_supported, --=20 2.51.0 From nobody Thu Oct 2 09:16:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS 
id 7DD502D8DCF; Thu, 18 Sep 2025 18:46:07 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221167; cv=none; b=Ekd55Nx2kscjeqPN5MfJKErHtYu80lZKNDy6swjcEYQP/425dzbviOKLBPKw5A/hXYY6Dq+9ks9eOL3SkmfO1lwjGlvpnCfPRBHYjRy4Rs6/3aHndLkyzvW8Idfd7uRh3fP6DbhZj6T7ZiJK3m3cMdbzMxX6ucU7rgZ+A1s6lbQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758221167; c=relaxed/simple; bh=3bqXOAYctDiXe+OCMHlitClbfEhC7Di6QPs9FEZ1pPI=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=leI/0ZsDmp513dEv4Ux8F4GOm83SqdxqodDTbHw5txl5zDSfZVZRcEi7HDbroKJibw20fz7BBnzDq/o8W7LJE4trZYPQjkIrPGWHWcNondxAGEKOnzwLnGuUFnq98jt/jRh4uU6o21cZo4TrLPESgHSiDeJ6R87zQ+W9zoiymHI= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=DrF4UB5v; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="DrF4UB5v" Received: by smtp.kernel.org (Postfix) with ESMTPSA id BE03FC4CEE7; Thu, 18 Sep 2025 18:46:05 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1758221167; bh=3bqXOAYctDiXe+OCMHlitClbfEhC7Di6QPs9FEZ1pPI=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=DrF4UB5vNiS1ar6EQULnP2g8xHqnzBBCJmALQvRpfCCwMERikovmxa5qLdv90lwcz ISbg/kGE2Tx15VZG2vdkPFE7TQRrtuhFvEA4HHp4uvvjl43WFlncTqgHCP9W8wX6U0 xkIrRlvfY93G0GJ6rLphYBdU4/P5Y3W5660GWNxfdMbw9xHyNKR5bspM/f6LGlmcb0 udD+iUQiHebDEBJeOzgdkD6z4qs8pqG/CD7tfwkbjzQ863aDbZfxl2ejEdBKmr2Eva UOsKQLShS++Ltzx7J/LCHkdw5NkgqooNDn4P7ThMastTfrzbD5CblRWK/tnQFtzeWT PGzH6N+KD8OlA== From: Leon Romanovsky To: Marek Szyprowski Cc: Leon Romanovsky , Jason Gunthorpe , Andreas Larsson , Borislav Petkov , Dave Hansen , "David S. 
Miller" , Geoff Levand , Helge Deller , Ingo Molnar , iommu@lists.linux.dev, "James E.J. Bottomley" , Jason Wang , Juergen Gross , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Madhavan Srinivasan , Matt Turner , Michael Ellerman , "Michael S. Tsirkin" , Richard Henderson , sparclinux@vger.kernel.org, Stefano Stabellini , Thomas Bogendoerfer , Thomas Gleixner , virtualization@lists.linux.dev, x86@kernel.org, xen-devel@lists.xenproject.org Subject: [PATCH 9/9] dma-mapping: remove unused map_page callback Date: Thu, 18 Sep 2025 21:45:09 +0300 Message-ID: X-Mailer: git-send-email 2.51.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky After conversion of arch code to use physical address mapping, there are no users of .map_page() and .unmap_page() callbacks, so let's remove them. 
Signed-off-by: Leon Romanovsky --- include/linux/dma-map-ops.h | 7 ------- kernel/dma/mapping.c | 12 ------------ kernel/dma/ops_helpers.c | 8 +------- 3 files changed, 1 insertion(+), 26 deletions(-) diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index a2ec1566aa270..e0a78991fa8a3 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -31,13 +31,6 @@ struct dma_map_ops { void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); =20 - dma_addr_t (*map_page)(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, unsigned long attrs); - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir, - unsigned long attrs); - dma_addr_t (*map_phys)(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs); diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 32a85bfdf873a..37163eb49f9fa 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -171,16 +171,6 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_= t phys, size_t size, addr =3D iommu_dma_map_phys(dev, phys, size, dir, attrs); else if (ops->map_phys) addr =3D ops->map_phys(dev, phys, size, dir, attrs); - else if (!is_mmio && ops->map_page) { - struct page *page =3D phys_to_page(phys); - size_t offset =3D offset_in_page(phys); - - /* - * The dma_ops API contract for ops->map_page() requires - * kmappable memory. 
- */ - addr =3D ops->map_page(dev, page, offset, size, dir, attrs); - } =20 if (!is_mmio) kmsan_handle_dma(phys, size, dir); @@ -222,8 +212,6 @@ void dma_unmap_phys(struct device *dev, dma_addr_t addr= , size_t size, iommu_dma_unmap_phys(dev, addr, size, dir, attrs); else if (ops->unmap_phys) ops->unmap_phys(dev, addr, size, dir, attrs); - else - ops->unmap_page(dev, addr, size, dir, attrs); trace_dma_unmap_phys(dev, addr, size, dir, attrs); debug_dma_unmap_phys(dev, addr, size, dir); } diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c index 1eccbdbc99c1e..20caf9cabf699 100644 --- a/kernel/dma/ops_helpers.c +++ b/kernel/dma/ops_helpers.c @@ -76,11 +76,8 @@ struct page *dma_common_alloc_pages(struct device *dev, = size_t size, if (use_dma_iommu(dev)) *dma_handle =3D iommu_dma_map_phys(dev, phys, size, dir, DMA_ATTR_SKIP_CPU_SYNC); - else if (ops->map_phys) - *dma_handle =3D ops->map_phys(dev, phys, size, dir, - DMA_ATTR_SKIP_CPU_SYNC); else - *dma_handle =3D ops->map_page(dev, page, 0, size, dir, + *dma_handle =3D ops->map_phys(dev, phys, size, dir, DMA_ATTR_SKIP_CPU_SYNC); if (*dma_handle =3D=3D DMA_MAPPING_ERROR) { dma_free_contiguous(dev, page, size); @@ -102,8 +99,5 @@ void dma_common_free_pages(struct device *dev, size_t si= ze, struct page *page, else if (ops->unmap_phys) ops->unmap_phys(dev, dma_handle, size, dir, DMA_ATTR_SKIP_CPU_SYNC); - else if (ops->unmap_page) - ops->unmap_page(dev, dma_handle, size, dir, - DMA_ATTR_SKIP_CPU_SYNC); dma_free_contiguous(dev, page, size); } --=20 2.51.0