From nobody Sat Nov 30 03:33:50 2024 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 0A07C1A01BF; Thu, 12 Sep 2024 11:16:53 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1726139814; cv=none; b=mqM/AAs1Lr9hYS3630KJHxb+w6qTruhKPjeyHVW7a8E7z7siWwrNig6D+8i5651WNvjfIcY++oy1De/IEv/1yRRoDix9tLOK9K7KSP8Rok1Pd2aX1vNmjzBnZ+yA2e7mxjPa1A/c+HcbvZ4siEJK6YXu1IBBM4dUxUYgV8OHKhM= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1726139814; c=relaxed/simple; bh=yM7hzIxBdo84p+8J0W8C8RyNqM4t4h1sCQMfoWGuons=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=dqVnewrnSsZJKBYDg3p1UeZYp0MZ6FiiYzvtp+hg5tsk+cyLFZaCCU82zeUHOCBg5gYfzcvRhbmOb5ttbU+BMNMGOOrOYvXYuof9e15r6wpEZrL9Ffc2/DC0qobLICvuQ2fyQ7lr8wIAcHLqNncgnGG/OZF+Cxy5awphy61/Fx0= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=KcO5hqoq; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="KcO5hqoq" Received: by smtp.kernel.org (Postfix) with ESMTPSA id D69B4C4CEC5; Thu, 12 Sep 2024 11:16:52 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1726139813; bh=yM7hzIxBdo84p+8J0W8C8RyNqM4t4h1sCQMfoWGuons=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=KcO5hqoqU35uJkwTk2yXTFB4Zx39KtK7hLnT6ftctxrL1AUdcl2c9tnGfUA2Fvw2M r5iFRNfnVbZv6yTQ48Tjd/51FH0f19FXC8BtccsPCy7E0m8ApLxF1G5F76sHNMTt08 fF3pMkKWUGBB039tUpB9ZsNe7dvQ2urhvG1SBgra3+HegAm5UPpxSCG/xYfD9p+slm iQ2atj875hpQG5yGAlNljzrlOsJXskm97Rsc6suFUTq9OBYSlV8Yzgx6F9X2SySLHy 
OzJA/eLp51lYQsqHxESmhnqv+PE06l4lHYcZq7dAQQz89lRv1f943RJZNMgpUqV/6n axJKYYHuNlJuw== From: Leon Romanovsky To: Jens Axboe , Jason Gunthorpe , Robin Murphy , Joerg Roedel , Will Deacon , Keith Busch , Christoph Hellwig , "Zeng, Oak" , Chaitanya Kulkarni Cc: Leon Romanovsky , Sagi Grimberg , Bjorn Helgaas , Logan Gunthorpe , Yishai Hadas , Shameer Kolothum , Kevin Tian , Alex Williamson , Marek Szyprowski , =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= , Andrew Morton , linux-block@vger.kernel.org, linux-kernel@vger.kernel.org, linux-rdma@vger.kernel.org, iommu@lists.linux.dev, linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org Subject: [RFC v2 08/21] mm/hmm: let users to tag specific PFN with DMA mapped bit Date: Thu, 12 Sep 2024 14:15:43 +0300 Message-ID: <3c68ab13bcabe908c35388c66bf38a43f5d68c8b.1726138681.git.leon@kernel.org> X-Mailer: git-send-email 2.46.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Leon Romanovsky Introduce new sticky flag (HMM_PFN_DMA_MAPPED), which isn't overwritten by HMM range fault. Such flag allows users to tag specific PFNs with inform= ation if this specific PFN was already DMA mapped. Signed-off-by: Leon Romanovsky --- include/linux/hmm.h | 4 ++++ mm/hmm.c | 34 +++++++++++++++++++++------------- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 126a36571667..2999697db83a 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -23,6 +23,8 @@ struct mmu_interval_notifier; * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_= VALID) * HMM_PFN_ERROR - accessing the pfn is impossible and the device should * fail. 
ie poisoned memory, special pages, no vma, etc + * HMM_PFN_DMA_MAPPED - Flag preserved on input-to-output transformation + * to mark that page is already DMA mapped * * On input: * 0 - Return the current state of the page, do not fault = it. @@ -36,6 +38,8 @@ enum hmm_pfn_flags { HMM_PFN_VALID =3D 1UL << (BITS_PER_LONG - 1), HMM_PFN_WRITE =3D 1UL << (BITS_PER_LONG - 2), HMM_PFN_ERROR =3D 1UL << (BITS_PER_LONG - 3), + /* Sticky flag, carried from Input to Output */ + HMM_PFN_DMA_MAPPED =3D 1UL << (BITS_PER_LONG - 7), HMM_PFN_ORDER_SHIFT =3D (BITS_PER_LONG - 8), =20 /* Input flags */ diff --git a/mm/hmm.c b/mm/hmm.c index 7e0229ae4a5a..2a0c34d7cb2b 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -44,8 +44,10 @@ static int hmm_pfns_fill(unsigned long addr, unsigned lo= ng end, { unsigned long i =3D (addr - range->start) >> PAGE_SHIFT; =20 - for (; addr < end; addr +=3D PAGE_SIZE, i++) - range->hmm_pfns[i] =3D cpu_flags; + for (; addr < end; addr +=3D PAGE_SIZE, i++) { + range->hmm_pfns[i] &=3D HMM_PFN_DMA_MAPPED; + range->hmm_pfns[i] |=3D cpu_flags; + } return 0; } =20 @@ -202,8 +204,10 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, un= signed long addr, return hmm_vma_fault(addr, end, required_fault, walk); =20 pfn =3D pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); - for (i =3D 0; addr < end; addr +=3D PAGE_SIZE, i++, pfn++) - hmm_pfns[i] =3D pfn | cpu_flags; + for (i =3D 0; addr < end; addr +=3D PAGE_SIZE, i++, pfn++) { + hmm_pfns[i] &=3D HMM_PFN_DMA_MAPPED; + hmm_pfns[i] |=3D pfn | cpu_flags; + } return 0; } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -236,7 +240,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, uns= igned long addr, hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0); if (required_fault) goto fault; - *hmm_pfn =3D 0; + *hmm_pfn =3D *hmm_pfn & HMM_PFN_DMA_MAPPED; return 0; } =20 @@ -253,14 +257,14 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, u= nsigned long addr, cpu_flags =3D HMM_PFN_VALID; if 
(is_writable_device_private_entry(entry)) cpu_flags |=3D HMM_PFN_WRITE; - *hmm_pfn =3D swp_offset_pfn(entry) | cpu_flags; + *hmm_pfn =3D (*hmm_pfn & HMM_PFN_DMA_MAPPED) | swp_offset_pfn(entry) | = cpu_flags; return 0; } =20 required_fault =3D hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0); if (!required_fault) { - *hmm_pfn =3D 0; + *hmm_pfn =3D *hmm_pfn & HMM_PFN_DMA_MAPPED; return 0; } =20 @@ -304,11 +308,11 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, u= nsigned long addr, pte_unmap(ptep); return -EFAULT; } - *hmm_pfn =3D HMM_PFN_ERROR; + *hmm_pfn =3D (*hmm_pfn & HMM_PFN_DMA_MAPPED) | HMM_PFN_ERROR; return 0; } =20 - *hmm_pfn =3D pte_pfn(pte) | cpu_flags; + *hmm_pfn =3D (*hmm_pfn & HMM_PFN_DMA_MAPPED) | pte_pfn(pte) | cpu_flags; return 0; =20 fault: @@ -448,8 +452,10 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long= start, unsigned long end, } =20 pfn =3D pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); - for (i =3D 0; i < npages; ++i, ++pfn) - hmm_pfns[i] =3D pfn | cpu_flags; + for (i =3D 0; i < npages; ++i, ++pfn) { + hmm_pfns[i] &=3D HMM_PFN_DMA_MAPPED; + hmm_pfns[i] |=3D pfn | cpu_flags; + } goto out_unlock; } =20 @@ -507,8 +513,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsi= gned long hmask, } =20 pfn =3D pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT); - for (; addr < end; addr +=3D PAGE_SIZE, i++, pfn++) - range->hmm_pfns[i] =3D pfn | cpu_flags; + for (; addr < end; addr +=3D PAGE_SIZE, i++, pfn++) { + range->hmm_pfns[i] &=3D HMM_PFN_DMA_MAPPED; + range->hmm_pfns[i] |=3D pfn | cpu_flags; + } =20 spin_unlock(ptl); return 0; --=20 2.46.0