Use kmap_local_folio() instead of kmap_local_page().
Replace 2 calls to compound_head() with one.
This prepares us for the removal of unmap_and_put_page().
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
mm/memory.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index d63f0d5abcc9..3dd6c57e6511 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6691,6 +6691,7 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
while (len) {
int bytes, offset;
void *maddr;
+ struct folio *folio;
struct vm_area_struct *vma = NULL;
struct page *page = get_user_page_vma_remote(mm, addr,
gup_flags, &vma);
@@ -6722,21 +6723,22 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
if (bytes <= 0)
break;
} else {
+ folio = page_folio(page);
bytes = len;
offset = addr & (PAGE_SIZE-1);
if (bytes > PAGE_SIZE-offset)
bytes = PAGE_SIZE-offset;
- maddr = kmap_local_page(page);
+ maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
- set_page_dirty_lock(page);
+ folio_mark_dirty_lock(folio);
} else {
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
- unmap_and_put_page(page, maddr);
+ folio_release_kmap(folio, maddr);
}
len -= bytes;
buf += bytes;
--
2.50.0
On 09.07.25 21:40, Vishal Moola (Oracle) wrote: > Use kmap_local_folio() instead of kmap_local_page(). > Replaces 2 calls to compound_head() with one. > > This prepares us for the removal of unmap_and_put_page(). > > Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> > --- > mm/memory.c | 8 +++++--- > 1 file changed, 5 insertions(+), 3 deletions(-) > > diff --git a/mm/memory.c b/mm/memory.c > index d63f0d5abcc9..3dd6c57e6511 100644 > --- a/mm/memory.c > +++ b/mm/memory.c > @@ -6691,6 +6691,7 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, > while (len) { > int bytes, offset; > void *maddr; > + struct folio *folio; > struct vm_area_struct *vma = NULL; > struct page *page = get_user_page_vma_remote(mm, addr, > gup_flags, &vma); > @@ -6722,21 +6723,22 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, > if (bytes <= 0) > break; > } else { > + folio = page_folio(page); > bytes = len; > offset = addr & (PAGE_SIZE-1); > if (bytes > PAGE_SIZE-offset) > bytes = PAGE_SIZE-offset; > > - maddr = kmap_local_page(page); > + maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE); > if (write) { > copy_to_user_page(vma, page, addr, > maddr + offset, buf, bytes); > - set_page_dirty_lock(page); > + folio_mark_dirty_lock(folio); > } else { > copy_from_user_page(vma, page, addr, > buf, maddr + offset, bytes); > } > - unmap_and_put_page(page, maddr); > + folio_release_kmap(folio, maddr); Acked-by: David Hildenbrand <david@redhat.com> -- Cheers, David / dhildenb
© 2016 - 2025 Red Hat, Inc.