__get_user_pages() traverses the page table and takes a reference to
the page that the given user address corresponds to if FOLL_GET or
FOLL_PIN is set. However, setting both FOLL_GET and FOLL_PIN is not
supported. This check should be done early, yet currently it is not
done until follow_page_pte() is entered and fails there.
Move the check to the beginning of __get_user_pages().
Signed-off-by: Baoquan He <bhe@redhat.com>
---
v1->v2:
- Fix a code bug caused by a copy-and-paste error, reported by the
  lkp test robot.
mm/gup.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 73777b1de679..f9bce14ed3cd 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -847,11 +847,6 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
pte_t *ptep, pte;
int ret;
- /* FOLL_GET and FOLL_PIN are mutually exclusive. */
- if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
- (FOLL_PIN | FOLL_GET)))
- return ERR_PTR(-EINVAL);
-
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!ptep)
return no_page_table(vma, flags, address);
@@ -1434,6 +1429,11 @@ static long __get_user_pages(struct mm_struct *mm,
VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
+ (FOLL_PIN | FOLL_GET)))
+ return -EINVAL;
+
do {
struct page *page;
unsigned int page_increm;
--
2.41.0
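As background on why the two flags are mutually exclusive: FOLL_GET and
FOLL_PIN are two distinct ways for GUP to take a reference on the target
pages, and the caller has to release them differently. A minimal
illustrative sketch using the fast-GUP helpers (addr stands for some user
virtual address; error handling omitted):

	struct page *page;
	int ret;

	/* FOLL_GET-style reference, released with put_page() */
	ret = get_user_pages_fast(addr, 1, FOLL_WRITE, &page);
	if (ret == 1)
		put_page(page);

	/* FOLL_PIN-style reference, released with unpin_user_page() */
	ret = pin_user_pages_fast(addr, 1, FOLL_WRITE, &page);
	if (ret == 1)
		unpin_user_page(page);

A single GUP call therefore takes one kind of reference or the other,
never both, which is what the check enforces.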
On 31.03.25 10:13, Baoquan He wrote:
...snip...
> @@ -1434,6 +1429,11 @@ static long __get_user_pages(struct mm_struct *mm,
>  	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
> 
> +	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
> +	if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
> +			 (FOLL_PIN | FOLL_GET)))
> +		return -EINVAL;
> +

We already have that check in is_valid_gup_args(), that catches all external
users that could possibly mess this up.

So we can just convert that into a VM_WARN_ON_ONCE(), and while doing that,
we should convert the VM_BUG_ON() above to a VM_WARN_ON_ONCE() as well.

-- 
Cheers,

David / dhildenb
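For reference, the check being referred to lives in is_valid_gup_args() in
mm/gup.c and looks roughly like this (paraphrased; exact surroundings vary
by kernel version):

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return false;

All external GUP entry points (get_user_pages*(), pin_user_pages*(), and
friends) have their flags sanitized through this helper, so a caller
requesting both flags is already rejected with a warning before
__get_user_pages() is reached.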
On 04/01/25 at 10:02am, David Hildenbrand wrote:
> On 31.03.25 10:13, Baoquan He wrote:
...snip...
> We already have that check in is_valid_gup_args(), that catches all external
> users that could possibly mess this up.

Right.

> So we can just convert that into a VM_WARN_ON_ONCE(), and while doing that,
> we should convert the VM_BUG_ON() above to a VM_WARN_ON_ONCE() as well.

Sounds great to me, will put below change into this patch of v3 as suggested.
Thanks.

diff --git a/mm/gup.c b/mm/gup.c
index 9e4ed09c578b..d551da9549b1 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1427,10 +1427,10 @@ static long __get_user_pages(struct mm_struct *mm,
 
 	start = untagged_addr_remote(mm, start);
 
-	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
+	VM_WARN_ON_ONCE(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
 
 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
-	if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
+	if (VM_WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
 			 (FOLL_PIN | FOLL_GET)))
 		return -EINVAL;
On 01.04.25 16:34, Baoquan He wrote:
> On 04/01/25 at 10:02am, David Hildenbrand wrote:
>> So we can just convert that into a VM_WARN_ON_ONCE(), and while doing that,
>> we should convert the VM_BUG_ON() above to a VM_WARN_ON_ONCE() as well.
> 
> Sounds great to me, will put below change into this patch of v3 as suggested.
> Thanks.
...snip...
>  	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
> -	if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
> +	if (VM_WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
>  			 (FOLL_PIN | FOLL_GET)))

That won't work (or shouldn't work :) ). Just drop the handling, it's a
sanity check we don't expect to ever trigger, just like the old VM_BUG_ON
above.

-- 
Cheers,

David / dhildenb
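For reference, the if () form cannot work because, unlike WARN_ON_ONCE(),
VM_WARN_ON_ONCE() does not evaluate to the condition's value. In
include/linux/mmdebug.h it is defined roughly as follows (paraphrased;
exact form varies by kernel version):

	#ifdef CONFIG_DEBUG_VM
	#define VM_WARN_ON_ONCE(cond)	(void)WARN_ON_ONCE(cond)
	#else
	#define VM_WARN_ON_ONCE(cond)	BUILD_BUG_ON_INVALID(cond)
	#endif

Either way the macro has void type, so it cannot be used as an if ()
condition, and with CONFIG_DEBUG_VM=n the condition is not checked at
runtime at all. That is why the error return is dropped along with the
conversion, as suggested above.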
On 04/01/25 at 04:37pm, David Hildenbrand wrote:
> On 01.04.25 16:34, Baoquan He wrote:
> > On 04/01/25 at 10:02am, David Hildenbrand wrote:
> > > On 31.03.25 10:13, Baoquan He wrote:
...snip...
> > > We already have that check in is_valid_gup_args(), that catches all external
> > > users that could possibly mess this up.
> >
> > Right.
> >
> > >
> > > So we can just convert that into a VM_WARN_ON_ONCE(), and while doing that,
> > > we should convert the VM_BUG_ON() above to a VM_WARN_ON_ONCE() as well.
> >
> > Sounds great to me, will put below change into this patch of v3 as suggested.
> > Thanks.
> >
> > diff --git a/mm/gup.c b/mm/gup.c
> > index 9e4ed09c578b..d551da9549b1 100644
> > --- a/mm/gup.c
> > +++ b/mm/gup.c
> > @@ -1427,10 +1427,10 @@ static long __get_user_pages(struct mm_struct *mm,
> > start = untagged_addr_remote(mm, start);
> > - VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
> > + VM_WARN_ON_ONCE(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
> > /* FOLL_GET and FOLL_PIN are mutually exclusive. */
> > - if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
> > + if (VM_WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
> > (FOLL_PIN | FOLL_GET)))
>
> That won't work (or shouldn't work :) ). Just drop the handling, it's a
> sanity check we don't expect to ever trigger, just like the old VM_BUG_ON
> above.
Ah, I misunderstood. Will add the below change into v3 of this patch.
diff --git a/mm/gup.c b/mm/gup.c
index 9e4ed09c578b..a8a10a8ebf14 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1427,12 +1427,9 @@ static long __get_user_pages(struct mm_struct *mm,
start = untagged_addr_remote(mm, start);
- VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
-
- /* FOLL_GET and FOLL_PIN are mutually exclusive. */
- if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
- (FOLL_PIN | FOLL_GET)))
- return -EINVAL;
+ VM_WARN_ON_ONCE(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
+ VM_WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
+ (FOLL_PIN | FOLL_GET));
do {
struct page *page;