 mm/ksm.c  |  2 +-
 mm/rmap.c | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)
From: Ye Liu <liuye@kylinos.cn>
Add NULL pointer checks for rmap_one callback in rmap_walk operations
to prevent potential NULL pointer dereferences. Also clean up some
code by removing redundant comments and caching folio_nr_pages().
Signed-off-by: Ye Liu <liuye@kylinos.cn>
---
mm/ksm.c | 2 +-
mm/rmap.c | 14 +++++++-------
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 18b3690bb69a..22ad069d1860 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -3068,7 +3068,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
- if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
+ if (rwc->rmap_one && !rwc->rmap_one(folio, vma, addr, rwc->arg)) {
anon_vma_unlock_read(anon_vma);
return;
}
diff --git a/mm/rmap.c b/mm/rmap.c
index fb63d9256f09..17d43d104a0d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1202,8 +1202,7 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
if (!mapping)
return 0;
- __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
- /* locked = */false);
+ __rmap_walk_file(NULL, mapping, pgoff, nr_pages, &rwc, false);
return state.cleaned;
}
@@ -2806,6 +2805,7 @@ static void rmap_walk_anon(struct folio *folio,
struct anon_vma *anon_vma;
pgoff_t pgoff_start, pgoff_end;
struct anon_vma_chain *avc;
+ unsigned long nr_pages;
if (locked) {
anon_vma = folio_anon_vma(folio);
@@ -2817,13 +2817,13 @@ static void rmap_walk_anon(struct folio *folio,
if (!anon_vma)
return;
+ nr_pages = folio_nr_pages(folio);
pgoff_start = folio_pgoff(folio);
- pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
+ pgoff_end = pgoff_start + nr_pages - 1;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
- unsigned long address = vma_address(vma, pgoff_start,
- folio_nr_pages(folio));
+ unsigned long address = vma_address(vma, pgoff_start, nr_pages);
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
@@ -2831,7 +2831,7 @@ static void rmap_walk_anon(struct folio *folio,
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
- if (!rwc->rmap_one(folio, vma, address, rwc->arg))
+ if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg))
break;
if (rwc->done && rwc->done(folio))
break;
@@ -2894,7 +2894,7 @@ static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
- if (!rwc->rmap_one(folio, vma, address, rwc->arg))
+ if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg))
goto done;
if (rwc->done && rwc->done(folio))
goto done;
--
2.25.1
On 19/06/25 1:20 pm, Ye Liu wrote: > From: Ye Liu <liuye@kylinos.cn> > > Add NULL pointer checks for rmap_one callback in rmap_walk operations > to prevent potential NULL pointer dereferences. Also clean up some > code by removing redundant comments and caching folio_nr_pages(). > > Signed-off-by: Ye Liu <liuye@kylinos.cn> > --- Don't really see the point of this patch. The rmap_one call back will always be there as we need a way to define how to unmap/do the reverse map walk for one VMA at a time. And the folio_nr_pages() will probably get cached by the compiler anyways. > mm/ksm.c | 2 +- > mm/rmap.c | 14 +++++++------- > 2 files changed, 8 insertions(+), 8 deletions(-) > > diff --git a/mm/ksm.c b/mm/ksm.c > index 18b3690bb69a..22ad069d1860 100644 > --- a/mm/ksm.c > +++ b/mm/ksm.c > @@ -3068,7 +3068,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) > if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) > continue; > > - if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { > + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, addr, rwc->arg)) { > anon_vma_unlock_read(anon_vma); > return; > } > diff --git a/mm/rmap.c b/mm/rmap.c > index fb63d9256f09..17d43d104a0d 100644 > --- a/mm/rmap.c > +++ b/mm/rmap.c > @@ -1202,8 +1202,7 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, > if (!mapping) > return 0; > > - __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc, > - /* locked = */false); > + __rmap_walk_file(NULL, mapping, pgoff, nr_pages, &rwc, false); > > return state.cleaned; > } > @@ -2806,6 +2805,7 @@ static void rmap_walk_anon(struct folio *folio, > struct anon_vma *anon_vma; > pgoff_t pgoff_start, pgoff_end; > struct anon_vma_chain *avc; > + unsigned long nr_pages; > > if (locked) { > anon_vma = folio_anon_vma(folio); > @@ -2817,13 +2817,13 @@ static void rmap_walk_anon(struct folio *folio, > if (!anon_vma) > return; > > + nr_pages = folio_nr_pages(folio); > pgoff_start = folio_pgoff(folio); > - 
pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; > + pgoff_end = pgoff_start + nr_pages - 1; > anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, > pgoff_start, pgoff_end) { > struct vm_area_struct *vma = avc->vma; > - unsigned long address = vma_address(vma, pgoff_start, > - folio_nr_pages(folio)); > + unsigned long address = vma_address(vma, pgoff_start, nr_pages); > > VM_BUG_ON_VMA(address == -EFAULT, vma); > cond_resched(); > @@ -2831,7 +2831,7 @@ static void rmap_walk_anon(struct folio *folio, > if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) > continue; > > - if (!rwc->rmap_one(folio, vma, address, rwc->arg)) > + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg)) > break; > if (rwc->done && rwc->done(folio)) > break; > @@ -2894,7 +2894,7 @@ static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, > if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) > continue; > > - if (!rwc->rmap_one(folio, vma, address, rwc->arg)) > + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg)) > goto done; > if (rwc->done && rwc->done(folio)) > goto done;
Hi Lorenzo and Dev, Thanks for your feedback. I understand all your points and will drop this patch. Best regards, Ye Liu On 2025/6/19 16:17, Dev Jain wrote: > > On 19/06/25 1:20 pm, Ye Liu wrote: >> From: Ye Liu <liuye@kylinos.cn> >> >> Add NULL pointer checks for rmap_one callback in rmap_walk operations >> to prevent potential NULL pointer dereferences. Also clean up some >> code by removing redundant comments and caching folio_nr_pages(). >> >> Signed-off-by: Ye Liu <liuye@kylinos.cn> >> --- > > Don't really see the point of this patch. The rmap_one call back will > always be there as we need a way to define how to unmap/do the reverse > map walk for one VMA at a time. And the folio_nr_pages() will probably > get cached by the compiler anyways. > >> mm/ksm.c | 2 +- >> mm/rmap.c | 14 +++++++------- >> 2 files changed, 8 insertions(+), 8 deletions(-) >> >> diff --git a/mm/ksm.c b/mm/ksm.c >> index 18b3690bb69a..22ad069d1860 100644 >> --- a/mm/ksm.c >> +++ b/mm/ksm.c >> @@ -3068,7 +3068,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) >> if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) >> continue; >> - if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { >> + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, addr, rwc->arg)) { >> anon_vma_unlock_read(anon_vma); >> return; >> } >> diff --git a/mm/rmap.c b/mm/rmap.c >> index fb63d9256f09..17d43d104a0d 100644 >> --- a/mm/rmap.c >> +++ b/mm/rmap.c >> @@ -1202,8 +1202,7 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, >> if (!mapping) >> return 0; >> - __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc, >> - /* locked = */false); >> + __rmap_walk_file(NULL, mapping, pgoff, nr_pages, &rwc, false); >> return state.cleaned; >> } >> @@ -2806,6 +2805,7 @@ static void rmap_walk_anon(struct folio *folio, >> struct anon_vma *anon_vma; >> pgoff_t pgoff_start, pgoff_end; >> struct anon_vma_chain *avc; >> + unsigned long nr_pages; >> if (locked) { >> 
anon_vma = folio_anon_vma(folio); >> @@ -2817,13 +2817,13 @@ static void rmap_walk_anon(struct folio *folio, >> if (!anon_vma) >> return; >> + nr_pages = folio_nr_pages(folio); >> pgoff_start = folio_pgoff(folio); >> - pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; >> + pgoff_end = pgoff_start + nr_pages - 1; >> anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, >> pgoff_start, pgoff_end) { >> struct vm_area_struct *vma = avc->vma; >> - unsigned long address = vma_address(vma, pgoff_start, >> - folio_nr_pages(folio)); >> + unsigned long address = vma_address(vma, pgoff_start, nr_pages); >> VM_BUG_ON_VMA(address == -EFAULT, vma); >> cond_resched(); >> @@ -2831,7 +2831,7 @@ static void rmap_walk_anon(struct folio *folio, >> if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) >> continue; >> - if (!rwc->rmap_one(folio, vma, address, rwc->arg)) >> + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg)) >> break; >> if (rwc->done && rwc->done(folio)) >> break; >> @@ -2894,7 +2894,7 @@ static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, >> if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) >> continue; >> - if (!rwc->rmap_one(folio, vma, address, rwc->arg)) >> + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg)) >> goto done; >> if (rwc->done && rwc->done(folio)) >> goto done;
On Thu, Jun 19, 2025 at 03:50:40PM +0800, Ye Liu wrote: > From: Ye Liu <liuye@kylinos.cn> > > Add NULL pointer checks for rmap_one callback in rmap_walk operations > to prevent potential NULL pointer dereferences. Also clean up some > code by removing redundant comments and caching folio_nr_pages(). > > Signed-off-by: Ye Liu <liuye@kylinos.cn> No sorry this patch is no good, none of these changes add any value. > --- > mm/ksm.c | 2 +- > mm/rmap.c | 14 +++++++------- > 2 files changed, 8 insertions(+), 8 deletions(-) > > diff --git a/mm/ksm.c b/mm/ksm.c > index 18b3690bb69a..22ad069d1860 100644 > --- a/mm/ksm.c > +++ b/mm/ksm.c > @@ -3068,7 +3068,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) > if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) > continue; > > - if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { > + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, addr, rwc->arg)) { It's convention that this will be set. If it's not set, a kernel developer did something wrong, so why the hell are we checking this every single time? And why if we are are we just fine to do nothing? This is not a useful change sorry. > anon_vma_unlock_read(anon_vma); > return; > } > diff --git a/mm/rmap.c b/mm/rmap.c > index fb63d9256f09..17d43d104a0d 100644 > --- a/mm/rmap.c > +++ b/mm/rmap.c > @@ -1202,8 +1202,7 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, > if (!mapping) > return 0; > > - __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc, > - /* locked = */false); > + __rmap_walk_file(NULL, mapping, pgoff, nr_pages, &rwc, false); Please do not remove /* x = */ prefixes. This is not redundant in the slightest and aids readability so you know what's being referred to. 
> > return state.cleaned; > } > @@ -2806,6 +2805,7 @@ static void rmap_walk_anon(struct folio *folio, > struct anon_vma *anon_vma; > pgoff_t pgoff_start, pgoff_end; > struct anon_vma_chain *avc; > + unsigned long nr_pages; I don't think there's much value in adding this at this point to be honest. It's not expensive, the compiler knows what to do and there's two invocations. There's a trade off with noise here. > > if (locked) { > anon_vma = folio_anon_vma(folio); > @@ -2817,13 +2817,13 @@ static void rmap_walk_anon(struct folio *folio, > if (!anon_vma) > return; > > + nr_pages = folio_nr_pages(folio); > pgoff_start = folio_pgoff(folio); > - pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; > + pgoff_end = pgoff_start + nr_pages - 1; > anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, > pgoff_start, pgoff_end) { > struct vm_area_struct *vma = avc->vma; > - unsigned long address = vma_address(vma, pgoff_start, > - folio_nr_pages(folio)); > + unsigned long address = vma_address(vma, pgoff_start, nr_pages); > > VM_BUG_ON_VMA(address == -EFAULT, vma); > cond_resched(); > @@ -2831,7 +2831,7 @@ static void rmap_walk_anon(struct folio *folio, > if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) > continue; > > - if (!rwc->rmap_one(folio, vma, address, rwc->arg)) > + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg)) Same comment as with ksm. > break; > if (rwc->done && rwc->done(folio)) > break; > @@ -2894,7 +2894,7 @@ static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, > if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) > continue; > > - if (!rwc->rmap_one(folio, vma, address, rwc->arg)) > + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg)) Same comment as with ksm. > goto done; > if (rwc->done && rwc->done(folio)) > goto done; > -- > 2.25.1 >
© 2016 - 2025 Red Hat, Inc.