include/linux/mm.h | 9 +++++++++ kernel/rcu/rcuscale.c | 2 +- kernel/sched/fair.c | 5 ++--- mm/backing-dev.c | 2 +- mm/huge_memory.c | 2 +- mm/swap.c | 2 +- 6 files changed, 15 insertions(+), 7 deletions(-)
From: Ye Liu <liuye@kylinos.cn>
Replace repeated (20 - PAGE_SHIFT) calculations with standard macros:
- MB_TO_PAGES(mb) converts MB to page count
- PAGES_TO_MB(pages) converts pages to MB
No functional change.
Signed-off-by: Ye Liu <liuye@kylinos.cn>
---
include/linux/mm.h | 9 +++++++++
kernel/rcu/rcuscale.c | 2 +-
kernel/sched/fair.c | 5 ++---
mm/backing-dev.c | 2 +-
mm/huge_memory.c | 2 +-
mm/swap.c | 2 +-
6 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 957acde6ae62..0c1b2c074142 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -69,6 +69,15 @@ static inline void totalram_pages_add(long count)
extern void * high_memory;
+/*
+ * Convert between pages and MB
+ * 20 is the shift for 1MB (2^20 = 1MB)
+ * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
+ * So (20 - PAGE_SHIFT) converts between pages and MB
+ */
+#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
+#define MB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+
#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index b521d0455992..7484d8ad5767 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -796,7 +796,7 @@ kfree_scale_thread(void *arg)
pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
(unsigned long long)(end_time - start_time), kfree_loops,
rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
- (mem_begin - mem_during) >> (20 - PAGE_SHIFT));
+ PAGES_TO_MB(mem_begin - mem_during));
if (shutdown) {
smp_mb(); /* Assign before wake. */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b9b4bbbf0af6..ae1d9a7ef202 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1489,7 +1489,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
* by the PTE scanner and NUMA hinting faults should be trapped based
* on resident pages
*/
- nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
+ nr_scan_pages = MB_TO_PAGES(sysctl_numa_balancing_scan_size);
rss = get_mm_rss(p->mm);
if (!rss)
rss = nr_scan_pages;
@@ -1926,8 +1926,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
}
def_th = sysctl_numa_balancing_hot_threshold;
- rate_limit = sysctl_numa_balancing_promote_rate_limit << \
- (20 - PAGE_SHIFT);
+ rate_limit = MB_TO_PAGES(sysctl_numa_balancing_promote_rate_limit);
numa_promotion_adjust_threshold(pgdat, rate_limit, def_th);
th = pgdat->nbp_threshold ? : def_th;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 783904d8c5ef..e4d578e6121c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -510,7 +510,7 @@ static void wb_update_bandwidth_workfn(struct work_struct *work)
/*
* Initial write bandwidth: 100 MB/s
*/
-#define INIT_BW (100 << (20 - PAGE_SHIFT))
+#define INIT_BW MB_TO_PAGES(100)
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
gfp_t gfp)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 389620c65a5f..dcc33d9c300f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -911,7 +911,7 @@ static int __init hugepage_init(void)
* where the extra memory used could hurt more than TLB overhead
* is likely to save. The admin can still enable it through /sys.
*/
- if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
+ if (totalram_pages() < MB_TO_PAGES(512)) {
transparent_hugepage_flags = 0;
return 0;
}
diff --git a/mm/swap.c b/mm/swap.c
index 3632dd061beb..cb164f9ef9e3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1096,7 +1096,7 @@ static const struct ctl_table swap_sysctl_table[] = {
*/
void __init swap_setup(void)
{
- unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
+ unsigned long megs = PAGES_TO_MB(totalram_pages());
/* Use a smaller cluster for small-memory machines */
if (megs < 16)
--
2.43.0
On Thu, Jul 17, 2025 at 7:42 PM Ye Liu <ye.liu@linux.dev> wrote: > > From: Ye Liu <liuye@kylinos.cn> > > Replace repeated (20 - PAGE_SHIFT) calculations with standard macros: > - MB_TO_PAGES(mb) converts MB to page count > - PAGES_TO_MB(pages) converts pages to MB > > No functional change. Thanks for doing this. Acked-by: Chris Li <chrisl@kernel.org> Chris
On 17 Jul 2025, at 22:41, Ye Liu wrote: > From: Ye Liu <liuye@kylinos.cn> > > Replace repeated (20 - PAGE_SHIFT) calculations with standard macros: > - MB_TO_PAGES(mb) converts MB to page count > - PAGES_TO_MB(pages) converts pages to MB > > No functional change. > > Signed-off-by: Ye Liu <liuye@kylinos.cn> > --- > include/linux/mm.h | 9 +++++++++ > kernel/rcu/rcuscale.c | 2 +- > kernel/sched/fair.c | 5 ++--- > mm/backing-dev.c | 2 +- > mm/huge_memory.c | 2 +- > mm/swap.c | 2 +- > 6 files changed, 15 insertions(+), 7 deletions(-) > Thanks. Acked-by: Zi Yan <ziy@nvidia.com> -- Best Regards, Yan, Zi
On Fri, Jul 18, 2025 at 10:41:32AM +0800, Ye Liu wrote: > From: Ye Liu <liuye@kylinos.cn> > > Replace repeated (20 - PAGE_SHIFT) calculations with standard macros: > - MB_TO_PAGES(mb) converts MB to page count > - PAGES_TO_MB(pages) converts pages to MB > > No functional change. > > Signed-off-by: Ye Liu <liuye@kylinos.cn> Nice idea :) NOte I see arch/x86/include/asm/pgtable.h has a pages_to_mb() static inline declaration, but probably being an asm include can't ref mm.h so meh not a big deal. LGTM, so: Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> > --- > include/linux/mm.h | 9 +++++++++ > kernel/rcu/rcuscale.c | 2 +- > kernel/sched/fair.c | 5 ++--- > mm/backing-dev.c | 2 +- > mm/huge_memory.c | 2 +- > mm/swap.c | 2 +- > 6 files changed, 15 insertions(+), 7 deletions(-) > > diff --git a/include/linux/mm.h b/include/linux/mm.h > index 957acde6ae62..0c1b2c074142 100644 > --- a/include/linux/mm.h > +++ b/include/linux/mm.h > @@ -69,6 +69,15 @@ static inline void totalram_pages_add(long count) > > extern void * high_memory; > > +/* > + * Convert between pages and MB > + * 20 is the shift for 1MB (2^20 = 1MB) > + * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages) > + * So (20 - PAGE_SHIFT) converts between pages and MB > + */ > +#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT)) > +#define MB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) > + > #ifdef CONFIG_SYSCTL > extern int sysctl_legacy_va_layout; > #else > diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c > index b521d0455992..7484d8ad5767 100644 > --- a/kernel/rcu/rcuscale.c > +++ b/kernel/rcu/rcuscale.c > @@ -796,7 +796,7 @@ kfree_scale_thread(void *arg) > pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n", > (unsigned long long)(end_time - start_time), kfree_loops, > rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started), > - (mem_begin - mem_during) >> (20 - PAGE_SHIFT)); > + PAGES_TO_MB(mem_begin - 
mem_during)); > > if (shutdown) { > smp_mb(); /* Assign before wake. */ > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c > index b9b4bbbf0af6..ae1d9a7ef202 100644 > --- a/kernel/sched/fair.c > +++ b/kernel/sched/fair.c > @@ -1489,7 +1489,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p) > * by the PTE scanner and NUMA hinting faults should be trapped based > * on resident pages > */ > - nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT); > + nr_scan_pages = MB_TO_PAGES(sysctl_numa_balancing_scan_size); > rss = get_mm_rss(p->mm); > if (!rss) > rss = nr_scan_pages; > @@ -1926,8 +1926,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio, > } > > def_th = sysctl_numa_balancing_hot_threshold; > - rate_limit = sysctl_numa_balancing_promote_rate_limit << \ > - (20 - PAGE_SHIFT); > + rate_limit = MB_TO_PAGES(sysctl_numa_balancing_promote_rate_limit); > numa_promotion_adjust_threshold(pgdat, rate_limit, def_th); > > th = pgdat->nbp_threshold ? : def_th; > diff --git a/mm/backing-dev.c b/mm/backing-dev.c > index 783904d8c5ef..e4d578e6121c 100644 > --- a/mm/backing-dev.c > +++ b/mm/backing-dev.c > @@ -510,7 +510,7 @@ static void wb_update_bandwidth_workfn(struct work_struct *work) > /* > * Initial write bandwidth: 100 MB/s > */ > -#define INIT_BW (100 << (20 - PAGE_SHIFT)) > +#define INIT_BW MB_TO_PAGES(100) > > static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, > gfp_t gfp) > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > index 389620c65a5f..dcc33d9c300f 100644 > --- a/mm/huge_memory.c > +++ b/mm/huge_memory.c > @@ -911,7 +911,7 @@ static int __init hugepage_init(void) > * where the extra memory used could hurt more than TLB overhead > * is likely to save. The admin can still enable it through /sys. 
> */ > - if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { > + if (totalram_pages() < MB_TO_PAGES(512)) { > transparent_hugepage_flags = 0; > return 0; > } > diff --git a/mm/swap.c b/mm/swap.c > index 3632dd061beb..cb164f9ef9e3 100644 > --- a/mm/swap.c > +++ b/mm/swap.c > @@ -1096,7 +1096,7 @@ static const struct ctl_table swap_sysctl_table[] = { > */ > void __init swap_setup(void) > { > - unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); > + unsigned long megs = PAGES_TO_MB(totalram_pages()); > > /* Use a smaller cluster for small-memory machines */ > if (megs < 16) > -- > 2.43.0 >
On Fri, Jul 18, 2025 at 10:57:36AM +0100, Lorenzo Stoakes wrote: > NOte I see arch/x86/include/asm/pgtable.h has a pages_to_mb() static inline > declaration, but probably being an asm include can't ref mm.h so meh not a big > deal. Should probably go to linux/sizes.h, except that it uses PAGE_SIZE which isn't available there. But asm-generic/getorder.h might be a good place for it. (hm, is including getorder.h safe by itself? looks like it relies on something else to bring in the definition of PAGE_SHIFT)
On 18/07/25 8:11 am, Ye Liu wrote: > From: Ye Liu <liuye@kylinos.cn> > > Replace repeated (20 - PAGE_SHIFT) calculations with standard macros: > - MB_TO_PAGES(mb) converts MB to page count > - PAGES_TO_MB(pages) converts pages to MB > > No functional change. > > Signed-off-by: Ye Liu <liuye@kylinos.cn> > --- > sh and x86 have their own pages_to_mb, drivers/target/target_core_user.c too. I guess no one likes to clean the kernel :)
On Fri, Jul 18, 2025 at 03:16:55PM +0530, Dev Jain wrote: > > On 18/07/25 8:11 am, Ye Liu wrote: > > From: Ye Liu <liuye@kylinos.cn> > > > > Replace repeated (20 - PAGE_SHIFT) calculations with standard macros: > > - MB_TO_PAGES(mb) converts MB to page count > > - PAGES_TO_MB(pages) converts pages to MB > > > > No functional change. > > > > Signed-off-by: Ye Liu <liuye@kylinos.cn> > > --- > > sh and x86 have their own pages_to_mb, drivers/target/target_core_user.c too. > I guess no one likes to clean the kernel :) > *A wild Lorenzo appears*
On 18/07/25 8:11 am, Ye Liu wrote: > From: Ye Liu <liuye@kylinos.cn> > > Replace repeated (20 - PAGE_SHIFT) calculations with standard macros: > - MB_TO_PAGES(mb) converts MB to page count > - PAGES_TO_MB(pages) converts pages to MB > > No functional change. > > Signed-off-by: Ye Liu <liuye@kylinos.cn> > --- > Reviewed-by: Dev Jain <dev.jain@arm.com>
On 18.07.25 04:41, Ye Liu wrote: > From: Ye Liu <liuye@kylinos.cn> > > Replace repeated (20 - PAGE_SHIFT) calculations with standard macros: > - MB_TO_PAGES(mb) converts MB to page count > - PAGES_TO_MB(pages) converts pages to MB > > No functional change. > > Signed-off-by: Ye Liu <liuye@kylinos.cn> > --- Acked-by: David Hildenbrand <david@redhat.com> -- Cheers, David / dhildenb
On Fri, 18 Jul 2025 10:41:32 +0800 Ye Liu <ye.liu@linux.dev> wrote: > From: Ye Liu <liuye@kylinos.cn> > > Replace repeated (20 - PAGE_SHIFT) calculations with standard macros: > - MB_TO_PAGES(mb) converts MB to page count > - PAGES_TO_MB(pages) converts pages to MB > > No functional change. > > ... > > +/* > + * Convert between pages and MB > + * 20 is the shift for 1MB (2^20 = 1MB) > + * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages) > + * So (20 - PAGE_SHIFT) converts between pages and MB > + */ > +#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT)) > +#define MB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) > + > #ifdef CONFIG_SYSCTL > extern int sysctl_legacy_va_layout; > #else > > ... > > @@ -796,7 +796,7 @@ kfree_scale_thread(void *arg) > pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n", > (unsigned long long)(end_time - start_time), kfree_loops, > rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started), > - (mem_begin - mem_during) >> (20 - PAGE_SHIFT)); > + PAGES_TO_MB(mem_begin - mem_during)); > > if (shutdown) { > smp_mb(); /* Assign before wake. */ But, but, but, obscure hard-coded magic numbers are there for our job security! Oh well, we got caught. Applied, thanks.
© 2016 - 2025 Red Hat, Inc.