From: Chen Ridong <chenridong@huawei.com>
The mem_cgroup_size() helper is used only in apply_proportional_protection()
to read the current memory usage. Its name makes its semantics unclear, and
it is inconsistent with the other call sites, which call page_counter_read()
directly for the same purpose.

Remove the helper and replace its sole usage with page_counter_read() for
clarity. Additionally, rename the local variable 'cgroup_size' to 'usage'
to better reflect its meaning.

This change is safe because, in apply_proportional_protection(),
page_counter_read() is only called when memcg is enabled.

No functional changes intended.
Signed-off-by: Chen Ridong <chenridong@huawei.com>
---
 include/linux/memcontrol.h | 7 -------
 mm/memcontrol.c            | 5 -----
 mm/vmscan.c                | 6 +++---
 3 files changed, 3 insertions(+), 15 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6a48398a1f4e..bedeb606c691 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -919,8 +919,6 @@ static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
-unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
-
 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
 				  struct task_struct *p);
 
@@ -1328,11 +1326,6 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 	return 0;
 }
 
-static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
-{
-	return 0;
-}
-
 static inline void
 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dbe7d8f93072..659ce171b1b3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1621,11 +1621,6 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 	return max;
 }
 
-unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
-{
-	return page_counter_read(&memcg->memory);
-}
-
 void __memcg_memory_event(struct mem_cgroup *memcg,
 			  enum memcg_memory_event event, bool allow_spinning)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 670fe9fae5ba..35175f7b7f6e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2485,7 +2485,7 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
 		 * again by how much of the total memory used is under
 		 * hard protection.
 		 */
-		unsigned long cgroup_size = mem_cgroup_size(memcg);
+		unsigned long usage = page_counter_read(&memcg->memory);
 		unsigned long protection;
 
 		/* memory.low scaling, make sure we retry before OOM */
@@ -2497,9 +2497,9 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
 		}
 
 		/* Avoid TOCTOU with earlier protection check */
-		cgroup_size = max(cgroup_size, protection);
+		usage = max(usage, protection);
 
-		scan -= scan * protection / (cgroup_size + 1);
+		scan -= scan * protection / (usage + 1);
 
 		/*
 		 * Minimally target SWAP_CLUSTER_MAX pages to keep
--
2.34.1
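
The scaling step this patch touches, scan -= scan * protection / (usage + 1),
can be tried out in isolation. Below is a minimal user-space sketch, not
kernel code; the values of scan, usage and protection are made up purely for
illustration, and the kernel's max() macro is replaced by a plain comparison
to keep the program self-contained:

#include <stdio.h>

int main(void)
{
	unsigned long scan = 1000;	/* candidate pages to scan */
	unsigned long usage = 1024;	/* what page_counter_read() would report */
	unsigned long protection = 512;	/* effective memory.min/low, in pages */

	/* avoid TOCTOU: usage may have dropped below the protection value */
	if (usage < protection)
		usage = protection;

	/* half of the usage is protected, so scan pressure roughly halves */
	scan -= scan * protection / (usage + 1);

	printf("scaled scan: %lu\n", scan);	/* prints 501 */
	return 0;
}

With half of the cgroup's usage under protection, reclaim pressure drops by
about half instead of being switched off entirely, which is exactly the
non-binary behavior the comment block in vmscan.c describes.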
Hi Chen,
kernel test robot noticed the following build errors:
[auto build test ERROR on next-20251209]
url: https://github.com/intel-lab-lkp/linux/commits/Chen-Ridong/memcg-move-mem_cgroup_usage-memcontrol-v1-c/20251209-211854
base: next-20251209
patch link: https://lore.kernel.org/r/20251209130251.1988615-3-chenridong%40huaweicloud.com
patch subject: [PATCH -next 2/2] memcg: remove mem_cgroup_size()
config: i386-allnoconfig (https://download.01.org/0day-ci/archive/20251210/202512100924.LqJqXM7P-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251210/202512100924.LqJqXM7P-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202512100924.LqJqXM7P-lkp@intel.com/
All errors (new ones prefixed by >>):
   mm/vmscan.c: In function 'apply_proportional_protection':
>> mm/vmscan.c:2488:63: error: invalid use of undefined type 'struct mem_cgroup'
    2488 |                 unsigned long usage = page_counter_read(&memcg->memory);
         |                                                               ^~
vim +2488 mm/vmscan.c
  2450	
  2451	static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
  2452						struct scan_control *sc, unsigned long scan)
  2453	{
  2454		unsigned long min, low;
  2455	
  2456		mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low);
  2457	
  2458		if (min || low) {
  2459			/*
  2460			 * Scale a cgroup's reclaim pressure by proportioning
  2461			 * its current usage to its memory.low or memory.min
  2462			 * setting.
  2463			 *
  2464			 * This is important, as otherwise scanning aggression
  2465			 * becomes extremely binary -- from nothing as we
  2466			 * approach the memory protection threshold, to totally
  2467			 * nominal as we exceed it. This results in requiring
  2468			 * setting extremely liberal protection thresholds. It
  2469			 * also means we simply get no protection at all if we
  2470			 * set it too low, which is not ideal.
  2471			 *
  2472			 * If there is any protection in place, we reduce scan
  2473			 * pressure by how much of the total memory used is
  2474			 * within protection thresholds.
  2475			 *
  2476			 * There is one special case: in the first reclaim pass,
  2477			 * we skip over all groups that are within their low
  2478			 * protection. If that fails to reclaim enough pages to
  2479			 * satisfy the reclaim goal, we come back and override
  2480			 * the best-effort low protection. However, we still
  2481			 * ideally want to honor how well-behaved groups are in
  2482			 * that case instead of simply punishing them all
  2483			 * equally. As such, we reclaim them based on how much
  2484			 * memory they are using, reducing the scan pressure
  2485			 * again by how much of the total memory used is under
  2486			 * hard protection.
  2487			 */
> 2488			unsigned long usage = page_counter_read(&memcg->memory);
  2489			unsigned long protection;
  2490	
  2491			/* memory.low scaling, make sure we retry before OOM */
  2492			if (!sc->memcg_low_reclaim && low > min) {
  2493				protection = low;
  2494				sc->memcg_low_skipped = 1;
  2495			} else {
  2496				protection = min;
  2497			}
  2498	
  2499			/* Avoid TOCTOU with earlier protection check */
  2500			usage = max(usage, protection);
  2501	
  2502			scan -= scan * protection / (usage + 1);
  2503	
  2504			/*
  2505			 * Minimally target SWAP_CLUSTER_MAX pages to keep
  2506			 * reclaim moving forwards, avoiding decrementing
  2507			 * sc->priority further than desirable.
  2508			 */
  2509			scan = max(scan, SWAP_CLUSTER_MAX);
  2510		}
  2511		return scan;
  2512	}
  2513	
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
On 2025/12/10 9:35, kernel test robot wrote:
> Hi Chen,
> 
> kernel test robot noticed the following build errors:
> 
> [auto build test ERROR on next-20251209]
> 
> url:    https://github.com/intel-lab-lkp/linux/commits/Chen-Ridong/memcg-move-mem_cgroup_usage-memcontrol-v1-c/20251209-211854
> base:   next-20251209
> patch link:    https://lore.kernel.org/r/20251209130251.1988615-3-chenridong%40huaweicloud.com
> patch subject: [PATCH -next 2/2] memcg: remove mem_cgroup_size()
> config: i386-allnoconfig (https://download.01.org/0day-ci/archive/20251210/202512100924.LqJqXM7P-lkp@intel.com/config)
> compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251210/202512100924.LqJqXM7P-lkp@intel.com/reproduce)
> 
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <lkp@intel.com>
> | Closes: https://lore.kernel.org/oe-kbuild-all/202512100924.LqJqXM7P-lkp@intel.com/
> 
> All errors (new ones prefixed by >>):
> 
>    mm/vmscan.c: In function 'apply_proportional_protection':
> >> mm/vmscan.c:2488:63: error: invalid use of undefined type 'struct mem_cgroup'
>     2488 |                 unsigned long usage = page_counter_read(&memcg->memory);
>          |                                                               ^~
> 
> 
> vim +2488 mm/vmscan.c

Oh, I missed the CONFIG_MEMCG=n case, will fix it, thanks.

-- 
Best regards,
Ridong
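
Both failing configs (i386-allnoconfig and arm-allnoconfig) build with
CONFIG_MEMCG=n, where struct mem_cgroup is only forward-declared, so
&memcg->memory cannot compile even though the !CONFIG_MEMCG stub of
mem_cgroup_protection() reports min == low == 0 and the protected branch is
never taken. One possible shape for such a fix is sketched below; the helper
name memcg_usage() is hypothetical, and this is not necessarily what the v2
patch will do:

#ifdef CONFIG_MEMCG
static unsigned long memcg_usage(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}
#else
static unsigned long memcg_usage(struct mem_cgroup *memcg)
{
	/*
	 * Never reached: without CONFIG_MEMCG, mem_cgroup_protection()
	 * reports min == low == 0, so the protected branch is dead code,
	 * but it still has to compile against the forward declaration.
	 */
	return 0;
}
#endif

apply_proportional_protection() would then call memcg_usage(memcg) instead
of dereferencing memcg->memory directly.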
Hi Chen,
kernel test robot noticed the following build errors:
[auto build test ERROR on next-20251209]
url: https://github.com/intel-lab-lkp/linux/commits/Chen-Ridong/memcg-move-mem_cgroup_usage-memcontrol-v1-c/20251209-211854
base: next-20251209
patch link: https://lore.kernel.org/r/20251209130251.1988615-3-chenridong%40huaweicloud.com
patch subject: [PATCH -next 2/2] memcg: remove mem_cgroup_size()
config: arm-allnoconfig (https://download.01.org/0day-ci/archive/20251210/202512100939.F1LEdUev-lkp@intel.com/config)
compiler: clang version 22.0.0git (https://github.com/llvm/llvm-project 6ec8c4351cfc1d0627d1633b02ea787bd29c77d8)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251210/202512100939.F1LEdUev-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202512100939.F1LEdUev-lkp@intel.com/
All errors (new ones prefixed by >>):
>> mm/vmscan.c:2488:49: error: incomplete definition of type 'struct mem_cgroup'
    2488 |                 unsigned long usage = page_counter_read(&memcg->memory);
         |                                                          ~~~~~^
   include/linux/mm_types.h:36:8: note: forward declaration of 'struct mem_cgroup'
      36 | struct mem_cgroup;
         |        ^
   1 error generated.
vim +2488 mm/vmscan.c
  2450	
  2451	static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
  2452						struct scan_control *sc, unsigned long scan)
  2453	{
  2454		unsigned long min, low;
  2455	
  2456		mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low);
  2457	
  2458		if (min || low) {
  2459			/*
  2460			 * Scale a cgroup's reclaim pressure by proportioning
  2461			 * its current usage to its memory.low or memory.min
  2462			 * setting.
  2463			 *
  2464			 * This is important, as otherwise scanning aggression
  2465			 * becomes extremely binary -- from nothing as we
  2466			 * approach the memory protection threshold, to totally
  2467			 * nominal as we exceed it. This results in requiring
  2468			 * setting extremely liberal protection thresholds. It
  2469			 * also means we simply get no protection at all if we
  2470			 * set it too low, which is not ideal.
  2471			 *
  2472			 * If there is any protection in place, we reduce scan
  2473			 * pressure by how much of the total memory used is
  2474			 * within protection thresholds.
  2475			 *
  2476			 * There is one special case: in the first reclaim pass,
  2477			 * we skip over all groups that are within their low
  2478			 * protection. If that fails to reclaim enough pages to
  2479			 * satisfy the reclaim goal, we come back and override
  2480			 * the best-effort low protection. However, we still
  2481			 * ideally want to honor how well-behaved groups are in
  2482			 * that case instead of simply punishing them all
  2483			 * equally. As such, we reclaim them based on how much
  2484			 * memory they are using, reducing the scan pressure
  2485			 * again by how much of the total memory used is under
  2486			 * hard protection.
  2487			 */
> 2488			unsigned long usage = page_counter_read(&memcg->memory);
  2489			unsigned long protection;
  2490	
  2491			/* memory.low scaling, make sure we retry before OOM */
  2492			if (!sc->memcg_low_reclaim && low > min) {
  2493				protection = low;
  2494				sc->memcg_low_skipped = 1;
  2495			} else {
  2496				protection = min;
  2497			}
  2498	
  2499			/* Avoid TOCTOU with earlier protection check */
  2500			usage = max(usage, protection);
  2501	
  2502			scan -= scan * protection / (usage + 1);
  2503	
  2504			/*
  2505			 * Minimally target SWAP_CLUSTER_MAX pages to keep
  2506			 * reclaim moving forwards, avoiding decrementing
  2507			 * sc->priority further than desirable.
  2508			 */
  2509			scan = max(scan, SWAP_CLUSTER_MAX);
  2510		}
  2511		return scan;
  2512	}
  2513	
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki