From: Kairui Song <kasong@tencent.com>

Cocci is confused by the try-lock-then-release-RCU-and-return logic
here. So separate the try-lock part out into a standalone helper. The
code is easier to follow too.

No feature change, fixes:

cocci warnings: (new ones prefixed by >>)
>> mm/list_lru.c:82:3-9: preceding lock on line 77
>> mm/list_lru.c:82:3-9: preceding lock on line 77
mm/list_lru.c:82:3-9: preceding lock on line 75
mm/list_lru.c:82:3-9: preceding lock on line 75

Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Julia Lawall <julia.lawall@inria.fr>
Closes: https://lore.kernel.org/r/202505252043.pbT1tBHJ-lkp@intel.com/
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/list_lru.c | 34 +++++++++++++++++++---------------
1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/mm/list_lru.c b/mm/list_lru.c
index 490473af3122..ec48b5dadf51 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -60,30 +60,34 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
return &lru->node[nid].lru;
}

+static inline bool lock_list_lru(struct list_lru_one *l, bool irq)
+{
+ if (irq)
+ spin_lock_irq(&l->lock);
+ else
+ spin_lock(&l->lock);
+ if (unlikely(READ_ONCE(l->nr_items) == LONG_MIN)) {
+ if (irq)
+ spin_unlock_irq(&l->lock);
+ else
+ spin_unlock(&l->lock);
+ return false;
+ }
+ return true;
+}
+
static inline struct list_lru_one *
lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
bool irq, bool skip_empty)
{
struct list_lru_one *l;
- long nr_items;

rcu_read_lock();
again:
l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
- if (likely(l)) {
- if (irq)
- spin_lock_irq(&l->lock);
- else
- spin_lock(&l->lock);
- nr_items = READ_ONCE(l->nr_items);
- if (likely(nr_items != LONG_MIN)) {
- rcu_read_unlock();
- return l;
- }
- if (irq)
- spin_unlock_irq(&l->lock);
- else
- spin_unlock(&l->lock);
+ if (likely(l) && lock_list_lru(l, irq)) {
+ rcu_read_unlock();
+ return l;
}
/*
* Caller may simply bail out if raced with reparenting or
--
2.49.0
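
The helper above encapsulates a common RCU pattern: look the object up
under rcu_read_lock(), take its lock, then re-check a "dead" sentinel
(nr_items == LONG_MIN here) that teardown sets before the object goes
away; on failure, unlock and retry the lookup. That is what the trailing
comment about racing with reparenting refers to. A minimal userspace
analogue of the pattern, with pthreads standing in for the spinlock and
all names hypothetical (a sketch of the idea, not kernel code):

#include <limits.h>
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for struct list_lru_one. */
struct lru_one {
	pthread_mutex_t lock;
	long nr_items;	/* LONG_MIN acts as the "dead" sentinel */
};

/* Lock @l and confirm it is still live; unlock and fail otherwise. */
static bool lock_lru_one(struct lru_one *l)
{
	pthread_mutex_lock(&l->lock);
	if (l->nr_items != LONG_MIN)
		return true;	/* locked, object still live */
	pthread_mutex_unlock(&l->lock);
	return false;	/* raced with teardown; caller retries the lookup */
}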
Hi Kairui,
On Tue, 27 May 2025 02:06:38 +0800 Kairui Song <ryncsn@gmail.com> wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Cocci is confused by the try-lock-then-release-RCU-and-return logic
> here. So separate the try-lock part out into a standalone helper. The
> code is easier to follow too.
>
> No feature change, fixes:
>
> cocci warnings: (new ones prefixed by >>)
> >> mm/list_lru.c:82:3-9: preceding lock on line 77
> >> mm/list_lru.c:82:3-9: preceding lock on line 77
> mm/list_lru.c:82:3-9: preceding lock on line 75
> mm/list_lru.c:82:3-9: preceding lock on line 75
>
> Reported-by: kernel test robot <lkp@intel.com>
> Reported-by: Julia Lawall <julia.lawall@inria.fr>
> Closes: https://lore.kernel.org/r/202505252043.pbT1tBHJ-lkp@intel.com/
> Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
> ---
> mm/list_lru.c | 34 +++++++++++++++++++---------------
> 1 file changed, 19 insertions(+), 15 deletions(-)
>
> diff --git a/mm/list_lru.c b/mm/list_lru.c
> index 490473af3122..ec48b5dadf51 100644
> --- a/mm/list_lru.c
> +++ b/mm/list_lru.c
> @@ -60,30 +60,34 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
> return &lru->node[nid].lru;
> }
>
> +static inline bool lock_list_lru(struct list_lru_one *l, bool irq)
> +{
> + if (irq)
> + spin_lock_irq(&l->lock);
> + else
> + spin_lock(&l->lock);
> + if (unlikely(READ_ONCE(l->nr_items) == LONG_MIN)) {
> + if (irq)
> + spin_unlock_irq(&l->lock);
> + else
> + spin_unlock(&l->lock);
> + return false;
> + }
I'd prefer 'if (likely(...)) return true;' to reduce indentation and make
it clear up front which case is the likely one; a sketch of that variant
follows this reply. But that's my personal preference and shouldn't block
this.
> + return true;
> +}
> +
> static inline struct list_lru_one *
> lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
> bool irq, bool skip_empty)
> {
> struct list_lru_one *l;
> - long nr_items;
>
> rcu_read_lock();
> again:
> l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
> - if (likely(l)) {
> - if (irq)
> - spin_lock_irq(&l->lock);
> - else
> - spin_lock(&l->lock);
> - nr_items = READ_ONCE(l->nr_items);
> - if (likely(nr_items != LONG_MIN)) {
> - rcu_read_unlock();
> - return l;
> - }
> - if (irq)
> - spin_unlock_irq(&l->lock);
> - else
> - spin_unlock(&l->lock);
> + if (likely(l) && lock_list_lru(l, irq)) {
> + rcu_read_unlock();
> + return l;
> }
Much easier to read, indeed :)
> /*
> * Caller may simply bail out if raced with reparenting or
> --
> 2.49.0
Thanks,
SJ
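
For concreteness, the early-return shape suggested above would look
roughly like this (a sketch of the reviewer's variant, not the code that
was actually applied; the locking and the sentinel check are unchanged
from the patch):

static inline bool lock_list_lru(struct list_lru_one *l, bool irq)
{
	if (irq)
		spin_lock_irq(&l->lock);
	else
		spin_lock(&l->lock);
	/* Likely case first: the list is live, return with the lock held. */
	if (likely(READ_ONCE(l->nr_items) != LONG_MIN))
		return true;
	if (irq)
		spin_unlock_irq(&l->lock);
	else
		spin_unlock(&l->lock);
	return false;
}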
[...]
On May 27, 2025, at 02:06, Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> Cocci is confused by the try-lock-then-release-RCU-and-return logic
> here. So separate the try-lock part out into a standalone helper. The
> code is easier to follow too.
>
> No feature change, fixes:
>
> cocci warnings: (new ones prefixed by >>)
>>> mm/list_lru.c:82:3-9: preceding lock on line 77
>>> mm/list_lru.c:82:3-9: preceding lock on line 77
> mm/list_lru.c:82:3-9: preceding lock on line 75
> mm/list_lru.c:82:3-9: preceding lock on line 75
>
> Reported-by: kernel test robot <lkp@intel.com>
> Reported-by: Julia Lawall <julia.lawall@inria.fr>
> Closes: https://lore.kernel.org/r/202505252043.pbT1tBHJ-lkp@intel.com/
> Signed-off-by: Kairui Song <kasong@tencent.com>

Reviewed-by: Muchun Song <muchun.song@linux.dev>

Thanks.
On 5/27/25 2:06 AM, Kairui Song wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Cocci is confused by the try-lock-then-release-RCU-and-return logic
> here. So separate the try-lock part out into a standalone helper. The
> code is easier to follow too.
>
> No feature change, fixes:
>
> cocci warnings: (new ones prefixed by >>)
>>> mm/list_lru.c:82:3-9: preceding lock on line 77
>>> mm/list_lru.c:82:3-9: preceding lock on line 77
> mm/list_lru.c:82:3-9: preceding lock on line 75
> mm/list_lru.c:82:3-9: preceding lock on line 75
>
> Reported-by: kernel test robot <lkp@intel.com>
> Reported-by: Julia Lawall <julia.lawall@inria.fr>
> Closes: https://lore.kernel.org/r/202505252043.pbT1tBHJ-lkp@intel.com/
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
> mm/list_lru.c | 34 +++++++++++++++++++---------------
> 1 file changed, 19 insertions(+), 15 deletions(-)
>
> diff --git a/mm/list_lru.c b/mm/list_lru.c
> index 490473af3122..ec48b5dadf51 100644
> --- a/mm/list_lru.c
> +++ b/mm/list_lru.c
> @@ -60,30 +60,34 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
> return &lru->node[nid].lru;
> }
>
> +static inline bool lock_list_lru(struct list_lru_one *l, bool irq)
> +{
> + if (irq)
> + spin_lock_irq(&l->lock);
> + else
> + spin_lock(&l->lock);
> + if (unlikely(READ_ONCE(l->nr_items) == LONG_MIN)) {
> + if (irq)
> + spin_unlock_irq(&l->lock);
> + else
> + spin_unlock(&l->lock);
> + return false;
> + }
> + return true;
> +}
> +
> static inline struct list_lru_one *
> lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
> bool irq, bool skip_empty)
> {
> struct list_lru_one *l;
> - long nr_items;
>
> rcu_read_lock();
> again:
> l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
> - if (likely(l)) {
> - if (irq)
> - spin_lock_irq(&l->lock);
> - else
> - spin_lock(&l->lock);
> - nr_items = READ_ONCE(l->nr_items);
> - if (likely(nr_items != LONG_MIN)) {
> - rcu_read_unlock();
> - return l;
> - }
> - if (irq)
> - spin_unlock_irq(&l->lock);
> - else
> - spin_unlock(&l->lock);
> + if (likely(l) && lock_list_lru(l, irq)) {
> + rcu_read_unlock();
> + return l;
> }
> /*
> * Caller may simply bail out if raced with reparenting or
And the code readability has also been improved.
Reviewed-by: Qi Zheng <zhengqi.arch@bytedance.com>
Thanks!