rcu_sync->gp_count is updated under the protection of ->rss_lock but read
locklessly by the WARN_ON() checks, and KCSAN noted the data race.
Move these WARN_ON_ONCE() checks under the lock and remove the READ_ONCE(),
which is no longer needed.
Reported-by: "Paul E. McKenney" <paulmck@kernel.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---
kernel/rcu/sync.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 86df878a2fee..b50fde082198 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -152,9 +152,9 @@ void rcu_sync_enter(struct rcu_sync *rsp)
void rcu_sync_exit(struct rcu_sync *rsp)
{
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
- WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);
spin_lock_irq(&rsp->rss_lock);
+ WARN_ON_ONCE(rsp->gp_count == 0);
if (!--rsp->gp_count) {
if (rsp->gp_state == GP_PASSED) {
WRITE_ONCE(rsp->gp_state, GP_EXIT);
@@ -174,10 +174,10 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
{
int gp_state;
- WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
spin_lock_irq(&rsp->rss_lock);
+ WARN_ON_ONCE(rsp->gp_count != 0);
if (rsp->gp_state == GP_REPLAY)
WRITE_ONCE(rsp->gp_state, GP_EXIT);
gp_state = rsp->gp_state;
--
2.25.1.362.g51ebf55
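For reference, this is roughly how rcu_sync_exit() reads with the patch above
applied. The lines outside the hunk (the later state handling and the unlock)
are filled in from context and may differ in detail from the actual tree:

	void rcu_sync_exit(struct rcu_sync *rsp)
	{
		WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);

		spin_lock_irq(&rsp->rss_lock);
		WARN_ON_ONCE(rsp->gp_count == 0);	/* now checked under ->rss_lock */
		if (!--rsp->gp_count) {
			if (rsp->gp_state == GP_PASSED) {
				WRITE_ONCE(rsp->gp_state, GP_EXIT);
				rcu_sync_call(rsp);
			}
			/* remaining grace-period state handling elided */
		}
		spin_unlock_irq(&rsp->rss_lock);
	}

Every access to ->gp_count now happens while ->rss_lock is held, so no
READ_ONCE()/WRITE_ONCE() marking is needed on that field.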
On Sun, May 12, 2024 at 01:19:48PM +0200, Oleg Nesterov wrote:
> rcu_sync->gp_count is updated under the protection of ->rss_lock but read
> locklessly by the WARN_ON() checks, and KCSAN noted the data race.
>
> Move these WARN_ON_ONCE() checks under the lock and remove the READ_ONCE(),
> which is no longer needed.
>
> Reported-by: "Paul E. McKenney" <paulmck@kernel.org>
> Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Very good, thank you!
Due to inattention on my part, the patches were sent late, so the patch
you are (rightly) complaining about is on its way in. So what I did was
to port your patch on top of that one as shown below. Left to myself,
I would be thinking in terms of the v6.11 merge window. Please let me
know if this is more urgent than that.
And as always, please let me know if I messed anything up in the port.
Thanx, Paul
------------------------------------------------------------------------
commit 8d75fb302aaa97693c2294ded48a472e4956d615
Author: Oleg Nesterov <oleg@redhat.com>
Date: Sun May 12 08:02:07 2024 -0700
rcu: Eliminate lockless accesses to rcu_sync->gp_count
The rcu_sync structure's ->gp_count field is always accessed under the
protection of that same structure's ->rss_lock field, with the exception
of a pair of WARN_ON_ONCE() calls just prior to acquiring that lock in
functions rcu_sync_exit() and rcu_sync_dtor(). These lockless accesses
are unnecessary and impair KCSAN's ability to catch bugs that might be
inserted via other lockless accesses.
This commit therefore moves those WARN_ON_ONCE() calls under the lock.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 6c2bd9001adcd..05bfe69fdb0bb 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -151,15 +151,11 @@ void rcu_sync_enter(struct rcu_sync *rsp)
*/
void rcu_sync_exit(struct rcu_sync *rsp)
{
- int gpc;
-
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
- WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);
spin_lock_irq(&rsp->rss_lock);
- gpc = rsp->gp_count - 1;
- WRITE_ONCE(rsp->gp_count, gpc);
- if (!gpc) {
+ WARN_ON_ONCE(rsp->gp_count == 0);
+ if (!--rsp->gp_count) {
if (rsp->gp_state == GP_PASSED) {
WRITE_ONCE(rsp->gp_state, GP_EXIT);
rcu_sync_call(rsp);
@@ -178,10 +174,10 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
{
int gp_state;
- WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
spin_lock_irq(&rsp->rss_lock);
+ WARN_ON_ONCE(rsp->gp_count);
if (rsp->gp_state == GP_REPLAY)
WRITE_ONCE(rsp->gp_state, GP_EXIT);
gp_state = rsp->gp_state;
On 05/12, Paul E. McKenney wrote:
>
> --- a/kernel/rcu/sync.c
> +++ b/kernel/rcu/sync.c
> @@ -151,15 +151,11 @@ void rcu_sync_enter(struct rcu_sync *rsp)
> */
> void rcu_sync_exit(struct rcu_sync *rsp)
> {
> - int gpc;
> -
> WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
> - WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);
>
> spin_lock_irq(&rsp->rss_lock);
> - gpc = rsp->gp_count - 1;
> - WRITE_ONCE(rsp->gp_count, gpc);
> - if (!gpc) {
> + WARN_ON_ONCE(rsp->gp_count == 0);
> + if (!--rsp->gp_count) {
> if (rsp->gp_state == GP_PASSED) {
> WRITE_ONCE(rsp->gp_state, GP_EXIT);
> rcu_sync_call(rsp);
> @@ -178,10 +174,10 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
> {
> int gp_state;
>
> - WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
> WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
>
> spin_lock_irq(&rsp->rss_lock);
> + WARN_ON_ONCE(rsp->gp_count);
> if (rsp->gp_state == GP_REPLAY)
> WRITE_ONCE(rsp->gp_state, GP_EXIT);
> gp_state = rsp->gp_state;
Thanks Paul!
But then I think this change can also revert this chunk from the previous
patch:
@@ -122,7 +122,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
* we are called at early boot time but this shouldn't happen.
*/
}
- rsp->gp_count++;
+ WRITE_ONCE(rsp->gp_count, rsp->gp_count + 1);
spin_unlock_irq(&rsp->rss_lock);
if (gp_state == GP_IDLE) {
Thanks again,
Oleg.
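To spell the point out: once the WARN_ON_ONCE() checks run under ->rss_lock,
there is no lockless reader of ->gp_count left, so the plain increment in
rcu_sync_enter() is sufficient. A minimal sketch of that critical section,
with the surrounding code reconstructed from the hunk above and possibly
differing in detail from the tree:

	spin_lock_irq(&rsp->rss_lock);
	/* gp_state bookkeeping elided */
	rsp->gp_count++;	/* plain access; all readers hold ->rss_lock */
	spin_unlock_irq(&rsp->rss_lock);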
On Sun, May 12, 2024 at 06:55:29PM +0200, Oleg Nesterov wrote:
> On 05/12, Paul E. McKenney wrote:
> >
> > --- a/kernel/rcu/sync.c
> > +++ b/kernel/rcu/sync.c
> > @@ -151,15 +151,11 @@ void rcu_sync_enter(struct rcu_sync *rsp)
> > */
> > void rcu_sync_exit(struct rcu_sync *rsp)
> > {
> > - int gpc;
> > -
> > WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
> > - WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);
> >
> > spin_lock_irq(&rsp->rss_lock);
> > - gpc = rsp->gp_count - 1;
> > - WRITE_ONCE(rsp->gp_count, gpc);
> > - if (!gpc) {
> > + WARN_ON_ONCE(rsp->gp_count == 0);
> > + if (!--rsp->gp_count) {
> > if (rsp->gp_state == GP_PASSED) {
> > WRITE_ONCE(rsp->gp_state, GP_EXIT);
> > rcu_sync_call(rsp);
> > @@ -178,10 +174,10 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
> > {
> > int gp_state;
> >
> > - WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
> > WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
> >
> > spin_lock_irq(&rsp->rss_lock);
> > + WARN_ON_ONCE(rsp->gp_count);
> > if (rsp->gp_state == GP_REPLAY)
> > WRITE_ONCE(rsp->gp_state, GP_EXIT);
> > gp_state = rsp->gp_state;
>
> Thanks Paul!
>
> But then I think this change can also revert this chunk from the previous
> patch:
>
> @@ -122,7 +122,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
> * we are called at early boot time but this shouldn't happen.
> */
> }
> - rsp->gp_count++;
> + WRITE_ONCE(rsp->gp_count, rsp->gp_count + 1);
> spin_unlock_irq(&rsp->rss_lock);
>
> if (gp_state == GP_IDLE) {
Good catch, thank you! How about like this?
Thanx, Paul
------------------------------------------------------------------------
commit 3e75ce9876396a770a0fcd8eecd83b9f6469f49c
Author: Oleg Nesterov <oleg@redhat.com>
Date: Sun May 12 08:02:07 2024 -0700
rcu: Eliminate lockless accesses to rcu_sync->gp_count
The rcu_sync structure's ->gp_count field is always accessed under the
protection of that same structure's ->rss_lock field, with the exception
of a pair of WARN_ON_ONCE() calls just prior to acquiring that lock in
functions rcu_sync_exit() and rcu_sync_dtor(). These lockless accesses
are unnecessary and impair KCSAN's ability to catch bugs that might be
inserted via other lockless accesses.
This commit therefore moves those WARN_ON_ONCE() calls under the lock.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 6c2bd9001adcd..da60a9947c005 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -122,7 +122,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
* we are called at early boot time but this shouldn't happen.
*/
}
- WRITE_ONCE(rsp->gp_count, rsp->gp_count + 1);
+ rsp->gp_count++;
spin_unlock_irq(&rsp->rss_lock);
if (gp_state == GP_IDLE) {
@@ -151,15 +151,11 @@ void rcu_sync_enter(struct rcu_sync *rsp)
*/
void rcu_sync_exit(struct rcu_sync *rsp)
{
- int gpc;
-
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
- WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);
spin_lock_irq(&rsp->rss_lock);
- gpc = rsp->gp_count - 1;
- WRITE_ONCE(rsp->gp_count, gpc);
- if (!gpc) {
+ WARN_ON_ONCE(rsp->gp_count == 0);
+ if (!--rsp->gp_count) {
if (rsp->gp_state == GP_PASSED) {
WRITE_ONCE(rsp->gp_state, GP_EXIT);
rcu_sync_call(rsp);
@@ -178,10 +174,10 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
{
int gp_state;
- WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
spin_lock_irq(&rsp->rss_lock);
+ WARN_ON_ONCE(rsp->gp_count);
if (rsp->gp_state == GP_REPLAY)
WRITE_ONCE(rsp->gp_state, GP_EXIT);
gp_state = rsp->gp_state;
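With this version applied, rcu_sync_dtor() ends up looking roughly as follows;
the tail of the function is reconstructed from context and may differ in
detail. As in rcu_sync_exit(), the ->gp_count check is now made while holding
->rss_lock:

	void rcu_sync_dtor(struct rcu_sync *rsp)
	{
		int gp_state;

		WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

		spin_lock_irq(&rsp->rss_lock);
		WARN_ON_ONCE(rsp->gp_count);	/* checked under ->rss_lock */
		if (rsp->gp_state == GP_REPLAY)
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
		gp_state = rsp->gp_state;
		spin_unlock_irq(&rsp->rss_lock);

		/* waiting for any outstanding callback elided */
	}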