[PATCH v7 2/3] io_uring/msg_ring: drop unnecessary submitter_task checks

Posted by Caleb Sander Mateos 1 month ago
__io_msg_ring_data() checks that target_ctx doesn't have
IORING_SETUP_R_DISABLED set before calling io_msg_data_remote(), which
calls io_msg_remote_post(). So submitter_task can't be modified
concurrently with the read in io_msg_remote_post(). Additionally,
submitter_task must exist: io_msg_data_remote() is only called if
io_msg_need_remote() returns true, i.e. task_complete is set, which
requires IORING_SETUP_DEFER_TASKRUN, which in turn requires
IORING_SETUP_SINGLE_ISSUER. And submitter_task is assigned in
io_uring_create() or io_register_enable_rings() before any
IORING_SETUP_SINGLE_ISSUER io_ring_ctx is enabled.

Similarly, io_msg_send_fd() checks IORING_SETUP_R_DISABLED and
io_msg_need_remote() before calling io_msg_fd_remote(). submitter_task
therefore can't be modified concurrently with the read in
io_msg_fd_remote() and must be non-NULL.

io_register_enable_rings() calls can't race with each other either,
since io_register_enable_rings() is called from io_uring_register() ->
__io_uring_register() with uring_lock held.

Thus, replace the READ_ONCE() and WRITE_ONCE() of submitter_task with
plain loads and stores, and remove the now-unnecessary NULL checks of
submitter_task in io_msg_remote_post() and io_msg_fd_remote().

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
---
 io_uring/io_uring.c |  7 +------
 io_uring/msg_ring.c | 18 +++++-------------
 io_uring/register.c |  2 +-
 3 files changed, 7 insertions(+), 20 deletions(-)
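
Not part of the patch: a self-contained userspace model of the ordering
argument above, for anyone who wants to poke at it. C11 acquire/release
stands in for the smp_store_release()/smp_load_acquire() pairing
introduced in patch 1; the types, function names and the exact
flag-clearing expression below are illustrative, not the kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SETUP_R_DISABLED (1u << 0)

struct task { const char *comm; };

struct ring_ctx {
        atomic_uint flags;              /* stands in for ctx->flags */
        struct task *submitter_task;    /* plain pointer, as in io_ring_ctx */
};

static struct task submitter = { "submitter" };
static struct ring_ctx ctx = { .flags = SETUP_R_DISABLED };

/* models io_register_enable_rings(): plain store, then release-clear */
static void *enable_rings(void *arg)
{
        (void)arg;
        ctx.submitter_task = &submitter;
        atomic_fetch_and_explicit(&ctx.flags, ~SETUP_R_DISABLED,
                                  memory_order_release);
        return NULL;
}

/*
 * models __io_msg_ring_data()/io_msg_send_fd(): if the acquire load sees
 * R_DISABLED clear, the plain load of submitter_task is guaranteed to
 * observe the store above, so no NULL check is needed
 */
static void *send_msg(void *arg)
{
        (void)arg;
        if (atomic_load_explicit(&ctx.flags, memory_order_acquire) &
            SETUP_R_DISABLED) {
                puts("ring disabled: -EBADFD");
                return NULL;
        }
        printf("submitter_task = %s\n", ctx.submitter_task->comm);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, enable_rings, NULL);
        pthread_create(&b, NULL, send_msg, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}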

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ec27fafcb213..b31d88295297 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3663,17 +3663,12 @@ static __cold int io_uring_create(struct io_ctx_config *config)
 		ret = -EFAULT;
 		goto err;
 	}
 
 	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
-	    && !(ctx->flags & IORING_SETUP_R_DISABLED)) {
-		/*
-		 * Unlike io_register_enable_rings(), don't need WRITE_ONCE()
-		 * since ctx isn't yet accessible from other tasks
-		 */
+	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
 		ctx->submitter_task = get_task_struct(current);
-	}
 
 	file = io_uring_get_file(ctx);
 	if (IS_ERR(file)) {
 		ret = PTR_ERR(file);
 		goto err;
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 87b4d306cf1b..57ad0085869a 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -78,26 +78,21 @@ static void io_msg_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
 	kfree_rcu(req, rcu_head);
 	percpu_ref_put(&ctx->refs);
 }
 
-static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+static void io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
 			      int res, u32 cflags, u64 user_data)
 {
-	if (!READ_ONCE(ctx->submitter_task)) {
-		kfree_rcu(req, rcu_head);
-		return -EOWNERDEAD;
-	}
 	req->opcode = IORING_OP_NOP;
 	req->cqe.user_data = user_data;
 	io_req_set_res(req, res, cflags);
 	percpu_ref_get(&ctx->refs);
 	req->ctx = ctx;
 	req->tctx = NULL;
 	req->io_task_work.func = io_msg_tw_complete;
 	io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE);
-	return 0;
 }
 
 static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
 			      struct io_msg *msg)
 {
@@ -109,12 +104,12 @@ static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
 		return -ENOMEM;
 
 	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
 		flags = msg->cqe_flags;
 
-	return io_msg_remote_post(target_ctx, target, msg->len, flags,
-					msg->user_data);
+	io_msg_remote_post(target_ctx, target, msg->len, flags, msg->user_data);
+	return 0;
 }
 
 static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
 			      struct io_msg *msg, unsigned int issue_flags)
 {
@@ -125,11 +120,11 @@ static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
 		return -EINVAL;
 	if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
 		return -EINVAL;
 	/*
 	 * Keep IORING_SETUP_R_DISABLED check before submitter_task load
-	 * in io_msg_data_remote() -> io_msg_remote_post()
+	 * in io_msg_data_remote() -> io_req_task_work_add_remote()
 	 */
 	if (smp_load_acquire(&target_ctx->flags) & IORING_SETUP_R_DISABLED)
 		return -EBADFD;
 
 	if (io_msg_need_remote(target_ctx))
@@ -225,14 +220,11 @@ static void io_msg_tw_fd_complete(struct callback_head *head)
 
 static int io_msg_fd_remote(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->file->private_data;
 	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
-	struct task_struct *task = READ_ONCE(ctx->submitter_task);
-
-	if (unlikely(!task))
-		return -EOWNERDEAD;
+	struct task_struct *task = ctx->submitter_task;
 
 	init_task_work(&msg->tw, io_msg_tw_fd_complete);
 	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
 		return -EOWNERDEAD;
 
diff --git a/io_uring/register.c b/io_uring/register.c
index 12318c276068..8104728af294 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -179,11 +179,11 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
 {
 	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
 		return -EBADFD;
 
 	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
-		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
+		ctx->submitter_task = get_task_struct(current);
 		/*
 		 * Lazy activation attempts would fail if it was polled before
 		 * submitter_task is set.
 		 */
 		if (wq_has_sleeper(&ctx->poll_wq))
-- 
2.45.2
Re: [PATCH v7 2/3] io_uring/msg_ring: drop unnecessary submitter_task checks
Posted by Joanne Koong 1 month ago
On Mon, Jan 5, 2026 at 1:05 PM Caleb Sander Mateos
<csander@purestorage.com> wrote:
>
> [...]
> @@ -225,14 +220,11 @@ static void io_msg_tw_fd_complete(struct callback_head *head)
>
>  static int io_msg_fd_remote(struct io_kiocb *req)
>  {
>         struct io_ring_ctx *ctx = req->file->private_data;
>         struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
> -       struct task_struct *task = READ_ONCE(ctx->submitter_task);
> -
> -       if (unlikely(!task))
> -               return -EOWNERDEAD;
> +       struct task_struct *task = ctx->submitter_task;

Is the if (!task) check here still needed? In the
io_register_enable_rings() logic I see

if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
        ctx->submitter_task = get_task_struct(current);
        ...
}
and then a few lines below
ctx->flags &= ~IORING_SETUP_R_DISABLED;

but I'm not seeing any memory barrier stuff that prevents these from
being reordered.

In io_msg_send_fd() I see that we check "if (target_ctx->flags &
IORING_SETUP_R_DISABLED) return -EBADFD;" before calling into
io_msg_fd_remote() here but if the ctx->submitter_task assignment and
IORING_SETUP_R_DISABLED flag clearing logic are reordered, then it
seems like this opens a race condition where there could be a null ptr
crash when task_work_add() gets called below?

Thanks,
Joanne

>
>         init_task_work(&msg->tw, io_msg_tw_fd_complete);
>         if (task_work_add(task, &msg->tw, TWA_SIGNAL))
>                 return -EOWNERDEAD;
>
Re: [PATCH v7 2/3] io_uring/msg_ring: drop unnecessary submitter_task checks
Posted by Caleb Sander Mateos 1 month ago
On Wed, Jan 7, 2026 at 8:25 PM Joanne Koong <joannelkoong@gmail.com> wrote:
>
> On Mon, Jan 5, 2026 at 1:05 PM Caleb Sander Mateos
> <csander@purestorage.com> wrote:
> >
> > [...]
> > @@ -225,14 +220,11 @@ static void io_msg_tw_fd_complete(struct callback_head *head)
> >
> >  static int io_msg_fd_remote(struct io_kiocb *req)
> >  {
> >         struct io_ring_ctx *ctx = req->file->private_data;
> >         struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
> > -       struct task_struct *task = READ_ONCE(ctx->submitter_task);
> > -
> > -       if (unlikely(!task))
> > -               return -EOWNERDEAD;
> > +       struct task_struct *task = ctx->submitter_task;
>
> Is the if (!task) check here still needed? In the
> io_register_enable_rings() logic I see
>
> if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
>         ctx->submitter_task = get_task_struct(current);
>         ...
> }
> and then a few lines below
> ctx->flags &= ~IORING_SETUP_R_DISABLED;
>
> but I'm not seeing any memory barrier stuff that prevents these from
> being reordered.
>
> In io_msg_send_fd() I see that we check "if (target_ctx->flags &
> IORING_SETUP_R_DISABLED) return -EBADFD;" before calling into
> io_msg_fd_remote() here but if the ctx->submitter_task assignment and
> IORING_SETUP_R_DISABLED flag clearing logic are reordered, then it
> seems like this opens a race condition where there could be a null ptr
> crash when task_work_add() gets called below?

Shouldn't patch 1's switch to use smp_store_release() for the clearing
of IORING_SETUP_R_DISABLED and smp_load_acquire() for the check of
IORING_SETUP_R_DISABLED in io_msg_send_fd() ensure the necessary
ordering? Or am I missing something?

Thanks,
Caleb
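
(The pairing described here is the classic message-passing pattern. A
litmus test in the style of the kernel's tools/memory-model suite, with
generic variable names standing in for submitter_task and the
R_DISABLED clear rather than the actual io_uring code, can be checked
with herd7 -conf linux-kernel.cfg:)

C MP+release+acquire

(*
 * buf stands in for ctx->submitter_task, flag for the
 * IORING_SETUP_R_DISABLED clear in io_register_enable_rings().
 *)

{}

P0(int *buf, int *flag)
{
        WRITE_ONCE(*buf, 1);
        smp_store_release(flag, 1);
}

P1(int *buf, int *flag)
{
        int r0;
        int r1;

        r0 = smp_load_acquire(flag);
        r1 = READ_ONCE(*buf);
}

exists (1:r0=1 /\ 1:r1=0)

(The memory model reports the "exists" outcome as Never: once P1's
acquire load observes the release store to flag, the earlier store to
buf is visible as well, which is what lets the NULL check go away.)
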
Re: [PATCH v7 2/3] io_uring/msg_ring: drop unnecessary submitter_task checks
Posted by Joanne Koong 1 month ago
On Wed, Jan 7, 2026 at 11:07 PM Caleb Sander Mateos
<csander@purestorage.com> wrote:
>
> [...]
> Shouldn't patch 1's switch to use smp_store_release() for the clearing
> of IORING_SETUP_R_DISABLED and smp_load_acquire() for the check of
> IORING_SETUP_R_DISABLED in io_msg_send_fd() ensure the necessary
> ordering? Or am I missing something?
>

Nice, yes, that addresses my concern. I had skipped your changes in patch 1.

Reviewed-by: Joanne Koong <joannelkoong@gmail.com>

Thanks,
Joanne
