From: Yu Kuai <yukuai3@huawei.com>
ioc_lookup_icq() is used by bfq to look up the bfqq from the IO path; the
helper has to be protected by queue_lock, which is too heavy. Hence add a
new helper that is lockless. This is safe because both the request_queue
and the ioc are pinned by IO that is still issuing.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
block/blk-ioc.c | 34 ++++++++++++++++++++++++++++++++++
block/blk.h | 1 +
2 files changed, 35 insertions(+)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index ce82770c72ab..4945b48dfdb6 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -343,6 +343,40 @@ struct io_cq *ioc_lookup_icq(struct request_queue *q)
}
EXPORT_SYMBOL(ioc_lookup_icq);
+/**
+ * ioc_lookup_icq_rcu - lookup io_cq from ioc in io path
+ * @q: the associated request_queue
+ *
+ * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called from
+ * io issue path, either return NULL if current issue io to @q for the first
+ * time, or return a valid icq.
+ */
+struct io_cq *ioc_lookup_icq_rcu(struct request_queue *q)
+{
+ struct io_context *ioc = current->io_context;
+ struct io_cq *icq;
+
+ WARN_ON_ONCE(percpu_ref_is_zero(&q->q_usage_counter));
+
+ if (!ioc)
+ return NULL;
+
+ icq = rcu_dereference(ioc->icq_hint);
+ if (icq && icq->q == q)
+ return icq;
+
+ icq = radix_tree_lookup(&ioc->icq_tree, q->id);
+ if (!icq)
+ return NULL;
+
+ if (WARN_ON_ONCE(icq->q != q))
+ return NULL;
+
+ rcu_assign_pointer(ioc->icq_hint, icq);
+ return icq;
+}
+EXPORT_SYMBOL(ioc_lookup_icq_rcu);
+
/**
* ioc_create_icq - create and link io_cq
* @q: request_queue of interest
diff --git a/block/blk.h b/block/blk.h
index 468aa83c5a22..ef31b3ec1c69 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -461,6 +461,7 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
*/
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
+struct io_cq *ioc_lookup_icq_rcu(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
--
2.39.2
On 7/25/25 16:05, Yu Kuai wrote:
> From: Yu Kuai <yukuai3@huawei.com>
>
> ioc_lookup_icq() is used by bfq to lookup bfqq from IO path, the helper
> have to be protected by queue_lock, which is too heavy. Hence add a new
> helper that is lookless, this is safe because both request_queue and ioc
> can be pinged by IO that is still issuing.
>
> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
> ---
> block/blk-ioc.c | 34 ++++++++++++++++++++++++++++++++++
> block/blk.h | 1 +
> 2 files changed, 35 insertions(+)
>
> diff --git a/block/blk-ioc.c b/block/blk-ioc.c
> index ce82770c72ab..4945b48dfdb6 100644
> --- a/block/blk-ioc.c
> +++ b/block/blk-ioc.c
> @@ -343,6 +343,40 @@ struct io_cq *ioc_lookup_icq(struct request_queue *q)
> }
> EXPORT_SYMBOL(ioc_lookup_icq);
>
> +/**
> + * ioc_lookup_icq_rcu - lookup io_cq from ioc in io path
> + * @q: the associated request_queue
> + *
> + * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called from
> + * io issue path, either return NULL if current issue io to @q for the first
> + * time, or return a valid icq.
> + */
> +struct io_cq *ioc_lookup_icq_rcu(struct request_queue *q)
> +{
> + struct io_context *ioc = current->io_context;
> + struct io_cq *icq;
> +
> + WARN_ON_ONCE(percpu_ref_is_zero(&q->q_usage_counter));
I do not think this is necessary.
> +
> + if (!ioc)
> + return NULL;
> +
> + icq = rcu_dereference(ioc->icq_hint);
> + if (icq && icq->q == q)
> + return icq;
> +
> + icq = radix_tree_lookup(&ioc->icq_tree, q->id);
> + if (!icq)
> + return NULL;
> +
> + if (WARN_ON_ONCE(icq->q != q))
> + return NULL;
> +
> + rcu_assign_pointer(ioc->icq_hint, icq);
> + return icq;
> +}
> +EXPORT_SYMBOL(ioc_lookup_icq_rcu);
Patch 2 calls this function with the rcu_read_lock() held. Why not move that rcu
read lock here inside this function ? That is how ioc_lookup_icq() was doing
things, with code that is more compact than this.
And since ioc_lookup_icq() was already using RCU, it seems that the only change
you need is to remove the "lockdep_assert_held(&q->queue_lock);" from that
function to end up with the same functionality as above. So why all the churn ?
Another question is: is it safe to call radix_tree_lookup() without any lock
held ? What if this races with a radix tree insertion ? (I may be wrong here as
I am not familiar with that code).
> +
> /**
> * ioc_create_icq - create and link io_cq
> * @q: request_queue of interest
> diff --git a/block/blk.h b/block/blk.h
> index 468aa83c5a22..ef31b3ec1c69 100644
> --- a/block/blk.h
> +++ b/block/blk.h
> @@ -461,6 +461,7 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
> */
> struct io_cq *ioc_find_get_icq(struct request_queue *q);
> struct io_cq *ioc_lookup_icq(struct request_queue *q);
> +struct io_cq *ioc_lookup_icq_rcu(struct request_queue *q);
> #ifdef CONFIG_BLK_ICQ
> void ioc_clear_queue(struct request_queue *q);
> #else
--
Damien Le Moal
Western Digital Research
On Fri 25-07-25 19:21:06, Damien Le Moal wrote:
> On 7/25/25 16:05, Yu Kuai wrote:
> > From: Yu Kuai <yukuai3@huawei.com>
> >
> > ioc_lookup_icq() is used by bfq to lookup bfqq from IO path, the helper
> > have to be protected by queue_lock, which is too heavy. Hence add a new
> > helper that is lookless, this is safe because both request_queue and ioc
> > can be pinged by IO that is still issuing.
> >
> > Signed-off-by: Yu Kuai <yukuai3@huawei.com>
> > ---
> > block/blk-ioc.c | 34 ++++++++++++++++++++++++++++++++++
> > block/blk.h | 1 +
> > 2 files changed, 35 insertions(+)
> >
> > diff --git a/block/blk-ioc.c b/block/blk-ioc.c
> > index ce82770c72ab..4945b48dfdb6 100644
> > --- a/block/blk-ioc.c
> > +++ b/block/blk-ioc.c
> > @@ -343,6 +343,40 @@ struct io_cq *ioc_lookup_icq(struct request_queue *q)
> > }
> > EXPORT_SYMBOL(ioc_lookup_icq);
> >
> > +/**
> > + * ioc_lookup_icq_rcu - lookup io_cq from ioc in io path
> > + * @q: the associated request_queue
> > + *
> > + * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called from
> > + * io issue path, either return NULL if current issue io to @q for the first
> > + * time, or return a valid icq.
> > + */
> > +struct io_cq *ioc_lookup_icq_rcu(struct request_queue *q)
> > +{
> > + struct io_context *ioc = current->io_context;
> > + struct io_cq *icq;
> > +
> > + WARN_ON_ONCE(percpu_ref_is_zero(&q->q_usage_counter));
>
> I do not think this is necessary.
>
> > +
> > + if (!ioc)
> > + return NULL;
> > +
> > + icq = rcu_dereference(ioc->icq_hint);
> > + if (icq && icq->q == q)
> > + return icq;
> > +
> > + icq = radix_tree_lookup(&ioc->icq_tree, q->id);
> > + if (!icq)
> > + return NULL;
> > +
> > + if (WARN_ON_ONCE(icq->q != q))
> > + return NULL;
> > +
> > + rcu_assign_pointer(ioc->icq_hint, icq);
> > + return icq;
> > +}
> > +EXPORT_SYMBOL(ioc_lookup_icq_rcu);
>
> Patch 2 calls this function with the rcu_read_lock() held. Why not move that rcu
> read lock here inside this function ? That is how ioc_lookup_icq() was doing
> things, with code that is more compact than this.
>
> And since ioc_lookup_icq() was already using RCU, it seems that the only change
> you need is to remove the "lockdep_assert_held(&q->queue_lock);" from that
> function to endup with the same above functionality. So why all the churn ?
Yes, I agree, just dropping the assert and updating callers should be fine.
> Another question is: is it safe to call radix_tree_lookup() without any lock
> held ? What if this races with a radix tree insertion ? (I may be wrong here as
> I am not familiar with that code).
Yes, radix_tree_lookup() is fine to call with just rcu protection.
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR
Hi,
在 2025/7/25 20:03, Jan Kara 写道:
> On Fri 25-07-25 19:21:06, Damien Le Moal wrote:
>> On 7/25/25 16:05, Yu Kuai wrote:
>>> From: Yu Kuai <yukuai3@huawei.com>
>>>
>>> ioc_lookup_icq() is used by bfq to lookup bfqq from IO path, the helper
>>> have to be protected by queue_lock, which is too heavy. Hence add a new
>>> helper that is lookless, this is safe because both request_queue and ioc
>>> can be pinged by IO that is still issuing.
>>>
>>> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
>>> ---
>>> block/blk-ioc.c | 34 ++++++++++++++++++++++++++++++++++
>>> block/blk.h | 1 +
>>> 2 files changed, 35 insertions(+)
>>>
>>> diff --git a/block/blk-ioc.c b/block/blk-ioc.c
>>> index ce82770c72ab..4945b48dfdb6 100644
>>> --- a/block/blk-ioc.c
>>> +++ b/block/blk-ioc.c
>>> @@ -343,6 +343,40 @@ struct io_cq *ioc_lookup_icq(struct request_queue *q)
>>> }
>>> EXPORT_SYMBOL(ioc_lookup_icq);
>>>
>>> +/**
>>> + * ioc_lookup_icq_rcu - lookup io_cq from ioc in io path
>>> + * @q: the associated request_queue
>>> + *
>>> + * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called from
>>> + * io issue path, either return NULL if current issue io to @q for the first
>>> + * time, or return a valid icq.
>>> + */
>>> +struct io_cq *ioc_lookup_icq_rcu(struct request_queue *q)
>>> +{
>>> + struct io_context *ioc = current->io_context;
>>> + struct io_cq *icq;
>>> +
>>> + WARN_ON_ONCE(percpu_ref_is_zero(&q->q_usage_counter));
>> I do not think this is necessary.
This is used to indicate that this is called from the IO issue path; I can remove it.
>>> +
>>> + if (!ioc)
>>> + return NULL;
>>> +
>>> + icq = rcu_dereference(ioc->icq_hint);
>>> + if (icq && icq->q == q)
>>> + return icq;
>>> +
>>> + icq = radix_tree_lookup(&ioc->icq_tree, q->id);
>>> + if (!icq)
>>> + return NULL;
>>> +
>>> + if (WARN_ON_ONCE(icq->q != q))
>>> + return NULL;
>>> +
>>> + rcu_assign_pointer(ioc->icq_hint, icq);
>>> + return icq;
>>> +}
>>> +EXPORT_SYMBOL(ioc_lookup_icq_rcu);
>> Patch 2 calls this function with the rcu_read_lock() held. Why not move that rcu
>> read lock here inside this function ? That is how ioc_lookup_icq() was doing
>> things, with code that is more compact than this.
>>
>> And since ioc_lookup_icq() was already using RCU, it seems that the only change
>> you need is to remove the "lockdep_assert_held(&q->queue_lock);" from that
>> function to endup with the same above functionality. So why all the churn ?
> Yes, I agree, just dropping the assert and updating callers should be fine.
Yes, this is much simpler.
>> Another question is: is it safe to call radix_tree_lookup() without any lock
>> held ? What if this races with a radix tree insertion ? (I may be wrong here as
>> I am not familiar with that code).
> Yes, radix_tree_lookup() is fine to call with just rcu protection.
The insertion is protected by queue_lock, and lookup is fine with just rcu
protection.
Thanks,
Kuai
>
> Honza
© 2016 - 2026 Red Hat, Inc.