From: Yu Kuai <yukuai3@huawei.com>
Replace the internal spinlock 'dd->lock' with the new spinlock in
elevator_queue. There are no functional changes.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
block/mq-deadline.c | 58 +++++++++++++++++++++------------------------
1 file changed, 27 insertions(+), 31 deletions(-)
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 9ab6c6256695..2054c023e855 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -101,7 +101,7 @@ struct deadline_data {
u32 async_depth;
int prio_aging_expire;
- spinlock_t lock;
+ spinlock_t *lock;
};
/* Maps an I/O priority class to a deadline scheduler priority. */
@@ -213,7 +213,7 @@ static void dd_merged_requests(struct request_queue *q, struct request *req,
const u8 ioprio_class = dd_rq_ioclass(next);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
dd->per_prio[prio].stats.merged++;
@@ -253,7 +253,7 @@ static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
return stats->inserted - atomic_read(&stats->completed);
}
@@ -323,7 +323,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
enum dd_prio prio;
u8 ioprio_class;
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
if (!list_empty(&per_prio->dispatch)) {
rq = list_first_entry(&per_prio->dispatch, struct request,
@@ -434,7 +434,7 @@ static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
enum dd_prio prio;
int prio_cnt;
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
!!dd_queued(dd, DD_IDLE_PRIO);
@@ -466,10 +466,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
struct request *rq;
enum dd_prio prio;
- spin_lock(&dd->lock);
rq = dd_dispatch_prio_aged_requests(dd, now);
if (rq)
- goto unlock;
+ return rq;
/*
* Next, dispatch requests in priority order. Ignore lower priority
@@ -481,9 +480,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
break;
}
-unlock:
- spin_unlock(&dd->lock);
-
return rq;
}
@@ -538,9 +534,9 @@ static void dd_exit_sched(struct elevator_queue *e)
WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
- spin_lock(&dd->lock);
+ spin_lock(dd->lock);
queued = dd_queued(dd, prio);
- spin_unlock(&dd->lock);
+ spin_unlock(dd->lock);
WARN_ONCE(queued != 0,
"statistics for priority %d: i %u m %u d %u c %u\n",
@@ -587,7 +583,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
dd->last_dir = DD_WRITE;
dd->fifo_batch = fifo_batch;
dd->prio_aging_expire = prio_aging_expire;
- spin_lock_init(&dd->lock);
+ dd->lock = &eq->lock;
/* We dispatch from request queue wide instead of hw queue */
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
@@ -643,9 +639,9 @@ static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
struct request *free = NULL;
bool ret;
- spin_lock(&dd->lock);
+ spin_lock(dd->lock);
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
- spin_unlock(&dd->lock);
+ spin_unlock(dd->lock);
if (free)
blk_mq_free_request(free);
@@ -667,7 +663,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
struct dd_per_prio *per_prio;
enum dd_prio prio;
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
prio = ioprio_class_to_prio[ioprio_class];
per_prio = &dd->per_prio[prio];
@@ -711,7 +707,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
struct deadline_data *dd = q->elevator->elevator_data;
LIST_HEAD(free);
- spin_lock(&dd->lock);
+ spin_lock(dd->lock);
while (!list_empty(list)) {
struct request *rq;
@@ -719,7 +715,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
list_del_init(&rq->queuelist);
dd_insert_request(hctx, rq, flags, &free);
}
- spin_unlock(&dd->lock);
+ spin_unlock(dd->lock);
blk_mq_free_requests(&free);
}
@@ -835,13 +831,13 @@ static const struct elv_fs_entry deadline_attrs[] = {
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \
static void *deadline_##name##_fifo_start(struct seq_file *m, \
loff_t *pos) \
- __acquires(&dd->lock) \
+ __acquires(dd->lock) \
{ \
struct request_queue *q = m->private; \
struct deadline_data *dd = q->elevator->elevator_data; \
struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
\
- spin_lock(&dd->lock); \
+ spin_lock(dd->lock); \
return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \
} \
\
@@ -856,12 +852,12 @@ static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
} \
\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
- __releases(&dd->lock) \
+ __releases(dd->lock) \
{ \
struct request_queue *q = m->private; \
struct deadline_data *dd = q->elevator->elevator_data; \
\
- spin_unlock(&dd->lock); \
+ spin_unlock(dd->lock); \
} \
\
static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
@@ -927,11 +923,11 @@ static int dd_queued_show(void *data, struct seq_file *m)
struct deadline_data *dd = q->elevator->elevator_data;
u32 rt, be, idle;
- spin_lock(&dd->lock);
+ spin_lock(dd->lock);
rt = dd_queued(dd, DD_RT_PRIO);
be = dd_queued(dd, DD_BE_PRIO);
idle = dd_queued(dd, DD_IDLE_PRIO);
- spin_unlock(&dd->lock);
+ spin_unlock(dd->lock);
seq_printf(m, "%u %u %u\n", rt, be, idle);
@@ -943,7 +939,7 @@ static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
return stats->dispatched + stats->merged -
atomic_read(&stats->completed);
@@ -955,11 +951,11 @@ static int dd_owned_by_driver_show(void *data, struct seq_file *m)
struct deadline_data *dd = q->elevator->elevator_data;
u32 rt, be, idle;
- spin_lock(&dd->lock);
+ spin_lock(dd->lock);
rt = dd_owned_by_driver(dd, DD_RT_PRIO);
be = dd_owned_by_driver(dd, DD_BE_PRIO);
idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
- spin_unlock(&dd->lock);
+ spin_unlock(dd->lock);
seq_printf(m, "%u %u %u\n", rt, be, idle);
@@ -969,13 +965,13 @@ static int dd_owned_by_driver_show(void *data, struct seq_file *m)
#define DEADLINE_DISPATCH_ATTR(prio) \
static void *deadline_dispatch##prio##_start(struct seq_file *m, \
loff_t *pos) \
- __acquires(&dd->lock) \
+ __acquires(dd->lock) \
{ \
struct request_queue *q = m->private; \
struct deadline_data *dd = q->elevator->elevator_data; \
struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
\
- spin_lock(&dd->lock); \
+ spin_lock(dd->lock); \
return seq_list_start(&per_prio->dispatch, *pos); \
} \
\
@@ -990,12 +986,12 @@ static void *deadline_dispatch##prio##_next(struct seq_file *m, \
} \
\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
- __releases(&dd->lock) \
+ __releases(dd->lock) \
{ \
struct request_queue *q = m->private; \
struct deadline_data *dd = q->elevator->elevator_data; \
\
- spin_unlock(&dd->lock); \
+ spin_unlock(dd->lock); \
} \
\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
--
2.39.2
On 7/30/25 10:22, Yu Kuai wrote:
> From: Yu Kuai <yukuai3@huawei.com>
>
> Replace the internal spinlock 'dd->lock' with the new spinlock in
> elevator_queue. There are no functional changes.
>
> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
> ---
> @@ -101,7 +101,7 @@ struct deadline_data {
>  	u32 async_depth;
>  	int prio_aging_expire;
>
> -	spinlock_t lock;
> +	spinlock_t *lock;
>  };
>
> [...]

Do you still need 'dd->lock'? Can't you just refer to the lock from the
elevator_queue structure directly?

Cheers,

Hannes
--
Dr. Hannes Reinecke                  Kernel Storage Architect
hare@suse.de                                +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
On 7/31/25 3:20 PM, Hannes Reinecke wrote:
> On 7/30/25 10:22, Yu Kuai wrote:
>> Replace the internal spinlock 'dd->lock' with the new spinlock in
>> elevator_queue. There are no functional changes.
>>
>> [...]
>
> Do you still need 'dd->lock'? Can't you just refer to the lock from the
> elevator_queue structure directly?

Indeed. Little inline helpers for locking/unlocking q->elevator->lock
would be nice.

--
Damien Le Moal
Western Digital Research
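For illustration, such helpers might look like the sketch below. The
names elv_lock()/elv_unlock() are hypothetical; the 'lock' member of
struct elevator_queue is the one the posted patch takes the address of
in dd_init_sched():

	/* Hypothetical helpers (sketch): wrap the elevator_queue
	 * spinlock so schedulers never touch the lock pointer directly. */
	static inline void elv_lock(struct elevator_queue *eq)
	{
		spin_lock(&eq->lock);
	}

	static inline void elv_unlock(struct elevator_queue *eq)
	{
		spin_unlock(&eq->lock);
	}

A scheduler would then call elv_lock(q->elevator) instead of
spin_lock(dd->lock).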
Hi,

On 2025/07/31 14:22, Damien Le Moal wrote:
> On 7/31/25 3:20 PM, Hannes Reinecke wrote:
>> [...]
>>
>> Do you still need 'dd->lock'? Can't you just refer to the lock from the
>> elevator_queue structure directly?
>
> Indeed. Little inline helpers for locking/unlocking q->elevator->lock
> would be nice.

How about a first patch that factors out inline helpers like dd_lock()
and dd_unlock(), still using dd->lock without any functional changes,
and then a second patch that switches to q->elevator->lock? (Same for
bfq.)

Thanks,
Kuai
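A minimal sketch of that first step (dd_lock()/dd_unlock() are
hypothetical names; this is not code from the posted series):

	/* Step 1 (sketch): funnel all locking through helpers while
	 * still using the scheduler's own 'spinlock_t lock' member. */
	static inline void dd_lock(struct deadline_data *dd)
	{
		spin_lock(&dd->lock);
	}

	static inline void dd_unlock(struct deadline_data *dd)
	{
		spin_unlock(&dd->lock);
	}

A follow-up patch could then change only the helper bodies to take the
elevator_queue lock, leaving every call site untouched.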
On 7/31/25 3:32 PM, Yu Kuai wrote:
> On 2025/07/31 14:22, Damien Le Moal wrote:
>> [...]
>>
>> Indeed. Little inline helpers for locking/unlocking q->elevator->lock
>> would be nice.
>
> How about a first patch that factors out inline helpers like dd_lock()
> and dd_unlock(), still using dd->lock without any functional changes,
> and then a second patch that switches to q->elevator->lock? (Same for
> bfq.)

Patch one can introduce elv->lock and the helpers, then patch 2 can use
the helpers to replace dd->lock. Just don't say "no functional change"
in the commit message; rather, explain that things keep working the same
way as before, but using a different lock. That will address Bart's
comment too. And the same for bfq in patch 3.

--
Damien Le Moal
Western Digital Research
Hi,

On 2025/07/31 15:04, Damien Le Moal wrote:
> On 7/31/25 3:32 PM, Yu Kuai wrote:
>> [...]
>>
>> How about a first patch that factors out inline helpers like dd_lock()
>> and dd_unlock(), still using dd->lock without any functional changes,
>> and then a second patch that switches to q->elevator->lock? (Same for
>> bfq.)
>
> Patch one can introduce elv->lock and the helpers, then patch 2 can use
> the helpers to replace dd->lock. Just don't say "no functional change"
> in the commit message; rather, explain that things keep working the same
> way as before, but using a different lock. That will address Bart's
> comment too. And the same for bfq in patch 3.

Ok, this is what I did in the first RFC version:

https://lore.kernel.org/all/20250530080355.1138759-3-yukuai1@huaweicloud.com/

I somehow convinced myself that using dd->lock was better. :(

Will change this in the next version.

Thanks,
Kuai
On 7/30/25 1:22 AM, Yu Kuai wrote:
> @@ -466,10 +466,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
>  	struct request *rq;
>  	enum dd_prio prio;
>
> -	spin_lock(&dd->lock);
>  	rq = dd_dispatch_prio_aged_requests(dd, now);

The description says "no functional changes" but I think I see a
functional change above. Please restrict this patch to changing
&dd->lock into dd->lock only.

Thanks,

Bart.
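For context, the hunk quoted above removes the spin_lock()/spin_unlock()
pair from dd_dispatch_request(), which implies the caller must now hold
the elevator lock around dispatch. A sketch of the implied caller-side
pattern (hypothetical; the actual call site in the elevator core is not
shown in this thread):

	/* Implied usage after the patch (sketch): the block layer core,
	 * not the scheduler, serializes ->dispatch_request() with the
	 * new elevator_queue lock. */
	spin_lock(&q->elevator->lock);
	rq = q->elevator->type->ops.dispatch_request(hctx);
	spin_unlock(&q->elevator->lock);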
Hi,

On 2025/7/31 1:21, Bart Van Assche wrote:
> On 7/30/25 1:22 AM, Yu Kuai wrote:
>> [...]
>
> The description says "no functional changes" but I think I see a
> functional change above. Please restrict this patch to changing
> &dd->lock into dd->lock only.

Ok, you mean that moving the lock to the caller is a functional change,
right?

Thanks,
Kuai
On 7/30/25 11:01 AM, Yu Kuai wrote:
> Ok, you mean that moving the lock to the caller is a functional change,
> right?

That's something one could argue about. But I think there is agreement
that each patch should only include one logical change.

Thanks,

Bart.