block: add two helpers for registering/un-registering sched debugfs

Add blk_mq_sched_reg_debugfs()/blk_mq_sched_unreg_debugfs() to clean up
sched init/exit code a bit.

The order of registering/unregistering debugfs entries for sched and
sched_hctx changes slightly, but this is safe: both sched and sched_hctx
are guaranteed to be fully initialized by the time they are exported via
debugfs.
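
For reference, the exit-side reordering is roughly the following
(simplified sketch of the diff below; locking and error handling elided):

	/* before: debugfs teardown interleaved with per-hctx exit */
	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		e->type->ops.exit_hctx(hctx, i);
	}
	blk_mq_debugfs_unregister_sched(q);

	/* after: all debugfs entries are removed up front */
	blk_mq_sched_unreg_debugfs(q);
	queue_for_each_hw_ctx(q, hctx, i)
		e->type->ops.exit_hctx(hctx, i);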

Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250505141805.2751237-6-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Ming Lei 2025-05-05 22:17:43 +08:00 committed by Jens Axboe
parent 94209d27d1
commit ed3896acdc
1 changed file with 30 additions and 15 deletions

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
@@ -434,6 +434,30 @@ static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
 	return 0;
 }
 
+static void blk_mq_sched_reg_debugfs(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i;
+
+	mutex_lock(&q->debugfs_mutex);
+	blk_mq_debugfs_register_sched(q);
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_debugfs_register_sched_hctx(q, hctx);
+	mutex_unlock(&q->debugfs_mutex);
+}
+
+static void blk_mq_sched_unreg_debugfs(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i;
+
+	mutex_lock(&q->debugfs_mutex);
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_debugfs_unregister_sched_hctx(hctx);
+	blk_mq_debugfs_unregister_sched(q);
+	mutex_unlock(&q->debugfs_mutex);
+}
+
 /* caller must have a reference to @e, will grab another one if successful */
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 {
@@ -467,10 +491,6 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	if (ret)
 		goto err_free_map_and_rqs;
 
-	mutex_lock(&q->debugfs_mutex);
-	blk_mq_debugfs_register_sched(q);
-	mutex_unlock(&q->debugfs_mutex);
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (e->ops.init_hctx) {
 			ret = e->ops.init_hctx(hctx, i);
@@ -482,11 +502,11 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 				return ret;
 			}
 		}
-		mutex_lock(&q->debugfs_mutex);
-		blk_mq_debugfs_register_sched_hctx(q, hctx);
-		mutex_unlock(&q->debugfs_mutex);
 	}
 
+	/* sched is initialized, it is ready to export it via debugfs */
+	blk_mq_sched_reg_debugfs(q);
+
 	return 0;
 
 err_free_map_and_rqs:
@@ -524,11 +544,10 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 	unsigned long i;
 	unsigned int flags = 0;
 
-	queue_for_each_hw_ctx(q, hctx, i) {
-		mutex_lock(&q->debugfs_mutex);
-		blk_mq_debugfs_unregister_sched_hctx(hctx);
-		mutex_unlock(&q->debugfs_mutex);
+	/* unexport via debugfs before exiting sched */
+	blk_mq_sched_unreg_debugfs(q);
 
+	queue_for_each_hw_ctx(q, hctx, i) {
 		if (e->type->ops.exit_hctx && hctx->sched_data) {
 			e->type->ops.exit_hctx(hctx, i);
 			hctx->sched_data = NULL;
@@ -536,10 +555,6 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 		flags = hctx->flags;
 	}
 
-	mutex_lock(&q->debugfs_mutex);
-	blk_mq_debugfs_unregister_sched(q);
-	mutex_unlock(&q->debugfs_mutex);
-
 	if (e->type->ops.exit_sched)
 		e->type->ops.exit_sched(e);
 	blk_mq_sched_tags_teardown(q, flags);
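
Taken together, callers pair the helpers around the scheduler's lifetime.
A minimal sketch of the resulting pattern (hypothetical caller steps, not
the exact kernel code):

	/* init path: export only after sched and every sched_hctx are set up */
	setup_sched_and_hctx_data(q);		/* hypothetical setup step */
	blk_mq_sched_reg_debugfs(q);

	/* exit path: unexport before any sched/sched_hctx data is torn down */
	blk_mq_sched_unreg_debugfs(q);
	teardown_sched_and_hctx_data(q);	/* hypothetical teardown step */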