blk-mq: check invalid nr_requests in queue_requests_store()

queue_requests_store() is the only caller of
blk_mq_update_nr_requests(), and blk_mq_update_nr_requests() is the
only caller of blk_mq_tag_update_depth(); however, each of them checks
the nr_requests value input by the user.

Make the code cleaner by moving all the checks into the top-level
function:

1) nr_requests must be greater than the number of reserved tags;
2) if an elevator is in use, 4 <= nr_requests <= 2048;
3) if the elevator is none, 4 <= nr_requests <= tag_set->queue_depth.

Meanwhile, case 2 is the only case in which tags can grow, and hence
the only one where -ENOMEM might be returned.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Authored by Yu Kuai on 2025-09-10 16:04:38 +08:00; committed by Jens Axboe
commit b46d4c447d (parent 8bd7195fea)
4 changed files with 17 additions and 22 deletions
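Why case 2 is the only one that can grow: without an elevator, the hctx
tags were allocated at tag_set->queue_depth up front and nr is capped at
that value, so an update only resizes the existing sbitmap; with an
elevator, sched_tags may have to grow beyond their current depth, which
requires allocating a new tags structure and can therefore fail with
-ENOMEM. The consolidated check, quoted from the block/blk-sysfs.c hunk
below:

	if (nr <= q->tag_set->reserved_tags ||
	    (q->elevator && nr > MAX_SCHED_RQ) ||
	    (!q->elevator && nr > q->tag_set->queue_depth)) {
		ret = -EINVAL;
		goto unlock;
	}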

--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c

@@ -610,14 +610,10 @@ void blk_mq_free_tags(struct blk_mq_tag_set *set, struct blk_mq_tags *tags)
 }
 
 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
-		struct blk_mq_tags **tagsptr, unsigned int tdepth,
-		bool can_grow)
+		struct blk_mq_tags **tagsptr, unsigned int tdepth)
 {
 	struct blk_mq_tags *tags = *tagsptr;
 
-	if (tdepth <= tags->nr_reserved_tags)
-		return -EINVAL;
-
 	/*
 	 * If we are allowed to grow beyond the original size, allocate
 	 * a new set of tags before freeing the old one.
@@ -626,16 +622,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 		struct blk_mq_tag_set *set = hctx->queue->tag_set;
 		struct blk_mq_tags *new;
 
-		if (!can_grow)
-			return -EINVAL;
-
-		/*
-		 * We need some sort of upper limit, set it high enough that
-		 * no valid use cases should require more.
-		 */
-		if (tdepth > MAX_SCHED_RQ)
-			return -EINVAL;
-
 		/*
 		 * Only the sbitmap needs resizing since we allocated the max
 		 * initially.
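
With the checks hoisted to the caller, blk_mq_tag_update_depth() no
longer needs its can_grow parameter. A rough sketch of the resulting
flow, with the grow path's allocation details elided (an illustration,
not the verbatim function):

	int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			struct blk_mq_tags **tagsptr, unsigned int tdepth)
	{
		struct blk_mq_tags *tags = *tagsptr;

		if (tdepth > tags->nr_tags) {
			/*
			 * Grow: only reachable when an elevator is set, since
			 * the caller otherwise caps tdepth at the tag_set's
			 * queue_depth. A new tags structure is allocated, so
			 * this is the one path that can fail with -ENOMEM.
			 */
			/* ... allocate new tags, swap them in, free the old ... */
		} else {
			/* Shrink or keep the size: only resize the sbitmap. */
			sbitmap_queue_resize(&tags->bitmap_tags,
					tdepth - tags->nr_reserved_tags);
		}

		return 0;
	}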

--- a/block/blk-mq.c
+++ b/block/blk-mq.c

@@ -4933,9 +4933,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	int ret = 0;
 	unsigned long i;
 
-	if (q->nr_requests == nr)
-		return 0;
-
 	blk_mq_quiesce_queue(q);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
@@ -4947,10 +4944,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		 */
 		if (hctx->sched_tags) {
 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
-						      nr, true);
+						      nr);
 		} else {
-			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
-						      false);
+			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr);
 		}
 		if (ret)
 			goto out;

--- a/block/blk-mq.h
+++ b/block/blk-mq.h

@@ -171,7 +171,7 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 		    unsigned int tag);
 void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
-		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
+		struct blk_mq_tags **tags, unsigned int depth);
 void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
 				   unsigned int size);
 void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c

@@ -75,12 +75,25 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
 	memflags = blk_mq_freeze_queue(q);
 	mutex_lock(&q->elevator_lock);
 
+	if (nr == q->nr_requests)
+		goto unlock;
+
 	if (nr < BLKDEV_MIN_RQ)
 		nr = BLKDEV_MIN_RQ;
 
+	if (nr <= q->tag_set->reserved_tags ||
+	    (q->elevator && nr > MAX_SCHED_RQ) ||
+	    (!q->elevator && nr > q->tag_set->queue_depth)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
 	err = blk_mq_update_nr_requests(disk->queue, nr);
 	if (err)
 		ret = err;
 
+unlock:
 	mutex_unlock(&q->elevator_lock);
 	blk_mq_unfreeze_queue(q, memflags);
 	return ret;
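
As a quick way to exercise the new check, a minimal userspace sketch
(the device name and the presence of a scheduler are assumptions; the
same rejection previously came from deeper inside
blk_mq_tag_update_depth()):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assumes /dev/sda exists and has an elevator (e.g. mq-deadline) set. */
		int fd = open("/sys/block/sda/queue/nr_requests", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* 4096 > MAX_SCHED_RQ (2048), so the store now fails up front. */
		if (write(fd, "4096", 4) < 0)
			printf("write: %s\n", strerror(errno)); /* Invalid argument */
		close(fd);
		return 0;
	}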