blk-throttle: don't take carryover for prioritized processing of metadata

Commit 29390bb566 ("blk-throttle: support prioritized processing of metadata")
takes a bytes/ios carryover for prioritized processing of metadata. It turns
out we can support this by charging the bio directly without trimming the
slice, and the result is the same as with carryover.

Cc: Tejun Heo <tj@kernel.org>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20250305043123.3938491-3-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit a9fc8868b3
parent 483a393e7e
@@ -1620,13 +1620,6 @@ static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
 	return tg_may_dispatch(tg, bio, NULL);
 }
 
-static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw)
-{
-	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
-		tg->carryover_bytes[rw] -= throtl_bio_data_size(bio);
-	tg->carryover_ios[rw]--;
-}
-
 bool __blk_throtl_bio(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
@@ -1663,10 +1656,12 @@ bool __blk_throtl_bio(struct bio *bio)
 			/*
 			 * IOs which may cause priority inversions are
 			 * dispatched directly, even if they're over limit.
-			 * Debts are handled by carryover_bytes/ios while
-			 * calculating wait time.
+			 *
+			 * Charge and dispatch directly, and our throttle
+			 * control algorithm is adaptive, and extra IO bytes
+			 * will be throttled for paying the debt
 			 */
-			tg_dispatch_in_debt(tg, bio, rw);
+			throtl_charge_bio(tg, bio);
 		} else {
 			/* if above limits, break to queue */
 			break;
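
Why the two forms are interchangeable: taking carryover subtracts the bio from
tg->carryover_bytes[rw]/carryover_ios[rw], while charging adds it to the
dispatched counters, and the two terms enter the limit check with opposite
signs, so the remaining budget for the slice comes out the same. The snippet
below is a minimal, standalone model of that arithmetic, not kernel code:
struct tg_model, budget_left() and the bps_limit field are hypothetical
stand-ins for illustration, though bytes_disp/carryover_bytes echo the field
names in struct throtl_grp.

#include <stdio.h>

/* Stripped-down stand-in for the per-group throttle state. */
struct tg_model {
	long long bytes_disp;		/* bytes already dispatched in the slice */
	long long carryover_bytes;	/* debt/credit carried into the slice */
	long long bps_limit;		/* bytes allowed for the slice */
};

/* Remaining budget the throttler would see when sizing the next wait. */
static long long budget_left(const struct tg_model *tg)
{
	return tg->bps_limit + tg->carryover_bytes - tg->bytes_disp;
}

int main(void)
{
	struct tg_model carryover = { .bps_limit = 1024 * 1024 };
	struct tg_model charge    = { .bps_limit = 1024 * 1024 };
	long long bio_size = 256 * 1024;

	/* Old approach: record the over-limit bio as carryover debt. */
	carryover.carryover_bytes -= bio_size;

	/* New approach: charge the bio directly against the slice. */
	charge.bytes_disp += bio_size;

	/* Both leave the same budget, so later IO waits the same time. */
	printf("budget with carryover debt: %lld\n", budget_left(&carryover));
	printf("budget with direct charge:  %lld\n", budget_left(&charge));
	return 0;
}

Built with any C compiler, both printouts show the same remaining budget,
which is the equivalence the commit message relies on.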