diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 3ebdec40e758..44746f4c0b89 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -697,7 +697,7 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	unsigned int limit, act_idx;
 
 	/* Sync reads have full depth available */
-	if (op_is_sync(opf) && !op_is_write(opf))
+	if (blk_mq_is_sync_read(opf))
 		limit = data->q->nr_requests;
 	else
 		limit = bfqd->async_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 02c40a72e959..5678e15bd33c 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -137,4 +137,9 @@ static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
 						depth);
 }
 
+static inline bool blk_mq_is_sync_read(blk_opf_t opf)
+{
+	return op_is_sync(opf) && !op_is_write(opf);
+}
+
 #endif
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index c1b36ffd19ce..2b3f5b8959af 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -556,7 +556,7 @@ static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	 * We use the scheduler tags as per-hardware queue queueing tokens.
 	 * Async requests can be limited at this stage.
 	 */
-	if (!op_is_sync(opf)) {
+	if (!blk_mq_is_sync_read(opf)) {
 		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
 
 		data->shallow_depth = kqd->async_depth;
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 3e3719093aec..29d00221fbea 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -495,7 +495,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	struct deadline_data *dd = data->q->elevator->elevator_data;
 
 	/* Do not throttle synchronous reads. */
-	if (op_is_sync(opf) && !op_is_write(opf))
+	if (blk_mq_is_sync_read(opf))
 		return;
 
 	/*
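
The patch factors the "synchronous, non-write" test that bfq and mq-deadline previously open-coded into a single blk_mq_is_sync_read() helper, so all three elevators now share one definition of the requests that escape depth throttling. For illustration only, below is a minimal self-contained userspace sketch of the semantics that helper captures; it is not part of the patch. The flag values are simplified stand-ins for the kernel's REQ_OP_* / REQ_* definitions, and the real op_is_sync() additionally treats REQ_FUA and REQ_PREFLUSH as sync.

/* Illustrative userspace model of blk_mq_is_sync_read() -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int blk_opf_t;

#define REQ_OP_READ	0u		/* op numbers: write-type ops have bit 0 set */
#define REQ_OP_WRITE	1u
#define REQ_OP_MASK	0xffu		/* low byte selects the op */
#define REQ_SYNC	(1u << 8)	/* modifier flag above the op bits */

/* Reads are implicitly synchronous; writes are sync only when flagged. */
static bool op_is_sync(blk_opf_t opf)
{
	return (opf & REQ_OP_MASK) == REQ_OP_READ || (opf & REQ_SYNC);
}

static bool op_is_write(blk_opf_t opf)
{
	return opf & 1;
}

static bool blk_mq_is_sync_read(blk_opf_t opf)
{
	return op_is_sync(opf) && !op_is_write(opf);
}

int main(void)
{
	/* Only plain reads pass; sync writes are still throttled. */
	printf("read:        %d\n", blk_mq_is_sync_read(REQ_OP_READ));             /* 1 */
	printf("async write: %d\n", blk_mq_is_sync_read(REQ_OP_WRITE));            /* 0 */
	printf("sync write:  %d\n", blk_mq_is_sync_read(REQ_OP_WRITE | REQ_SYNC)); /* 0 */
	return 0;
}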