blk-mq-sched: unify elevators checking for async requests
bfq and mq-deadline treat sync writes as async requests and only grant
sync reads the full tag depth, throttling everything else via
async_depth; kyber, however, does not yet treat sync writes as async
requests. Consider the case where there are lots of dirty pages and the
user calls fsync() to flush them: sched_tags can be exhausted by sync
writes, and sync reads get stuck waiting for a tag. Hence make kyber
follow what mq-deadline and bfq do, and unify the async-request check
across all elevators.

Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9fc7900b14
commit 1db61b0afd
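To make the classification concrete, below is a minimal userspace sketch
of the rule this patch unifies across elevators. The REQ_OP_*/REQ_SYNC
encodings and the op_is_sync()/op_is_write() bodies are simplified
stand-ins for the kernel's real definitions (reads are always treated as
sync; writes are sync only when REQ_SYNC is set); only
blk_mq_is_sync_read() matches the helper the patch adds to
blk-mq-sched.h.

/* Sketch only: flag encodings are illustrative, not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int blk_opf_t;

#define REQ_OP_READ	0u
#define REQ_OP_WRITE	1u
#define REQ_OP_MASK	0xffu
#define REQ_SYNC	(1u << 8)	/* placeholder bit position */

static bool op_is_write(blk_opf_t opf)
{
	return (opf & REQ_OP_MASK) == REQ_OP_WRITE;
}

/* Simplified: reads are always sync, writes are sync only with REQ_SYNC. */
static bool op_is_sync(blk_opf_t opf)
{
	return (opf & REQ_OP_MASK) == REQ_OP_READ || (opf & REQ_SYNC);
}

/* Mirrors the helper this patch adds: only sync reads escape throttling. */
static bool blk_mq_is_sync_read(blk_opf_t opf)
{
	return op_is_sync(opf) && !op_is_write(opf);
}

int main(void)
{
	/* Sync read keeps full depth; sync writes (e.g. fsync writeback)
	 * and async writes are both limited to async_depth after this patch. */
	printf("sync read   -> full depth? %d\n", blk_mq_is_sync_read(REQ_OP_READ));
	printf("sync write  -> full depth? %d\n", blk_mq_is_sync_read(REQ_OP_WRITE | REQ_SYNC));
	printf("async write -> full depth? %d\n", blk_mq_is_sync_read(REQ_OP_WRITE));
	return 0;
}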
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -697,7 +697,7 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	unsigned int limit, act_idx;
 
 	/* Sync reads have full depth available */
-	if (op_is_sync(opf) && !op_is_write(opf))
+	if (blk_mq_is_sync_read(opf))
 		limit = data->q->nr_requests;
 	else
 		limit = bfqd->async_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -137,4 +137,9 @@ static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
 					    depth);
 }
 
+static inline bool blk_mq_is_sync_read(blk_opf_t opf)
+{
+	return op_is_sync(opf) && !op_is_write(opf);
+}
+
 #endif
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -556,7 +556,7 @@ static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	 * We use the scheduler tags as per-hardware queue queueing tokens.
 	 * Async requests can be limited at this stage.
 	 */
-	if (!op_is_sync(opf)) {
+	if (!blk_mq_is_sync_read(opf)) {
 		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
 
 		data->shallow_depth = kqd->async_depth;
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -495,7 +495,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	struct deadline_data *dd = data->q->elevator->elevator_data;
 
 	/* Do not throttle synchronous reads. */
-	if (op_is_sync(opf) && !op_is_write(opf))
+	if (blk_mq_is_sync_read(opf))
 		return;
 
 	/*