dm: use generic functions instead of disable_discard and disable_write_zeroes
A small code cleanup: use blk_queue_disable_discard and blk_queue_disable_write_zeroes instead of disable_discard and disable_write_zeroes.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

pull/1285/head
parent
33304b75df
commit
f1e24048ed
|
|
@ -163,9 +163,6 @@ struct mapped_device {
|
||||||
#define DMF_POST_SUSPENDING 8
|
#define DMF_POST_SUSPENDING 8
|
||||||
#define DMF_EMULATE_ZONE_APPEND 9
|
#define DMF_EMULATE_ZONE_APPEND 9
|
||||||
|
|
||||||
void disable_discard(struct mapped_device *md);
|
|
||||||
void disable_write_zeroes(struct mapped_device *md);
|
|
||||||
|
|
||||||
static inline sector_t dm_get_size(struct mapped_device *md)
|
static inline sector_t dm_get_size(struct mapped_device *md)
|
||||||
{
|
{
|
||||||
return get_capacity(md->disk);
|
return get_capacity(md->disk);
|
||||||
|
|
|
||||||
|
|
@ -217,10 +217,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
|
||||||
if (unlikely(error == BLK_STS_TARGET)) {
|
if (unlikely(error == BLK_STS_TARGET)) {
|
||||||
if (req_op(clone) == REQ_OP_DISCARD &&
|
if (req_op(clone) == REQ_OP_DISCARD &&
|
||||||
!clone->q->limits.max_discard_sectors)
|
!clone->q->limits.max_discard_sectors)
|
||||||
disable_discard(tio->md);
|
blk_queue_disable_discard(tio->md->queue);
|
||||||
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
|
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
|
||||||
!clone->q->limits.max_write_zeroes_sectors)
|
!clone->q->limits.max_write_zeroes_sectors)
|
||||||
disable_write_zeroes(tio->md);
|
blk_queue_disable_write_zeroes(tio->md->queue);
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (r) {
|
switch (r) {
|
||||||
|
|
|
||||||
|
|
@ -1082,22 +1082,6 @@ static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
|
||||||
return &md->queue->limits;
|
return &md->queue->limits;
|
||||||
}
|
}
|
||||||
|
|
||||||
void disable_discard(struct mapped_device *md)
|
|
||||||
{
|
|
||||||
struct queue_limits *limits = dm_get_queue_limits(md);
|
|
||||||
|
|
||||||
/* device doesn't really support DISCARD, disable it */
|
|
||||||
limits->max_hw_discard_sectors = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void disable_write_zeroes(struct mapped_device *md)
|
|
||||||
{
|
|
||||||
struct queue_limits *limits = dm_get_queue_limits(md);
|
|
||||||
|
|
||||||
/* device doesn't really support WRITE ZEROES, disable it */
|
|
||||||
limits->max_write_zeroes_sectors = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
|
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
|
||||||
{
|
{
|
||||||
return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
|
return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
|
||||||
|
|
@ -1115,10 +1099,10 @@ static void clone_endio(struct bio *bio)
|
||||||
if (unlikely(error == BLK_STS_TARGET)) {
|
if (unlikely(error == BLK_STS_TARGET)) {
|
||||||
if (bio_op(bio) == REQ_OP_DISCARD &&
|
if (bio_op(bio) == REQ_OP_DISCARD &&
|
||||||
!bdev_max_discard_sectors(bio->bi_bdev))
|
!bdev_max_discard_sectors(bio->bi_bdev))
|
||||||
disable_discard(md);
|
blk_queue_disable_discard(md->queue);
|
||||||
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
|
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
|
||||||
!bdev_write_zeroes_sectors(bio->bi_bdev))
|
!bdev_write_zeroes_sectors(bio->bi_bdev))
|
||||||
disable_write_zeroes(md);
|
blk_queue_disable_write_zeroes(md->queue);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (static_branch_unlikely(&zoned_enabled) &&
|
if (static_branch_unlikely(&zoned_enabled) &&
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue