block-6.19-20251211
Merge tag 'block-6.19-20251211' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

 - Always initialize DMA state, fixing a potentially nasty issue on the block side
 - btrfs zoned write fix with cached zone reports
 - Fix corruption issues in bcache with chained bios, and further make it clear that the chained IO handler is simply a marker, not code meant to be executed
 - Kill old code dealing with synchronous IO polling in the block layer, which has been dead for a long time; only async polling is supported these days
 - Fix a lockdep issue in tag_set management by moving it to RCU
 - Fix an issue with ublk's bio_vec iteration
 - Don't unconditionally enforce blocking issue of ublk control commands; allow some of them with non-blocking issue as they do not block

* tag 'block-6.19-20251211' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  blk-mq-dma: always initialize dma state
  blk-mq: delete task running check in blk_hctx_poll()
  block: fix cached zone reports on devices with native zone append
  block: Use RCU in blk_mq_[un]quiesce_tagset() instead of set->tag_list_lock
  ublk: don't mutate struct bio_vec in iteration
  block: prohibit calls to bio_chain_endio
  bcache: fix improper use of bi_end_io
  ublk: allow non-blocking ctrl cmds in IO_URING_F_NONBLOCK issue
commit 35ebee7e72
@@ -321,9 +321,13 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 	return parent;
 }
 
+/*
+ * This function should only be used as a flag and must never be called.
+ * If execution reaches here, it indicates a serious programming error.
+ */
 static void bio_chain_endio(struct bio *bio)
 {
-	bio_endio(__bio_chain_endio(bio));
+	BUG();
 }
 
 /**
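The hunk above is the "block: prohibit calls to bio_chain_endio" change in block/bio.c: bio_chain() installs bio_chain_endio purely as a sentinel value, and bio_endio() recognises the pointer and unwinds the chain inline, so the function body must never actually run. Roughly, as a simplified sketch (not the exact upstream code, which also does accounting, polling and blk-cgroup bookkeeping):

void bio_endio(struct bio *bio)
{
again:
	/* ... usual completion work elided ... */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);	/* drop the child, get the parent */
		goto again;			/* complete the parent in the same frame */
	}
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}

Because the pointer comparison is the whole mechanism, any code path that invokes the handler directly (as the bcache hunks further down used to) is a bug, and BUG() now makes that explicit.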
@@ -199,6 +199,7 @@ static bool blk_dma_map_iter_start(struct request *req, struct device *dma_dev,
 	if (blk_can_dma_map_iova(req, dma_dev) &&
 	    dma_iova_try_alloc(dma_dev, state, vec.paddr, total_len))
 		return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec);
+	memset(state, 0, sizeof(*state));
 	return blk_dma_map_direct(req, dma_dev, iter, &vec);
 }
 
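Context for this blk-mq-dma fix: blk_dma_map_iter_start() either sets up one contiguous IOVA mapping via dma_iova_try_alloc() or falls back to per-segment direct mapping, and the teardown side later asks the state which of the two happened. Before this change the fallback path left the caller-provided state untouched, so stale stack contents could masquerade as a live IOVA allocation. A hedged sketch of the caller-side pattern this protects (my_unmap() and its shape are made up for illustration; dma_use_iova() is the real discriminator as I understand the dma_iova_* API):

static void my_unmap(struct device *dev, struct dma_iova_state *state)
{
	if (dma_use_iova(state)) {
		/* one contiguous IOVA allocation: tear it down in one go */
		pr_debug("freeing IOVA range\n");
	} else {
		/* per-segment direct mappings: dma_unmap_page() each element */
		pr_debug("unmapping individual segments\n");
	}
	/*
	 * Before the fix, the direct-mapping branch of blk_dma_map_iter_start()
	 * never wrote to *state, so uninitialized stack garbage could make
	 * dma_use_iova() return true here and the wrong teardown path would run.
	 */
}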
@@ -336,12 +336,12 @@ void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
 {
 	struct request_queue *q;
 
-	mutex_lock(&set->tag_list_lock);
-	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
 		if (!blk_queue_skip_tagset_quiesce(q))
 			blk_mq_quiesce_queue_nowait(q);
 	}
-	mutex_unlock(&set->tag_list_lock);
+	rcu_read_unlock();
 
 	blk_mq_wait_quiesce_done(set);
 }
@@ -351,12 +351,12 @@ void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
 {
 	struct request_queue *q;
 
-	mutex_lock(&set->tag_list_lock);
-	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
 		if (!blk_queue_skip_tagset_quiesce(q))
 			blk_mq_unquiesce_queue(q);
 	}
-	mutex_unlock(&set->tag_list_lock);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
 
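The quiesce/unquiesce walkers above only read the tag_list, so they can traverse it under rcu_read_lock() instead of set->tag_list_lock, which is what addresses the lockdep issue called out in the pull message. Note that blk_mq_quiesce_queue_nowait() does not sleep, and the blocking wait (blk_mq_wait_quiesce_done()) happens only after rcu_read_unlock(). The pattern is the standard RCU-protected list reader; a generic sketch with made-up names:

struct entry {
	struct list_head node;
	int value;
};

static LIST_HEAD(entries);		/* mutated only under a mutex elsewhere */

static void walk_entries(void)
{
	struct entry *e;

	rcu_read_lock();				/* cheap, never blocks */
	list_for_each_entry_rcu(e, &entries, node) {
		/* must not sleep inside the read-side critical section */
		pr_info("value=%d\n", e->value);
	}
	rcu_read_unlock();
}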
@@ -4311,7 +4311,7 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 	struct blk_mq_tag_set *set = q->tag_set;
 
 	mutex_lock(&set->tag_list_lock);
-	list_del(&q->tag_set_list);
+	list_del_rcu(&q->tag_set_list);
 	if (list_is_singular(&set->tag_list)) {
 		/* just transitioned to unshared */
 		set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
@@ -4319,7 +4319,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 		blk_mq_update_tag_set_shared(set, false);
 	}
 	mutex_unlock(&set->tag_list_lock);
-	INIT_LIST_HEAD(&q->tag_set_list);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@ -4338,7 +4337,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	}
 	if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
 		queue_set_hctx_shared(q, true);
-	list_add_tail(&q->tag_set_list, &set->tag_list);
+	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
 
 	mutex_unlock(&set->tag_list_lock);
 }
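The writer side stays serialized by set->tag_list_lock and only switches to the _rcu list primitives so the lockless readers above always see a consistent list. Dropping INIT_LIST_HEAD() after list_del_rcu() is part of the same rule: re-initialising the entry would overwrite the ->next pointer an in-flight reader may still be following. A generic sketch of the writer rules (names are made up; the explicit grace-period wait stands in for however blk-mq actually defers reuse of the queue):

static DEFINE_MUTEX(entries_lock);

static void add_entry(struct entry *e)
{
	mutex_lock(&entries_lock);
	list_add_tail_rcu(&e->node, &entries);	/* publishes with the needed barrier */
	mutex_unlock(&entries_lock);
}

static void del_entry(struct entry *e)
{
	mutex_lock(&entries_lock);
	list_del_rcu(&e->node);		/* unlinks, but leaves e->node.next intact */
	mutex_unlock(&entries_lock);
	/*
	 * Do NOT re-initialize e->node here: a reader that fetched e before the
	 * unlink may still dereference e->node.next.  Wait a grace period
	 * (synchronize_rcu() or kfree_rcu()) before reusing or freeing e.
	 */
	synchronize_rcu();
}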
@@ -5193,27 +5192,19 @@ EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 			 struct io_comp_batch *iob, unsigned int flags)
 {
-	long state = get_current_state();
 	int ret;
 
 	do {
 		ret = q->mq_ops->poll(hctx, iob);
-		if (ret > 0) {
-			__set_current_state(TASK_RUNNING);
+		if (ret > 0)
 			return ret;
-		}
-
-		if (signal_pending_state(state, current))
-			__set_current_state(TASK_RUNNING);
-		if (task_is_running(current))
+		if (task_sigpending(current))
 			return 1;
 
 		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
 			break;
 		cpu_relax();
 	} while (!need_resched());
 
-	__set_current_state(TASK_RUNNING);
 	return 0;
 }
 
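The deleted lines date from the era when a polling task could be sleeping (the old synchronous/hybrid polling paths); with only asynchronous io_uring IOPOLL left, the poller is always TASK_RUNNING and the state juggling collapses to a plain task_sigpending() check. For reference, a hedged userspace illustration of the one remaining consumer, io_uring with IORING_SETUP_IOPOLL (liburing; the device path and the terse error handling are illustrative only, and the queue must actually support polling):

#include <fcntl.h>
#include <liburing.h>
#include <stdlib.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);	/* illustrative device */
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	/* IOPOLL: completions are reaped by polling the queue, not by IRQs */
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* this drives the kernel poll loop shown above until the read completes */
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}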
|
||||||
|
|
@ -2100,7 +2100,7 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
|
||||||
* we have a zone write plug for such zone if the device has a zone
|
* we have a zone write plug for such zone if the device has a zone
|
||||||
* write plug hash table.
|
* write plug hash table.
|
||||||
*/
|
*/
|
||||||
if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
|
if (!disk->zone_wplugs_hash)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
wp_offset = disk_zone_wplug_sync_wp_offset(disk, zone);
|
wp_offset = disk_zone_wplug_sync_wp_offset(disk, zone);
|
||||||
|
|
|
||||||
|
|
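This zoned change makes blk_revalidate_seq_zone() refresh a zone's cached write-pointer offset from the zone write plug whenever a plug hash exists, not only when zone append is emulated, so devices with native zone append also hand out accurate cached zone reports. A hedged sketch of a report-zones consumer that depends on that write pointer (my_* names are made up; blkdev_report_zones() and the callback shape are the real interface as I understand them):

static int my_zone_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	sector_t *next_write = data;

	/*
	 * zone->wp is what the fix above keeps in sync with the block layer's
	 * write plugs; a stale cached value here would aim the next write or
	 * zone append at the wrong offset.
	 */
	if (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
	    zone->cond == BLK_ZONE_COND_EXP_OPEN)
		*next_write = zone->wp;
	return 0;
}

static int my_find_write_pos(struct block_device *bdev, sector_t zone_start,
			     sector_t *next_write)
{
	/* reports one zone starting at zone_start; returns count or -errno */
	return blkdev_report_zones(bdev, zone_start, 1, my_zone_cb, next_write);
}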
@@ -926,6 +926,7 @@ static size_t ublk_copy_user_pages(const struct request *req,
 	size_t done = 0;
 
 	rq_for_each_segment(bv, req, iter) {
+		unsigned len;
 		void *bv_buf;
 		size_t copied;
 
@@ -934,18 +935,17 @@ static size_t ublk_copy_user_pages(const struct request *req,
 			continue;
 		}
 
-		bv.bv_offset += offset;
-		bv.bv_len -= offset;
-		bv_buf = bvec_kmap_local(&bv);
+		len = bv.bv_len - offset;
+		bv_buf = kmap_local_page(bv.bv_page) + bv.bv_offset + offset;
 		if (dir == ITER_DEST)
-			copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
+			copied = copy_to_iter(bv_buf, len, uiter);
 		else
-			copied = copy_from_iter(bv_buf, bv.bv_len, uiter);
+			copied = copy_from_iter(bv_buf, len, uiter);
 
 		kunmap_local(bv_buf);
 
 		done += copied;
-		if (copied < bv.bv_len)
+		if (copied < len)
 			break;
 
 		offset = 0;
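The ublk change stops editing the on-stack bio_vec handed out by rq_for_each_segment() and instead folds the starting offset into the mapped address and copy length; iterator-produced bvecs are safest treated as read-only. A hedged, self-contained sketch of that pattern (copy_req_to_user() is made up; the kernel helpers are real):

static size_t copy_req_to_user(struct request *req, struct iov_iter *uiter,
			       size_t skip)
{
	struct req_iterator iter;
	struct bio_vec bv;
	size_t done = 0;

	rq_for_each_segment(bv, req, iter) {
		unsigned int len;
		size_t copied;
		void *p;

		if (skip >= bv.bv_len) {	/* segment lies before the window */
			skip -= bv.bv_len;
			continue;
		}
		len = bv.bv_len - skip;
		/* do not mutate bv; apply the skip at the mapping instead */
		p = kmap_local_page(bv.bv_page) + bv.bv_offset + skip;
		copied = copy_to_iter(p, len, uiter);
		kunmap_local(p);

		done += copied;
		if (copied < len)
			break;
		skip = 0;
	}
	return done;
}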
@@ -3673,6 +3673,19 @@ exit:
 	return ret;
 }
 
+static bool ublk_ctrl_uring_cmd_may_sleep(u32 cmd_op)
+{
+	switch (_IOC_NR(cmd_op)) {
+	case UBLK_CMD_GET_QUEUE_AFFINITY:
+	case UBLK_CMD_GET_DEV_INFO:
+	case UBLK_CMD_GET_DEV_INFO2:
+	case _IOC_NR(UBLK_U_CMD_GET_FEATURES):
+		return false;
+	default:
+		return true;
+	}
+}
+
 static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
 			       unsigned int issue_flags)
 {
@@ -3681,7 +3694,8 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
 	u32 cmd_op = cmd->cmd_op;
 	int ret = -EINVAL;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
+	if (ublk_ctrl_uring_cmd_may_sleep(cmd_op) &&
+	    issue_flags & IO_URING_F_NONBLOCK)
 		return -EAGAIN;
 
 	ublk_ctrl_cmd_dump(cmd);
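Background for the -EAGAIN change: io_uring first issues a uring_cmd inline from the submitting task with IO_URING_F_NONBLOCK set, and returning -EAGAIN asks it to retry from an io-wq worker where blocking is allowed. Purely informational ublk commands (GET_DEV_INFO, GET_QUEUE_AFFINITY, GET_FEATURES) never sleep, so they may now complete in the fast inline pass. A hedged sketch of that dispatch contract from a driver's point of view (all my_* names and MY_CMD_GET_INFO are made up):

#define MY_CMD_GET_INFO		0x01	/* hypothetical read-only command */

static int my_driver_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	switch (_IOC_NR(cmd->cmd_op)) {
	case MY_CMD_GET_INFO:
		/* reads a few fields, never sleeps: fine in the inline pass */
		return my_fill_info(cmd);
	default:
		/* may take mutexes or allocate: defer to io-wq if non-blocking */
		if (issue_flags & IO_URING_F_NONBLOCK)
			return -EAGAIN;	/* re-issued later without NONBLOCK */
		return my_slow_cmd(cmd);
	}
}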
@@ -1104,7 +1104,7 @@ static void detached_dev_end_io(struct bio *bio)
 	}
 
 	kfree(ddip);
-	bio->bi_end_io(bio);
+	bio_endio(bio);
 }
 
 static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
@@ -1121,7 +1121,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
 	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
 	if (!ddip) {
 		bio->bi_status = BLK_STS_RESOURCE;
-		bio->bi_end_io(bio);
+		bio_endio(bio);
 		return;
 	}
 
@@ -1136,7 +1136,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
 
 	if ((bio_op(bio) == REQ_OP_DISCARD) &&
 	    !bdev_max_discard_sectors(dc->bdev))
-		bio->bi_end_io(bio);
+		detached_dev_end_io(bio);
 	else
 		submit_bio_noacct(bio);
 }
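The bcache hunks replace direct calls to bio->bi_end_io with bio_endio() (or with the driver's own completion helper, which itself ends in bio_endio()): invoking the handler by hand skips chained-bio unwinding and the rest of the completion bookkeeping, and for a chained bio bi_end_io is the bio_chain_endio marker from the first hunk, which now BUG()s if ever called. A hedged, generic sketch of how a stacking driver should complete a bio it owns (my_* names are made up; bio_alloc_clone(), bio_endio() and submit_bio_noacct() are real):

static void my_clone_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);	/* handles chaining, accounting, cgroup hooks */
}

static void my_remap_submit(struct block_device *lower, struct bio *orig)
{
	struct bio *clone = bio_alloc_clone(lower, orig, GFP_NOIO, &fs_bio_set);

	if (!clone) {
		orig->bi_status = BLK_STS_RESOURCE;
		bio_endio(orig);	/* same rule on the error path */
		return;
	}
	clone->bi_private = orig;
	clone->bi_end_io = my_clone_end_io;
	submit_bio_noacct(clone);
}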