block-6.19-20251208
-----BEGIN PGP SIGNATURE-----
iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmk3KZsQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpkNYD/91yqAJeehx2Heq3dWj9L8hDuETQelj/g9j
gtZCiriAPy+bb1/BmWjK+BmvjtBt+g3a4Cwi6tVj4F1zoE46IPeLhO+2iJTEBiBq
AhRtEf/MFXFK3qUnTpEnS8w3CtsXejOTB81VQ+6BysSu+B708m/1AQHv2HocZ37R
jivrzfCsEdBr+ISwYw/EG5KcDBVTFo/JdXIhs7k4Z8bBfa3P5ye4EhKjORtgbFNU
5nXb78SZoWNCZF143YV++9MpZc3M2jzkzrk1CTLsUHhOxWg4T/6wTXfPGZc/W4m8
UBhs03u/gMJnKHhlZd4kpZWDito1TQZTdY2f5sBsysRQqeT7bwDK/1xiQ1nllZiP
oYbeD6t65yMAlELwNFXo7y/DNcS2VLBMvChIX6p1gweEzyf23YneoHYyN5agEQlN
9C4EdcYzZRt0DwtHlIRtKvDk2LZzkJAcLau3D6ahU/DPLOawyWZKmvGiU+sSyJjF
bEIO5c/+MLqkAgLAGaFgA4twFF1aYH9ssmJerDxprarkf1jtlOBLvUQ391Gtb5Hd
B1yugmIgEwLbCFzhk9FlCtv2nQcWRCElnaeqv+Lv+xCBVPGCLm2qIHoTqmvHZPCd
GbN/h0XLdgUboYPCFWVAX72/4K/cv+fQQcb+a7tiq6vMKcgJ/2I1szFGpFqz7azB
hyiK0v3x2g==
=r1xa
-----END PGP SIGNATURE-----
Merge tag 'block-6.19-20251208' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block updates from Jens Axboe:
"Followup set of fixes and updates for block for the 6.19 merge window.
NVMe had some last-minute debates which led to dropping some patches
from that tree, which is why the initial PR didn't have NVMe included.
It's here now. This pull request contains:
- NVMe pull request via Keith:
- Subsystem usage cleanups (Max)
- Endpoint device fixes (Shin'ichiro)
- Debug statements (Gerd)
- FC fabrics cleanups and fixes (Daniel)
- Consistent alloc API usages (Israel)
- Code comment updates (Chu)
- Authentication retry fix (Justin)
- Fix a memory leak in the discard ioctl code, if the task is being
interrupted by a signal at just the wrong time
- Zoned write plugging fixes
- Add ioctls for persistent reservations
- Enable per-cpu bio caching by default
- Various little fixes and tweaks"
* tag 'block-6.19-20251208' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux: (27 commits)
nvme-fabrics: add ENOKEY to no retry criteria for authentication failures
nvme-auth: use kvfree() for memory allocated with kvcalloc()
nvmet-tcp: use kvcalloc for commands array
nvmet-rdma: use kvcalloc for commands and responses arrays
nvme: fix typo error in nvme target
nvmet-fc: use pr_* print macros instead of dev_*
nvmet-fcloop: remove unused lsdir member.
nvmet-fcloop: check all request and response have been processed
nvme-fc: check all request and response have been processed
block: fix memory leak in __blkdev_issue_zero_pages
block: fix comment for op_is_zone_mgmt() to include RESET_ALL
block: Clear BLK_ZONE_WPLUG_PLUGGED when aborting plugged BIOs
blk-mq: Abort suspend when wakeup events are pending
blk-mq: add blk_rq_nr_bvec() helper
block: add IOC_PR_READ_RESERVATION ioctl
block: add IOC_PR_READ_KEYS ioctl
nvme: reject invalid pr_read_keys() num_keys values
scsi: sd: reject invalid pr_read_keys() num_keys values
block: enable per-cpu bio cache by default
block: use bio_alloc_bioset for passthru IO by default
...
commit 4482ebb297

block/bio.c
@@ -517,20 +517,18 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
 		return NULL;
 
-	if (opf & REQ_ALLOC_CACHE) {
-		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
+	if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
+		opf |= REQ_ALLOC_CACHE;
 		bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
 					     gfp_mask, bs);
 		if (bio)
 			return bio;
 		/*
 		 * No cached bio available, bio returned below marked with
-		 * REQ_ALLOC_CACHE to particpate in per-cpu alloc cache.
+		 * REQ_ALLOC_CACHE to participate in per-cpu alloc cache.
 		 */
-	} else {
+	} else
 		opf &= ~REQ_ALLOC_CACHE;
-	}
-	}
 
 	/*
 	 * submit_bio_noacct() converts recursion to iteration; this means if
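For callers, the net effect of the hunk above: a bioset created with BIOSET_PERCPU_CACHE now uses the per-cpu bio cache by default, with no REQ_ALLOC_CACHE needed at the call site. A minimal sketch, assuming a hypothetical driver-local bioset named my_bio_set (not from this patch):

#include <linux/bio.h>

static struct bio_set my_bio_set;	/* hypothetical driver bioset */

static int my_driver_init(void)
{
	/* BIOSET_PERCPU_CACHE opts this bioset into the per-cpu bio cache */
	return bioset_init(&my_bio_set, 64, 0, BIOSET_PERCPU_CACHE);
}

static struct bio *my_alloc_bio(struct block_device *bdev)
{
	/*
	 * No REQ_ALLOC_CACHE in opf: with the change above, small
	 * allocations (nr_vecs <= BIO_INLINE_VECS) are served from the
	 * per-cpu cache automatically.
	 */
	return bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_KERNEL,
				&my_bio_set);
}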
@@ -202,13 +202,13 @@ static void __blkdev_issue_zero_pages(struct block_device *bdev,
 		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
 		struct bio *bio;
 
-		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
-		bio->bi_iter.bi_sector = sector;
-
 		if ((flags & BLKDEV_ZERO_KILLABLE) &&
 		    fatal_signal_pending(current))
 			break;
 
+		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
+		bio->bi_iter.bi_sector = sector;
+
 		do {
 			unsigned int len;
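An illustrative userspace reduction of the leak this hunk fixes (not kernel code; the names are made up): when the buffer is allocated before the early-exit check, the iteration that breaks out never releases it. Checking first, as the hunk does with fatal_signal_pending(), avoids that:

#include <stdbool.h>
#include <stdlib.h>

static bool fatal_signal(int i)
{
	return i == 3;			/* stand-in for fatal_signal_pending() */
}

int main(void)
{
	for (int i = 0; i < 8; i++) {
		if (fatal_signal(i))
			break;			/* nothing allocated yet */
		char *buf = malloc(4096);	/* was allocated before the check */
		if (!buf)
			return 1;
		/* ... fill and consume buf ... */
		free(buf);
	}
	return 0;
}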
@@ -37,6 +37,25 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
 	return bmd;
 }
 
+static inline void blk_mq_map_bio_put(struct bio *bio)
+{
+	bio_put(bio);
+}
+
+static struct bio *blk_rq_map_bio_alloc(struct request *rq,
+		unsigned int nr_vecs, gfp_t gfp_mask)
+{
+	struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
+	struct bio *bio;
+
+	bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
+			       &fs_bio_set);
+	if (!bio)
+		return NULL;
+
+	return bio;
+}
+
 /**
  * bio_copy_from_iter - copy all pages from iov_iter to bio
  * @bio: The &struct bio which describes the I/O as destination
@@ -154,10 +173,9 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
 
 	ret = -ENOMEM;
-	bio = bio_kmalloc(nr_pages, gfp_mask);
+	bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
 	if (!bio)
 		goto out_bmd;
-	bio_init_inline(bio, NULL, nr_pages, req_op(rq));
 
 	if (map_data) {
 		nr_pages = 1U << map_data->page_order;
@@ -233,43 +251,12 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 cleanup:
 	if (!map_data)
 		bio_free_pages(bio);
-	bio_uninit(bio);
-	kfree(bio);
+	blk_mq_map_bio_put(bio);
 out_bmd:
 	kfree(bmd);
 	return ret;
 }
 
-static void blk_mq_map_bio_put(struct bio *bio)
-{
-	if (bio->bi_opf & REQ_ALLOC_CACHE) {
-		bio_put(bio);
-	} else {
-		bio_uninit(bio);
-		kfree(bio);
-	}
-}
-
-static struct bio *blk_rq_map_bio_alloc(struct request *rq,
-		unsigned int nr_vecs, gfp_t gfp_mask)
-{
-	struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
-	struct bio *bio;
-
-	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
-		bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
-					&fs_bio_set);
-		if (!bio)
-			return NULL;
-	} else {
-		bio = bio_kmalloc(nr_vecs, gfp_mask);
-		if (!bio)
-			return NULL;
-		bio_init_inline(bio, bdev, nr_vecs, req_op(rq));
-	}
-	return bio;
-}
-
 static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		gfp_t gfp_mask)
 {
@@ -318,25 +305,23 @@ static void bio_invalidate_vmalloc_pages(struct bio *bio)
 static void bio_map_kern_endio(struct bio *bio)
 {
 	bio_invalidate_vmalloc_pages(bio);
-	bio_uninit(bio);
-	kfree(bio);
+	blk_mq_map_bio_put(bio);
 }
 
-static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op,
+static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len,
 		gfp_t gfp_mask)
 {
 	unsigned int nr_vecs = bio_add_max_vecs(data, len);
 	struct bio *bio;
 
-	bio = bio_kmalloc(nr_vecs, gfp_mask);
+	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
-	bio_init_inline(bio, NULL, nr_vecs, op);
 	if (is_vmalloc_addr(data)) {
 		bio->bi_private = data;
 		if (!bio_add_vmalloc(bio, data, len)) {
-			bio_uninit(bio);
-			kfree(bio);
+			blk_mq_map_bio_put(bio);
 			return ERR_PTR(-EINVAL);
 		}
 	} else {
@@ -349,8 +334,7 @@ static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len,
 static void bio_copy_kern_endio(struct bio *bio)
 {
 	bio_free_pages(bio);
-	bio_uninit(bio);
-	kfree(bio);
+	blk_mq_map_bio_put(bio);
 }
 
 static void bio_copy_kern_endio_read(struct bio *bio)
@@ -369,6 +353,7 @@ static void bio_copy_kern_endio_read(struct bio *bio)
 
 /**
  * bio_copy_kern - copy kernel address into bio
+ * @rq: request to fill
  * @data: pointer to buffer to copy
  * @len: length in bytes
  * @op: bio/request operation
@@ -377,9 +362,10 @@ static void bio_copy_kern_endio_read(struct bio *bio)
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
-static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
+static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len,
 		gfp_t gfp_mask)
 {
+	enum req_op op = req_op(rq);
 	unsigned long kaddr = (unsigned long)data;
 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned long start = kaddr >> PAGE_SHIFT;
@@ -394,10 +380,9 @@ static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len,
 		return ERR_PTR(-EINVAL);
 
 	nr_pages = end - start;
-	bio = bio_kmalloc(nr_pages, gfp_mask);
+	bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
-	bio_init_inline(bio, NULL, nr_pages, op);
 
 	while (len) {
 		struct page *page;
@@ -431,8 +416,7 @@ static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len,
 
 cleanup:
 	bio_free_pages(bio);
-	bio_uninit(bio);
-	kfree(bio);
+	blk_mq_map_bio_put(bio);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -679,18 +663,16 @@ int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
 		return -EINVAL;
 
 	if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf))
-		bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask);
+		bio = bio_copy_kern(rq, kbuf, len, gfp_mask);
 	else
-		bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask);
+		bio = bio_map_kern(rq, kbuf, len, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
 	ret = blk_rq_append_bio(rq, bio);
-	if (unlikely(ret)) {
-		bio_uninit(bio);
-		kfree(bio);
-	}
+	if (unlikely(ret))
+		blk_mq_map_bio_put(bio);
 	return ret;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
@@ -23,6 +23,7 @@
 #include <linux/cache.h>
 #include <linux/sched/topology.h>
 #include <linux/sched/signal.h>
+#include <linux/suspend.h>
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
 #include <linux/prefetch.h>
@@ -3718,6 +3719,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
 {
 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
 			struct blk_mq_hw_ctx, cpuhp_online);
+	int ret = 0;
 
 	if (blk_mq_hctx_has_online_cpu(hctx, cpu))
 		return 0;
@@ -3738,12 +3740,24 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
 	 * frozen and there are no requests.
 	 */
 	if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
-		while (blk_mq_hctx_has_requests(hctx))
+		while (blk_mq_hctx_has_requests(hctx)) {
+			/*
+			 * The wakeup capable IRQ handler of block device is
+			 * not called during suspend. Skip the loop by checking
+			 * pm_wakeup_pending to prevent the deadlock and improve
+			 * suspend latency.
+			 */
+			if (pm_wakeup_pending()) {
+				clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
+				ret = -EBUSY;
+				break;
+			}
 			msleep(5);
+		}
 		percpu_ref_put(&hctx->queue->q_usage_counter);
 	}
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -741,6 +741,8 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
 {
 	struct bio *bio;
 
+	lockdep_assert_held(&zwplug->lock);
+
 	if (bio_list_empty(&zwplug->bio_list))
 		return;
 
@@ -748,6 +750,8 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
 		zwplug->disk->disk_name, zwplug->zone_no);
 	while ((bio = bio_list_pop(&zwplug->bio_list)))
 		blk_zone_wplug_bio_io_error(zwplug, bio);
+
+	zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
 }
 
 /*
@@ -184,8 +184,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	loff_t pos = iocb->ki_pos;
 	int ret = 0;
 
-	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
-		opf |= REQ_ALLOC_CACHE;
 	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
 			       &blkdev_dio_pool);
 	dio = container_of(bio, struct blkdev_dio, bio);
@@ -333,8 +331,6 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 	loff_t pos = iocb->ki_pos;
 	int ret = 0;
 
-	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
-		opf |= REQ_ALLOC_CACHE;
 	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
 			       &blkdev_dio_pool);
 	dio = container_of(bio, struct blkdev_dio, bio);
@@ -423,6 +423,86 @@ static int blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode,
 	return ops->pr_clear(bdev, c.key);
 }
 
+static int blkdev_pr_read_keys(struct block_device *bdev, blk_mode_t mode,
+		struct pr_read_keys __user *arg)
+{
+	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+	struct pr_keys *keys_info;
+	struct pr_read_keys read_keys;
+	u64 __user *keys_ptr;
+	size_t keys_info_len;
+	size_t keys_copy_len;
+	int ret;
+
+	if (!blkdev_pr_allowed(bdev, mode))
+		return -EPERM;
+	if (!ops || !ops->pr_read_keys)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&read_keys, arg, sizeof(read_keys)))
+		return -EFAULT;
+
+	keys_info_len = struct_size(keys_info, keys, read_keys.num_keys);
+	if (keys_info_len == SIZE_MAX)
+		return -EINVAL;
+
+	keys_info = kzalloc(keys_info_len, GFP_KERNEL);
+	if (!keys_info)
+		return -ENOMEM;
+
+	keys_info->num_keys = read_keys.num_keys;
+
+	ret = ops->pr_read_keys(bdev, keys_info);
+	if (ret)
+		goto out;
+
+	/* Copy out individual keys */
+	keys_ptr = u64_to_user_ptr(read_keys.keys_ptr);
+	keys_copy_len = min(read_keys.num_keys, keys_info->num_keys) *
+			sizeof(keys_info->keys[0]);
+
+	if (copy_to_user(keys_ptr, keys_info->keys, keys_copy_len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	/* Copy out the arg struct */
+	read_keys.generation = keys_info->generation;
+	read_keys.num_keys = keys_info->num_keys;
+
+	if (copy_to_user(arg, &read_keys, sizeof(read_keys)))
+		ret = -EFAULT;
+out:
+	kfree(keys_info);
+	return ret;
+}
+
+static int blkdev_pr_read_reservation(struct block_device *bdev,
+		blk_mode_t mode, struct pr_read_reservation __user *arg)
+{
+	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+	struct pr_held_reservation rsv = {};
+	struct pr_read_reservation out = {};
+	int ret;
+
+	if (!blkdev_pr_allowed(bdev, mode))
+		return -EPERM;
+	if (!ops || !ops->pr_read_reservation)
+		return -EOPNOTSUPP;
+
+	ret = ops->pr_read_reservation(bdev, &rsv);
+	if (ret)
+		return ret;
+
+	out.key = rsv.key;
+	out.generation = rsv.generation;
+	out.type = rsv.type;
+
+	if (copy_to_user(arg, &out, sizeof(out)))
+		return -EFAULT;
+	return 0;
+}
+
 static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
 		unsigned long arg)
 {
@@ -645,6 +725,10 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
 		return blkdev_pr_preempt(bdev, mode, argp, true);
 	case IOC_PR_CLEAR:
 		return blkdev_pr_clear(bdev, mode, argp);
+	case IOC_PR_READ_KEYS:
+		return blkdev_pr_read_keys(bdev, mode, argp);
+	case IOC_PR_READ_RESERVATION:
+		return blkdev_pr_read_reservation(bdev, mode, argp);
 	default:
 		return blk_get_meta_cap(bdev, cmd, argp);
 	}
@@ -348,11 +348,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 	struct file *file = lo->lo_backing_file;
 	struct bio_vec tmp;
 	unsigned int offset;
-	int nr_bvec = 0;
+	unsigned int nr_bvec;
 	int ret;
 
-	rq_for_each_bvec(tmp, rq, rq_iter)
-		nr_bvec++;
+	nr_bvec = blk_rq_nr_bvec(rq);
 
 	if (rq->bio != rq->biotail) {
@@ -394,7 +394,7 @@ static void zloop_rw(struct zloop_cmd *cmd)
 	struct bio_vec tmp;
 	unsigned long flags;
 	sector_t zone_end;
-	int nr_bvec = 0;
+	unsigned int nr_bvec;
 	int ret;
 
 	atomic_set(&cmd->ref, 2);
@@ -487,8 +487,7 @@ static void zloop_rw(struct zloop_cmd *cmd)
 		spin_unlock_irqrestore(&zone->wp_lock, flags);
 	}
 
-	rq_for_each_bvec(tmp, rq, rq_iter)
-		nr_bvec++;
+	nr_bvec = blk_rq_nr_bvec(rq);
 
 	if (rq->bio != rq->biotail) {
 		struct bio_vec *bvec;
@@ -1122,7 +1122,7 @@ void nvme_auth_free(struct nvme_ctrl *ctrl)
 	if (ctrl->dhchap_ctxs) {
 		for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
 			nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
-		kfree(ctrl->dhchap_ctxs);
+		kvfree(ctrl->dhchap_ctxs);
 	}
 	if (ctrl->host_key) {
 		nvme_auth_free_key(ctrl->host_key);
@@ -592,7 +592,7 @@ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
 	if (status > 0 && (status & NVME_STATUS_DNR))
 		return false;
 
-	if (status == -EKEYREJECTED)
+	if (status == -EKEYREJECTED || status == -ENOKEY)
 		return false;
 
 	if (ctrl->opts->max_reconnects == -1 ||
@@ -520,6 +520,8 @@ nvme_fc_free_rport(struct kref *ref)
 
 	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
 	WARN_ON(!list_empty(&rport->ctrl_list));
+	WARN_ON(!list_empty(&rport->ls_req_list));
+	WARN_ON(!list_empty(&rport->ls_rcv_list));
 
 	/* remove from lport list */
 	spin_lock_irqsave(&nvme_fc_lock, flags);
@@ -1468,14 +1470,14 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
 {
 	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
 			&lsop->rqstbuf->rq_dis_assoc;
-	struct nvme_fc_ctrl *ctrl, *ret = NULL;
+	struct nvme_fc_ctrl *ctrl, *tmp, *ret = NULL;
 	struct nvmefc_ls_rcv_op *oldls = NULL;
 	u64 association_id = be64_to_cpu(rqst->associd.association_id);
 	unsigned long flags;
 
 	spin_lock_irqsave(&rport->lock, flags);
 
-	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+	list_for_each_entry_safe(ctrl, tmp, &rport->ctrl_list, ctrl_list) {
 		if (!nvme_fc_ctrl_get(ctrl))
 			continue;
 		spin_lock(&ctrl->lock);
@@ -1488,7 +1490,9 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
 		if (ret)
 			/* leave the ctrl get reference */
 			break;
+		spin_unlock_irqrestore(&rport->lock, flags);
 		nvme_fc_ctrl_put(ctrl);
+		spin_lock_irqsave(&rport->lock, flags);
 	}
 
 	spin_unlock_irqrestore(&rport->lock, flags);
@@ -447,7 +447,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	struct iov_iter iter;
 	struct iov_iter *map_iter = NULL;
 	struct request *req;
-	blk_opf_t rq_flags = REQ_ALLOC_CACHE;
+	blk_opf_t rq_flags = 0;
 	blk_mq_req_flags_t blk_flags = 0;
 	int ret;
 
@@ -2984,6 +2984,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	pci_set_master(pdev);
 
 	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
+		dev_dbg(dev->ctrl.device, "reading CSTS register failed\n");
 		result = -ENODEV;
 		goto disable;
 	}
@@ -3609,6 +3610,7 @@ out_uninit_ctrl:
 	nvme_uninit_ctrl(&dev->ctrl);
 out_put_ctrl:
 	nvme_put_ctrl(&dev->ctrl);
+	dev_err_probe(&pdev->dev, result, "probe failed\n");
 	return result;
 }
 
@@ -228,7 +228,8 @@ retry:
 static int nvme_pr_read_keys(struct block_device *bdev,
 		struct pr_keys *keys_info)
 {
-	u32 rse_len, num_keys = keys_info->num_keys;
+	size_t rse_len;
+	u32 num_keys = keys_info->num_keys;
 	struct nvme_reservation_status_ext *rse;
 	int ret, i;
 	bool eds;
@@ -238,6 +239,9 @@ static int nvme_pr_read_keys(struct block_device *bdev,
 	 * enough to get enough keys to fill the return keys buffer.
 	 */
 	rse_len = struct_size(rse, regctl_eds, num_keys);
+	if (rse_len > U32_MAX)
+		return -EINVAL;
+
 	rse = kzalloc(rse_len, GFP_KERNEL);
 	if (!rse)
 		return -ENOMEM;
@@ -708,7 +708,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 
 	/*
 	 * We don't really have a practical limit on the number of abort
-	 * comands. But we don't do anything useful for abort either, so
+	 * commands. But we don't do anything useful for abort either, so
 	 * no point in allowing more abort commands than the spec requires.
 	 */
 	id->acl = 3;
@@ -381,8 +381,8 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
 	ret = crypto_shash_update(shash, buf, 1);
 	if (ret)
 		goto out;
-	ret = crypto_shash_update(shash, ctrl->subsysnqn,
-			strlen(ctrl->subsysnqn));
+	ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
+			strlen(ctrl->subsys->subsysnqn));
 	if (ret)
 		goto out;
 	ret = crypto_shash_final(shash, response);
@@ -429,7 +429,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
 	}
 
 	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
-			ctrl->subsysnqn);
+			ctrl->subsys->subsysnqn);
 	if (IS_ERR(transformed_key)) {
 		ret = PTR_ERR(transformed_key);
 		goto out_free_tfm;
@@ -484,8 +484,8 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
 	ret = crypto_shash_update(shash, "Controller", 10);
 	if (ret)
 		goto out;
-	ret = crypto_shash_update(shash, ctrl->subsysnqn,
-			strlen(ctrl->subsysnqn));
+	ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
+			strlen(ctrl->subsys->subsysnqn));
 	if (ret)
 		goto out;
 	ret = crypto_shash_update(shash, buf, 1);
@@ -575,7 +575,7 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
 		return;
 	}
 	ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len,
-					sq->ctrl->subsysnqn,
+					sq->ctrl->subsys->subsysnqn,
 					sq->ctrl->hostnqn, &digest);
 	if (ret) {
 		pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n",
@@ -590,8 +590,10 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
 		goto out_free_digest;
 	}
 #ifdef CONFIG_NVME_TARGET_TCP_TLS
-	tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, sq->ctrl->subsysnqn,
-			sq->ctrl->shash_id, tls_psk, psk_len, digest);
+	tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn,
+			sq->ctrl->subsys->subsysnqn,
+			sq->ctrl->shash_id, tls_psk, psk_len,
+			digest);
 	if (IS_ERR(tls_key)) {
 		pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
 			__func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(nvmet_wq);
  * - the nvmet_transports array
  *
  * When updating any of those lists/structures write lock should be obtained,
- * while when reading (popolating discovery log page or checking host-subsystem
+ * while when reading (populating discovery log page or checking host-subsystem
  * link) read lock is obtained to allow concurrent reads.
  */
 DECLARE_RWSEM(nvmet_config_sem);
@@ -1628,7 +1628,6 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
 
-	memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
 
 	kref_init(&ctrl->ref);
@@ -1903,6 +1902,8 @@ static void nvmet_subsys_free(struct kref *ref)
 	struct nvmet_subsys *subsys =
 			container_of(ref, struct nvmet_subsys, ref);
 
+	WARN_ON_ONCE(!list_empty(&subsys->ctrls));
+	WARN_ON_ONCE(!list_empty(&subsys->hosts));
 	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
 
 	nvmet_debugfs_subsys_free(subsys);
@@ -490,8 +490,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
 			sizeof(*discon_rqst) + sizeof(*discon_acc) +
 			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
 	if (!lsop) {
-		dev_info(tgtport->dev,
-			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
+		pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n",
 			tgtport->fc_target_port.port_num, assoc->a_id);
 		return;
 	}
@@ -513,8 +512,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
 	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
 				nvmet_fc_disconnect_assoc_done);
 	if (ret) {
-		dev_info(tgtport->dev,
-			"{%d:%d} XMT Disconnect Association failed: %d\n",
+		pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n",
 			tgtport->fc_target_port.port_num, assoc->a_id, ret);
 		kfree(lsop);
 	}
@@ -1187,8 +1185,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
 	if (oldls)
 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
 	ida_free(&tgtport->assoc_cnt, assoc->a_id);
-	dev_info(tgtport->dev,
-		"{%d:%d} Association freed\n",
+	pr_info("{%d:%d}: Association freed\n",
 		tgtport->fc_target_port.port_num, assoc->a_id);
 	kfree(assoc);
 }
@@ -1224,8 +1221,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
 		flush_workqueue(assoc->queues[i]->work_q);
 	}
 
-	dev_info(tgtport->dev,
-		"{%d:%d} Association deleted\n",
+	pr_info("{%d:%d}: Association deleted\n",
 		tgtport->fc_target_port.port_num, assoc->a_id);
 
 	nvmet_fc_tgtport_put(tgtport);
@@ -1716,9 +1712,9 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 	}
 
 	if (ret) {
-		dev_err(tgtport->dev,
-			"Create Association LS failed: %s\n",
+		pr_err("{%d}: Create Association LS failed: %s\n",
+			tgtport->fc_target_port.port_num,
 			validation_errors[ret]);
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
 				sizeof(*acc), rqst->w0.ls_cmd,
 				FCNVME_RJT_RC_LOGIC,
@@ -1730,8 +1726,7 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 	atomic_set(&queue->connected, 1);
 	queue->sqhd = 0;	/* best place to init value */
 
-	dev_info(tgtport->dev,
-		"{%d:%d} Association created\n",
+	pr_info("{%d:%d}: Association created\n",
 		tgtport->fc_target_port.port_num, iod->assoc->a_id);
 
 	/* format a response */
@@ -1809,9 +1804,9 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
 	}
 
 	if (ret) {
-		dev_err(tgtport->dev,
-			"Create Connection LS failed: %s\n",
+		pr_err("{%d}: Create Connection LS failed: %s\n",
+			tgtport->fc_target_port.port_num,
 			validation_errors[ret]);
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
 				sizeof(*acc), rqst->w0.ls_cmd,
 				(ret == VERR_NO_ASSOC) ?
@@ -1871,9 +1866,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 	}
 
 	if (ret || !assoc) {
-		dev_err(tgtport->dev,
-			"Disconnect LS failed: %s\n",
+		pr_err("{%d}: Disconnect LS failed: %s\n",
+			tgtport->fc_target_port.port_num,
 			validation_errors[ret]);
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
 				sizeof(*acc), rqst->w0.ls_cmd,
 				(ret == VERR_NO_ASSOC) ?
@@ -1907,8 +1902,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 
 	if (oldls) {
-		dev_info(tgtport->dev,
-			"{%d:%d} Multiple Disconnect Association LS's "
+		pr_info("{%d:%d}: Multiple Disconnect Association LS's "
 			"received\n",
 			tgtport->fc_target_port.port_num, assoc->a_id);
 		/* overwrite good response with bogus failure */
@@ -2051,8 +2045,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
 
 	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
-		dev_info(tgtport->dev,
-			"RCV %s LS failed: payload too large (%d)\n",
+		pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n",
+			tgtport->fc_target_port.port_num,
 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
 				nvmefc_ls_names[w0->ls_cmd] : "",
 			lsreqbuf_len);
@@ -2060,8 +2054,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	}
 
 	if (!nvmet_fc_tgtport_get(tgtport)) {
-		dev_info(tgtport->dev,
-			"RCV %s LS failed: target deleting\n",
+		pr_info("{%d}: RCV %s LS failed: target deleting\n",
+			tgtport->fc_target_port.port_num,
 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
 				nvmefc_ls_names[w0->ls_cmd] : "");
 		return -ESHUTDOWN;
@@ -2069,8 +2063,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 
 	iod = nvmet_fc_alloc_ls_iod(tgtport);
 	if (!iod) {
-		dev_info(tgtport->dev,
-			"RCV %s LS failed: context allocation failed\n",
+		pr_info("{%d}: RCV %s LS failed: context allocation failed\n",
+			tgtport->fc_target_port.port_num,
 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
 				nvmefc_ls_names[w0->ls_cmd] : "");
 		nvmet_fc_tgtport_put(tgtport);
@@ -254,7 +254,6 @@ struct fcloop_nport {
 struct fcloop_lsreq {
 	struct nvmefc_ls_req		*lsreq;
 	struct nvmefc_ls_rsp		ls_rsp;
-	int				lsdir;	/* H2T or T2H */
 	int				status;
 	struct list_head		ls_list; /* fcloop_rport->ls_list */
 };
@@ -1111,8 +1110,10 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 		rport->nport->rport = NULL;
 	spin_unlock_irqrestore(&fcloop_lock, flags);
 
-	if (put_port)
+	if (put_port) {
+		WARN_ON(!list_empty(&rport->ls_list));
 		fcloop_nport_put(rport->nport);
+	}
 }
 
 static void
@@ -1130,8 +1131,10 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
 		tport->nport->tport = NULL;
 	spin_unlock_irqrestore(&fcloop_lock, flags);
 
-	if (put_port)
+	if (put_port) {
+		WARN_ON(!list_empty(&tport->ls_list));
 		fcloop_nport_put(tport->nport);
+	}
 }
 
 #define FCLOOP_HW_QUEUES	4
@@ -285,7 +285,6 @@ struct nvmet_ctrl {
 	__le32			*changed_ns_list;
 	u32			nr_changed_ns;
 
-	char			subsysnqn[NVMF_NQN_FIELD_LEN];
 	char			hostnqn[NVMF_NQN_FIELD_LEN];
 
 	struct device		*p2p_client;
@@ -150,7 +150,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 	 * code path with duplicate ctrl subsysnqn. In order to prevent that we
 	 * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
 	 */
-	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
+	memcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
 
 	/* use fabric id-ctrl values */
 	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
@@ -320,12 +320,14 @@ static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
 	nvme_epf->dma_enabled = true;
 
 	dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
-		dma_chan_name(chan),
-		dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+		dma_chan_name(nvme_epf->dma_rx_chan),
+		dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
+							      dma_rx_chan)));
 
 	dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
-		dma_chan_name(chan),
-		dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+		dma_chan_name(nvme_epf->dma_tx_chan),
+		dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
+							      dma_tx_chan)));
 
 	return;
 
@@ -2325,6 +2327,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
 		return ret;
 	}
 
+	nvmet_pci_epf_init_dma(nvme_epf);
+
 	/* Set device ID, class, etc. */
 	epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
 	epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
@@ -2422,8 +2426,6 @@ static int nvmet_pci_epf_bind(struct pci_epf *epf)
 	if (ret)
 		return ret;
 
-	nvmet_pci_epf_init_dma(nvme_epf);
-
 	return 0;
 }
 
@@ -367,7 +367,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
 	struct nvmet_rdma_cmd *cmds;
 	int ret = -EINVAL, i;
 
-	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
+	cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
 	if (!cmds)
 		goto out;
 
@@ -382,7 +382,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
 out_free:
 	while (--i >= 0)
 		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
-	kfree(cmds);
+	kvfree(cmds);
 out:
 	return ERR_PTR(ret);
 }
@@ -394,7 +394,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
 
 	for (i = 0; i < nr_cmds; i++)
 		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
-	kfree(cmds);
+	kvfree(cmds);
 }
 
 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
@@ -455,7 +455,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 			     NUMA_NO_NODE, false, true))
 		goto out;
 
-	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
+	queue->rsps = kvcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
 			      GFP_KERNEL);
 	if (!queue->rsps)
 		goto out_free_sbitmap;
@@ -473,7 +473,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 out_free:
 	while (--i >= 0)
 		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
-	kfree(queue->rsps);
+	kvfree(queue->rsps);
 out_free_sbitmap:
 	sbitmap_free(&queue->rsp_tags);
 out:
@@ -487,7 +487,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
 
 	for (i = 0; i < nr_rsps; i++)
 		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
-	kfree(queue->rsps);
+	kvfree(queue->rsps);
 	sbitmap_free(&queue->rsp_tags);
 }
 
||||||
|
|
@ -1484,7 +1484,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
|
||||||
struct nvmet_tcp_cmd *cmds;
|
struct nvmet_tcp_cmd *cmds;
|
||||||
int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
|
int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
|
||||||
|
|
||||||
cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
|
cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
|
||||||
if (!cmds)
|
if (!cmds)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
|
@ -1500,7 +1500,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
|
||||||
out_free:
|
out_free:
|
||||||
while (--i >= 0)
|
while (--i >= 0)
|
||||||
nvmet_tcp_free_cmd(cmds + i);
|
nvmet_tcp_free_cmd(cmds + i);
|
||||||
kfree(cmds);
|
kvfree(cmds);
|
||||||
out:
|
out:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
@ -1514,7 +1514,7 @@ static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
|
||||||
nvmet_tcp_free_cmd(cmds + i);
|
nvmet_tcp_free_cmd(cmds + i);
|
||||||
|
|
||||||
nvmet_tcp_free_cmd(&queue->connect);
|
nvmet_tcp_free_cmd(&queue->connect);
|
||||||
kfree(cmds);
|
kvfree(cmds);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
|
static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
|
||||||
|
|
|
||||||
|
|
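For context (an explanatory sketch, not part of the diff): kvcalloc()/kvfree() behave like kcalloc()/kfree() for small arrays but fall back to vmalloc() when the allocation is large, so command arrays sized by deep queues no longer require large physically contiguous memory. The pairing, in miniature:

#include <linux/slab.h>

/* illustrative only: allocate and free a possibly-large zeroed array */
static int *alloc_big_array(size_t nr)
{
	return kvcalloc(nr, sizeof(int), GFP_KERNEL);
}

static void free_big_array(int *arr)
{
	kvfree(arr);	/* handles both kmalloc and vmalloc backing */
}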
@@ -2004,9 +2004,19 @@ static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info)
 {
 	int result, i, data_offset, num_copy_keys;
 	u32 num_keys = keys_info->num_keys;
-	int data_len = num_keys * 8 + 8;
+	int data_len;
 	u8 *data;
 
+	/*
+	 * Each reservation key takes 8 bytes and there is an 8-byte header
+	 * before the reservation key list. The total size must fit into the
+	 * 16-bit ALLOCATION LENGTH field.
+	 */
+	if (check_mul_overflow(num_keys, 8, &data_len) ||
+	    check_add_overflow(data_len, 8, &data_len) ||
+	    data_len > USHRT_MAX)
+		return -EINVAL;
+
 	data = kzalloc(data_len, GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
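A worked consequence of the bound above (inferred from the check, not stated in the patch): with 8 bytes per key plus an 8-byte header capped at USHRT_MAX, the largest num_keys this path accepts is (65535 - 8) / 8 = 8190:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* data_len = num_keys * 8 + 8 must stay <= USHRT_MAX (65535) */
	unsigned int max_keys = (USHRT_MAX - 8) / 8;

	printf("largest num_keys accepted: %u\n", max_keys);	/* 8190 */
	return 0;
}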
@@ -1213,6 +1213,24 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
 	return max_t(unsigned short, rq->nr_phys_segments, 1);
 }
 
+/**
+ * blk_rq_nr_bvec - return number of bvecs in a request
+ * @rq: request to calculate bvecs for
+ *
+ * Returns the number of bvecs.
+ */
+static inline unsigned int blk_rq_nr_bvec(struct request *rq)
+{
+	struct req_iterator rq_iter;
+	struct bio_vec bv;
+	unsigned int nr_bvec = 0;
+
+	rq_for_each_bvec(bv, rq, rq_iter)
+		nr_bvec++;
+
+	return nr_bvec;
+}
+
 int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
 		struct scatterlist **last_sg);
 static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
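A hedged usage sketch of the new helper (my_queue_rq is a made-up driver hook), mirroring the loop and zloop conversions earlier in this series:

#include <linux/blk-mq.h>

/* illustrative only: count bvecs with the helper instead of an
 * open-coded rq_for_each_bvec() loop */
static blk_status_t my_queue_rq(struct request *rq)
{
	unsigned int nr_bvec = blk_rq_nr_bvec(rq);

	if (!nr_bvec)
		return BLK_STS_IOERR;
	/* size per-command state by nr_bvec ... */
	return BLK_STS_OK;
}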
||||||
|
|
@ -479,10 +479,7 @@ static inline bool op_is_discard(blk_opf_t op)
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Check if a bio or request operation is a zone management operation, with
|
* Check if a bio or request operation is a zone management operation.
|
||||||
* the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
|
|
||||||
* due to its different handling in the block layer and device response in
|
|
||||||
* case of command failure.
|
|
||||||
*/
|
*/
|
||||||
static inline bool op_is_zone_mgmt(enum req_op op)
|
static inline bool op_is_zone_mgmt(enum req_op op)
|
||||||
{
|
{
|
||||||
|
|
|
||||||
|
|
@@ -56,6 +56,18 @@ struct pr_clear {
 	__u32	__pad;
 };
 
+struct pr_read_keys {
+	__u32	generation;
+	__u32	num_keys;
+	__u64	keys_ptr;
+};
+
+struct pr_read_reservation {
+	__u64	key;
+	__u32	generation;
+	__u32	type;
+};
+
 #define PR_FL_IGNORE_KEY	(1 << 0)	/* ignore existing key */
 
 #define IOC_PR_REGISTER		_IOW('p', 200, struct pr_registration)
@@ -64,5 +76,7 @@ struct pr_clear {
 #define IOC_PR_PREEMPT		_IOW('p', 203, struct pr_preempt)
 #define IOC_PR_PREEMPT_ABORT	_IOW('p', 204, struct pr_preempt)
 #define IOC_PR_CLEAR		_IOW('p', 205, struct pr_clear)
+#define IOC_PR_READ_KEYS	_IOWR('p', 206, struct pr_read_keys)
+#define IOC_PR_READ_RESERVATION	_IOR('p', 207, struct pr_read_reservation)
 
 #endif /* _UAPI_PR_H */
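A hedged userspace sketch of the two new ioctls (the device path, error handling, and the call-twice pattern are assumptions for illustration, not part of the patch; requires a kernel and linux/pr.h carrying these definitions):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_read_keys prk = { .num_keys = 0 };
	struct pr_read_reservation rsv;
	int fd = open("/dev/sdX", O_RDWR);	/* placeholder device */

	if (fd < 0)
		return 1;

	/* first call with num_keys == 0: learn how many keys are registered */
	if (ioctl(fd, IOC_PR_READ_KEYS, &prk) < 0)
		return 1;

	uint32_t asked = prk.num_keys;
	if (!asked) {
		close(fd);
		return 0;
	}
	uint64_t *keys = calloc(asked, sizeof(*keys));
	if (!keys)
		return 1;
	prk.keys_ptr = (uintptr_t)keys;

	/* second call: the kernel copies out min(asked, registered) keys */
	if (ioctl(fd, IOC_PR_READ_KEYS, &prk) < 0)
		return 1;
	for (uint32_t i = 0; i < asked && i < prk.num_keys; i++)
		printf("key[%u] = 0x%llx\n", i, (unsigned long long)keys[i]);

	if (ioctl(fd, IOC_PR_READ_RESERVATION, &rsv) == 0)
		printf("holder 0x%llx type %u generation %u\n",
		       (unsigned long long)rsv.key, rsv.type, rsv.generation);

	free(keys);
	close(fd);
	return 0;
}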
||||||
|
|
@ -855,7 +855,6 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
|
||||||
ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
|
ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
|
||||||
if (unlikely(ret))
|
if (unlikely(ret))
|
||||||
return ret;
|
return ret;
|
||||||
kiocb->ki_flags |= IOCB_ALLOC_CACHE;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If the file is marked O_NONBLOCK, still allow retry for it if it
|
* If the file is marked O_NONBLOCK, still allow retry for it if it
|
||||||
|
|
|
||||||