nvme updates for Linux 6.19

  - Subsystem usage cleanups (Max)
  - Endpoint device fixes (Shin'ichiro)
  - Debug statements (Gerd)
  - FC fabrics cleanups and fixes (Daniel)
  - Consistent alloc API usages (Israel)
  - Code comment updates (Chu)
  - Authentication retry fix (Justin)
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE3Fbyvv+648XNRdHTPe3zGtjzRgkFAmkyEDQACgkQPe3zGtjz
 RgnY0w//VDEwG8L9jVHgK6oGaABVhT0QhEGs6RcwsRvXeQT81TRU3aazS3VWQ281
 +HZxM+8Jnhlg7FYZTV6+pTjgSzr7hvrEtDSm+1wCC6t2FtmVeluka/NedX15JA+/
 JUo5Tged3qckQZPDjFbxCuSwjGJdWyCkyaBQqEFTYyP59M5yf+KGKOzO5nleSBp2
 txWKYXscuoee0SH/0bSm6YuzlNcX2vW++O+6y3v73gxF4vGxUSvTnJdUJW6NYtgy
 Fj7a5FTIwvBw3Pdr3CBmtD6OWdYDbAlvRSowdUzE8ItQiI7vbkbVkSJhTOLFHzCV
 ZX2xnRV97jOLijXFroUA/+o0naW/0W7xls+aIfAEyXfBTP9kFdQL/iB6bWwLudEK
 ZELZyBcn6b0bDPuxUWnZfhu/NqjDQ2/PV+lz9ULo903gz66AljXm3LQtldrePBku
 XFPPbbeahCogizyAxFOwZGTPQbcMhqEgq9Afet8yq9V9ZtVMTAf6C1/TySE2dldT
 Xg9SpJttb2Tx8XqMYvSUptSDHqeA8NKhwjlDj+h0zxXRfRGcmyk6HG3yCUX1wOOc
 gR3vzgCfRi3YHvfrxTiPea/ev/0YpFkX4NA3U+4hXFc8ue/xwpQwevyN6+fgpHH1
 WRjTS5t6icJnkwIKQTXz6acwKsQsWwFoMdXQUzzZopaR2BvLCVI=
 =aPBc
 -----END PGP SIGNATURE-----

Merge tag 'nvme-6.19-2025-12-04' of git://git.infradead.org/nvme into block-6.19

Pull NVMe updates from Keith:

"- Subsystem usage cleanups (Max)
 - Endpoint device fixes (Shin'ichiro)
 - Debug statements (Gerd)
 - FC fabrics cleanups and fixes (Daniel)
 - Consistent alloc API usages (Israel)
 - Code comment updates (Chu)
 - Authentication retry fix (Justin)"

* tag 'nvme-6.19-2025-12-04' of git://git.infradead.org/nvme:
  nvme-fabrics: add ENOKEY to no retry criteria for authentication failures
  nvme-auth: use kvfree() for memory allocated with kvcalloc()
  nvmet-tcp: use kvcalloc for commands array
  nvmet-rdma: use kvcalloc for commands and responses arrays
  nvme: fix typo error in nvme target
  nvmet-fc: use pr_* print macros instead of dev_*
  nvmet-fcloop: remove unused lsdir member.
  nvmet-fcloop: check all request and response have been processed
  nvme-fc: check all request and response have been processed
  nvme-fc: don't hold rport lock when putting ctrl
  nvme-pci: add debug message on fail to read CSTS
  nvme-pci: print error message on failure in nvme_probe
  nvmet: pci-epf: fix DMA channel debug print
  nvmet: pci-epf: move DMA initialization to EPC init callback
  nvmet: remove redundant subsysnqn field from ctrl
  nvmet: add sanity checks when freeing subsystem
pull/1354/merge
Jens Axboe 2025-12-04 20:58:19 -07:00
commit 0f45353dd4
14 changed files with 69 additions and 62 deletions

View File

@ -1122,7 +1122,7 @@ void nvme_auth_free(struct nvme_ctrl *ctrl)
if (ctrl->dhchap_ctxs) { if (ctrl->dhchap_ctxs) {
for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]); nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
kfree(ctrl->dhchap_ctxs); kvfree(ctrl->dhchap_ctxs);
} }
if (ctrl->host_key) { if (ctrl->host_key) {
nvme_auth_free_key(ctrl->host_key); nvme_auth_free_key(ctrl->host_key);

View File

@ -592,7 +592,7 @@ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
if (status > 0 && (status & NVME_STATUS_DNR)) if (status > 0 && (status & NVME_STATUS_DNR))
return false; return false;
if (status == -EKEYREJECTED) if (status == -EKEYREJECTED || status == -ENOKEY)
return false; return false;
if (ctrl->opts->max_reconnects == -1 || if (ctrl->opts->max_reconnects == -1 ||

View File

@ -520,6 +520,8 @@ nvme_fc_free_rport(struct kref *ref)
WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
WARN_ON(!list_empty(&rport->ctrl_list)); WARN_ON(!list_empty(&rport->ctrl_list));
WARN_ON(!list_empty(&rport->ls_req_list));
WARN_ON(!list_empty(&rport->ls_rcv_list));
/* remove from lport list */ /* remove from lport list */
spin_lock_irqsave(&nvme_fc_lock, flags); spin_lock_irqsave(&nvme_fc_lock, flags);
@ -1468,14 +1470,14 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
{ {
struct fcnvme_ls_disconnect_assoc_rqst *rqst = struct fcnvme_ls_disconnect_assoc_rqst *rqst =
&lsop->rqstbuf->rq_dis_assoc; &lsop->rqstbuf->rq_dis_assoc;
struct nvme_fc_ctrl *ctrl, *ret = NULL; struct nvme_fc_ctrl *ctrl, *tmp, *ret = NULL;
struct nvmefc_ls_rcv_op *oldls = NULL; struct nvmefc_ls_rcv_op *oldls = NULL;
u64 association_id = be64_to_cpu(rqst->associd.association_id); u64 association_id = be64_to_cpu(rqst->associd.association_id);
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&rport->lock, flags); spin_lock_irqsave(&rport->lock, flags);
list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { list_for_each_entry_safe(ctrl, tmp, &rport->ctrl_list, ctrl_list) {
if (!nvme_fc_ctrl_get(ctrl)) if (!nvme_fc_ctrl_get(ctrl))
continue; continue;
spin_lock(&ctrl->lock); spin_lock(&ctrl->lock);
@ -1488,7 +1490,9 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
if (ret) if (ret)
/* leave the ctrl get reference */ /* leave the ctrl get reference */
break; break;
spin_unlock_irqrestore(&rport->lock, flags);
nvme_fc_ctrl_put(ctrl); nvme_fc_ctrl_put(ctrl);
spin_lock_irqsave(&rport->lock, flags);
} }
spin_unlock_irqrestore(&rport->lock, flags); spin_unlock_irqrestore(&rport->lock, flags);

View File

@ -2984,6 +2984,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
pci_set_master(pdev); pci_set_master(pdev);
if (readl(dev->bar + NVME_REG_CSTS) == -1) { if (readl(dev->bar + NVME_REG_CSTS) == -1) {
dev_dbg(dev->ctrl.device, "reading CSTS register failed\n");
result = -ENODEV; result = -ENODEV;
goto disable; goto disable;
} }
@ -3609,6 +3610,7 @@ out_uninit_ctrl:
nvme_uninit_ctrl(&dev->ctrl); nvme_uninit_ctrl(&dev->ctrl);
out_put_ctrl: out_put_ctrl:
nvme_put_ctrl(&dev->ctrl); nvme_put_ctrl(&dev->ctrl);
dev_err_probe(&pdev->dev, result, "probe failed\n");
return result; return result;
} }

View File

@ -708,7 +708,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* /*
* We don't really have a practical limit on the number of abort * We don't really have a practical limit on the number of abort
* comands. But we don't do anything useful for abort either, so * commands. But we don't do anything useful for abort either, so
* no point in allowing more abort commands than the spec requires. * no point in allowing more abort commands than the spec requires.
*/ */
id->acl = 3; id->acl = 3;

View File

@ -381,8 +381,8 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, buf, 1); ret = crypto_shash_update(shash, buf, 1);
if (ret) if (ret)
goto out; goto out;
ret = crypto_shash_update(shash, ctrl->subsysnqn, ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
strlen(ctrl->subsysnqn)); strlen(ctrl->subsys->subsysnqn));
if (ret) if (ret)
goto out; goto out;
ret = crypto_shash_final(shash, response); ret = crypto_shash_final(shash, response);
@ -429,7 +429,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
} }
transformed_key = nvme_auth_transform_key(ctrl->ctrl_key, transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
ctrl->subsysnqn); ctrl->subsys->subsysnqn);
if (IS_ERR(transformed_key)) { if (IS_ERR(transformed_key)) {
ret = PTR_ERR(transformed_key); ret = PTR_ERR(transformed_key);
goto out_free_tfm; goto out_free_tfm;
@ -484,8 +484,8 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, "Controller", 10); ret = crypto_shash_update(shash, "Controller", 10);
if (ret) if (ret)
goto out; goto out;
ret = crypto_shash_update(shash, ctrl->subsysnqn, ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
strlen(ctrl->subsysnqn)); strlen(ctrl->subsys->subsysnqn));
if (ret) if (ret)
goto out; goto out;
ret = crypto_shash_update(shash, buf, 1); ret = crypto_shash_update(shash, buf, 1);
@ -575,7 +575,7 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
return; return;
} }
ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len, ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len,
sq->ctrl->subsysnqn, sq->ctrl->subsys->subsysnqn,
sq->ctrl->hostnqn, &digest); sq->ctrl->hostnqn, &digest);
if (ret) { if (ret) {
pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n", pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n",
@ -590,8 +590,10 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
goto out_free_digest; goto out_free_digest;
} }
#ifdef CONFIG_NVME_TARGET_TCP_TLS #ifdef CONFIG_NVME_TARGET_TCP_TLS
tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, sq->ctrl->subsysnqn, tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn,
sq->ctrl->shash_id, tls_psk, psk_len, digest); sq->ctrl->subsys->subsysnqn,
sq->ctrl->shash_id, tls_psk, psk_len,
digest);
if (IS_ERR(tls_key)) { if (IS_ERR(tls_key)) {
pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n", pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
__func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key)); __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));

View File

@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(nvmet_wq);
* - the nvmet_transports array * - the nvmet_transports array
* *
* When updating any of those lists/structures write lock should be obtained, * When updating any of those lists/structures write lock should be obtained,
* while when reading (popolating discovery log page or checking host-subsystem * while when reading (populating discovery log page or checking host-subsystem
* link) read lock is obtained to allow concurrent reads. * link) read lock is obtained to allow concurrent reads.
*/ */
DECLARE_RWSEM(nvmet_config_sem); DECLARE_RWSEM(nvmet_config_sem);
@ -1628,7 +1628,6 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE); memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
kref_init(&ctrl->ref); kref_init(&ctrl->ref);
@ -1903,6 +1902,8 @@ static void nvmet_subsys_free(struct kref *ref)
struct nvmet_subsys *subsys = struct nvmet_subsys *subsys =
container_of(ref, struct nvmet_subsys, ref); container_of(ref, struct nvmet_subsys, ref);
WARN_ON_ONCE(!list_empty(&subsys->ctrls));
WARN_ON_ONCE(!list_empty(&subsys->hosts));
WARN_ON_ONCE(!xa_empty(&subsys->namespaces)); WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
nvmet_debugfs_subsys_free(subsys); nvmet_debugfs_subsys_free(subsys);

View File

@ -490,8 +490,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
sizeof(*discon_rqst) + sizeof(*discon_acc) + sizeof(*discon_rqst) + sizeof(*discon_acc) +
tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
if (!lsop) { if (!lsop) {
dev_info(tgtport->dev, pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n",
"{%d:%d} send Disconnect Association failed: ENOMEM\n",
tgtport->fc_target_port.port_num, assoc->a_id); tgtport->fc_target_port.port_num, assoc->a_id);
return; return;
} }
@ -513,8 +512,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
ret = nvmet_fc_send_ls_req_async(tgtport, lsop, ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
nvmet_fc_disconnect_assoc_done); nvmet_fc_disconnect_assoc_done);
if (ret) { if (ret) {
dev_info(tgtport->dev, pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n",
"{%d:%d} XMT Disconnect Association failed: %d\n",
tgtport->fc_target_port.port_num, assoc->a_id, ret); tgtport->fc_target_port.port_num, assoc->a_id, ret);
kfree(lsop); kfree(lsop);
} }
@ -1187,8 +1185,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
if (oldls) if (oldls)
nvmet_fc_xmt_ls_rsp(tgtport, oldls); nvmet_fc_xmt_ls_rsp(tgtport, oldls);
ida_free(&tgtport->assoc_cnt, assoc->a_id); ida_free(&tgtport->assoc_cnt, assoc->a_id);
dev_info(tgtport->dev, pr_info("{%d:%d}: Association freed\n",
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id); tgtport->fc_target_port.port_num, assoc->a_id);
kfree(assoc); kfree(assoc);
} }
@ -1224,8 +1221,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
flush_workqueue(assoc->queues[i]->work_q); flush_workqueue(assoc->queues[i]->work_q);
} }
dev_info(tgtport->dev, pr_info("{%d:%d}: Association deleted\n",
"{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id); tgtport->fc_target_port.port_num, assoc->a_id);
nvmet_fc_tgtport_put(tgtport); nvmet_fc_tgtport_put(tgtport);
@ -1716,8 +1712,8 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
} }
if (ret) { if (ret) {
dev_err(tgtport->dev, pr_err("{%d}: Create Association LS failed: %s\n",
"Create Association LS failed: %s\n", tgtport->fc_target_port.port_num,
validation_errors[ret]); validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd, sizeof(*acc), rqst->w0.ls_cmd,
@ -1730,8 +1726,7 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
atomic_set(&queue->connected, 1); atomic_set(&queue->connected, 1);
queue->sqhd = 0; /* best place to init value */ queue->sqhd = 0; /* best place to init value */
dev_info(tgtport->dev, pr_info("{%d:%d}: Association created\n",
"{%d:%d} Association created\n",
tgtport->fc_target_port.port_num, iod->assoc->a_id); tgtport->fc_target_port.port_num, iod->assoc->a_id);
/* format a response */ /* format a response */
@ -1809,8 +1804,8 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
} }
if (ret) { if (ret) {
dev_err(tgtport->dev, pr_err("{%d}: Create Connection LS failed: %s\n",
"Create Connection LS failed: %s\n", tgtport->fc_target_port.port_num,
validation_errors[ret]); validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd, sizeof(*acc), rqst->w0.ls_cmd,
@ -1871,8 +1866,8 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
} }
if (ret || !assoc) { if (ret || !assoc) {
dev_err(tgtport->dev, pr_err("{%d}: Disconnect LS failed: %s\n",
"Disconnect LS failed: %s\n", tgtport->fc_target_port.port_num,
validation_errors[ret]); validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd, sizeof(*acc), rqst->w0.ls_cmd,
@ -1907,8 +1902,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
spin_unlock_irqrestore(&tgtport->lock, flags); spin_unlock_irqrestore(&tgtport->lock, flags);
if (oldls) { if (oldls) {
dev_info(tgtport->dev, pr_info("{%d:%d}: Multiple Disconnect Association LS's "
"{%d:%d} Multiple Disconnect Association LS's "
"received\n", "received\n",
tgtport->fc_target_port.port_num, assoc->a_id); tgtport->fc_target_port.port_num, assoc->a_id);
/* overwrite good response with bogus failure */ /* overwrite good response with bogus failure */
@ -2051,8 +2045,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
dev_info(tgtport->dev, pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n",
"RCV %s LS failed: payload too large (%d)\n", tgtport->fc_target_port.port_num,
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "", nvmefc_ls_names[w0->ls_cmd] : "",
lsreqbuf_len); lsreqbuf_len);
@ -2060,8 +2054,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
} }
if (!nvmet_fc_tgtport_get(tgtport)) { if (!nvmet_fc_tgtport_get(tgtport)) {
dev_info(tgtport->dev, pr_info("{%d}: RCV %s LS failed: target deleting\n",
"RCV %s LS failed: target deleting\n", tgtport->fc_target_port.port_num,
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : ""); nvmefc_ls_names[w0->ls_cmd] : "");
return -ESHUTDOWN; return -ESHUTDOWN;
@ -2069,8 +2063,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
iod = nvmet_fc_alloc_ls_iod(tgtport); iod = nvmet_fc_alloc_ls_iod(tgtport);
if (!iod) { if (!iod) {
dev_info(tgtport->dev, pr_info("{%d}: RCV %s LS failed: context allocation failed\n",
"RCV %s LS failed: context allocation failed\n", tgtport->fc_target_port.port_num,
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : ""); nvmefc_ls_names[w0->ls_cmd] : "");
nvmet_fc_tgtport_put(tgtport); nvmet_fc_tgtport_put(tgtport);

View File

@ -254,7 +254,6 @@ struct fcloop_nport {
struct fcloop_lsreq { struct fcloop_lsreq {
struct nvmefc_ls_req *lsreq; struct nvmefc_ls_req *lsreq;
struct nvmefc_ls_rsp ls_rsp; struct nvmefc_ls_rsp ls_rsp;
int lsdir; /* H2T or T2H */
int status; int status;
struct list_head ls_list; /* fcloop_rport->ls_list */ struct list_head ls_list; /* fcloop_rport->ls_list */
}; };
@ -1111,8 +1110,10 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
rport->nport->rport = NULL; rport->nport->rport = NULL;
spin_unlock_irqrestore(&fcloop_lock, flags); spin_unlock_irqrestore(&fcloop_lock, flags);
if (put_port) if (put_port) {
WARN_ON(!list_empty(&rport->ls_list));
fcloop_nport_put(rport->nport); fcloop_nport_put(rport->nport);
}
} }
static void static void
@ -1130,8 +1131,10 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
tport->nport->tport = NULL; tport->nport->tport = NULL;
spin_unlock_irqrestore(&fcloop_lock, flags); spin_unlock_irqrestore(&fcloop_lock, flags);
if (put_port) if (put_port) {
WARN_ON(!list_empty(&tport->ls_list));
fcloop_nport_put(tport->nport); fcloop_nport_put(tport->nport);
}
} }
#define FCLOOP_HW_QUEUES 4 #define FCLOOP_HW_QUEUES 4

View File

@ -285,7 +285,6 @@ struct nvmet_ctrl {
__le32 *changed_ns_list; __le32 *changed_ns_list;
u32 nr_changed_ns; u32 nr_changed_ns;
char subsysnqn[NVMF_NQN_FIELD_LEN];
char hostnqn[NVMF_NQN_FIELD_LEN]; char hostnqn[NVMF_NQN_FIELD_LEN];
struct device *p2p_client; struct device *p2p_client;

View File

@ -150,7 +150,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
* code path with duplicate ctrl subsysnqn. In order to prevent that we * code path with duplicate ctrl subsysnqn. In order to prevent that we
* mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn. * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
*/ */
memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn)); memcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
/* use fabric id-ctrl values */ /* use fabric id-ctrl values */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) + id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +

View File

@ -320,12 +320,14 @@ static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
nvme_epf->dma_enabled = true; nvme_epf->dma_enabled = true;
dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n", dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
dma_chan_name(chan), dma_chan_name(nvme_epf->dma_rx_chan),
dma_get_max_seg_size(dmaengine_get_dma_device(chan))); dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
dma_rx_chan)));
dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n", dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
dma_chan_name(chan), dma_chan_name(nvme_epf->dma_tx_chan),
dma_get_max_seg_size(dmaengine_get_dma_device(chan))); dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
dma_tx_chan)));
return; return;
@ -2325,6 +2327,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
return ret; return ret;
} }
nvmet_pci_epf_init_dma(nvme_epf);
/* Set device ID, class, etc. */ /* Set device ID, class, etc. */
epf->header->vendorid = ctrl->tctrl->subsys->vendor_id; epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id; epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
@ -2422,8 +2426,6 @@ static int nvmet_pci_epf_bind(struct pci_epf *epf)
if (ret) if (ret)
return ret; return ret;
nvmet_pci_epf_init_dma(nvme_epf);
return 0; return 0;
} }

View File

@ -367,7 +367,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *cmds; struct nvmet_rdma_cmd *cmds;
int ret = -EINVAL, i; int ret = -EINVAL, i;
cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL); cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
if (!cmds) if (!cmds)
goto out; goto out;
@ -382,7 +382,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
out_free: out_free:
while (--i >= 0) while (--i >= 0)
nvmet_rdma_free_cmd(ndev, cmds + i, admin); nvmet_rdma_free_cmd(ndev, cmds + i, admin);
kfree(cmds); kvfree(cmds);
out: out:
return ERR_PTR(ret); return ERR_PTR(ret);
} }
@ -394,7 +394,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
for (i = 0; i < nr_cmds; i++) for (i = 0; i < nr_cmds; i++)
nvmet_rdma_free_cmd(ndev, cmds + i, admin); nvmet_rdma_free_cmd(ndev, cmds + i, admin);
kfree(cmds); kvfree(cmds);
} }
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
@ -455,7 +455,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
NUMA_NO_NODE, false, true)) NUMA_NO_NODE, false, true))
goto out; goto out;
queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), queue->rsps = kvcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
GFP_KERNEL); GFP_KERNEL);
if (!queue->rsps) if (!queue->rsps)
goto out_free_sbitmap; goto out_free_sbitmap;
@ -473,7 +473,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
out_free: out_free:
while (--i >= 0) while (--i >= 0)
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps); kvfree(queue->rsps);
out_free_sbitmap: out_free_sbitmap:
sbitmap_free(&queue->rsp_tags); sbitmap_free(&queue->rsp_tags);
out: out:
@ -487,7 +487,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
for (i = 0; i < nr_rsps; i++) for (i = 0; i < nr_rsps; i++)
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps); kvfree(queue->rsps);
sbitmap_free(&queue->rsp_tags); sbitmap_free(&queue->rsp_tags);
} }

View File

@ -1484,7 +1484,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
struct nvmet_tcp_cmd *cmds; struct nvmet_tcp_cmd *cmds;
int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
if (!cmds) if (!cmds)
goto out; goto out;
@ -1500,7 +1500,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
out_free: out_free:
while (--i >= 0) while (--i >= 0)
nvmet_tcp_free_cmd(cmds + i); nvmet_tcp_free_cmd(cmds + i);
kfree(cmds); kvfree(cmds);
out: out:
return ret; return ret;
} }
@ -1514,7 +1514,7 @@ static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
nvmet_tcp_free_cmd(cmds + i); nvmet_tcp_free_cmd(cmds + i);
nvmet_tcp_free_cmd(&queue->connect); nvmet_tcp_free_cmd(&queue->connect);
kfree(cmds); kvfree(cmds);
} }
static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)