seven smb3 server fixes
-----BEGIN PGP SIGNATURE-----
iQGzBAABCgAdFiEE6fsu8pdIjtWE/DpLiiy9cAdyT1EFAmlA0jkACgkQiiy9cAdy
T1HXXQv/SiJ9wKH6PDJZ6MRGYJLpHoq6BHUj6Uob2x7fc9LXGTlKwFJ8NAWBN5/1
Po6MrL28C4Lkm+KJttH/D/9FpsBEWmViMeHuiu8SahdC90TaoNi9hu4lBQvCGSOm
D59dWuG7KCXVgu3i6zWTKf2G2OkkRwHGKQ66TRvJ317HD0mzQfamke0SBLgN1/VJ
nKrZw7fuBLNf5x2Yxtn01idGSwROCTqSLG1i6V4wlfX4mLT9ZJAgfbzK7bhReT8U
ph2OZqFhKMSzZJQE/6VHw2A51LFfWZPNnp4Cl4AEkIVHzhWqzipGggUs782rGQcW
cHG/1Zawk03ap+7omuyhjgaFjQ02N1W2D+avdSKAjVpFCX+qsAf1RHw+N3+alA8g
JNuI4O4rtrHHznqaZ2xdgaWHpKp1K+ku2gjZYwTmt0L0ewcPRzvmpWJPT9r+1yFb
TwLGWPSVpR9jYViUF0X2cmlLYFaiKvKIFgRGn08UD4OrEQupy5p1tIJSzFnqf7E/
9tKxoXte
=RiDE
-----END PGP SIGNATURE-----
Merge tag 'v6.19-rc1-ksmbd-server-fixes' of git://git.samba.org/ksmbd
Pull smb server fixes from Steve French:
- Fix set xattr name validation
- Fix session refcount leak
- Minor cleanup
- smbdirect (RDMA) fixes: improve receive completion and connect/negotiate handling
* tag 'v6.19-rc1-ksmbd-server-fixes' of git://git.samba.org/ksmbd:
ksmbd: fix buffer validation by including null terminator size in EA length
ksmbd: Fix refcount leak when invalid session is found on session lookup
ksmbd: remove redundant DACL check in smb_check_perm_dacl
ksmbd: convert comma to semicolon
smb: server: defer the initial recv completion logic to smb_direct_negotiate_recv_work()
smb: server: initialize recv_io->cqe.done = recv_done just once
smb: smbdirect: introduce smbdirect_socket.connect.{lock,work}
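
For context on the EA validation fix above: an SMB2 extended-attribute entry carries a NUL-terminated name on the wire, so the buffer-length check has to count that extra byte. A minimal userspace sketch of the idea follows; the ea_entry layout and helpers are illustrative stand-ins, not the kernel's struct smb2_ea_info or smb2_set_ea(), and endianness handling (le16_to_cpu in the real code) is skipped for brevity.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative wire header, simplified from the real struct smb2_ea_info.
 * Name bytes, a NUL terminator, then the value bytes follow this header.
 */
struct ea_entry {
        uint8_t  name_len;   /* name length, excluding the NUL terminator */
        uint16_t value_len;  /* value length */
};

/*
 * Returns 0 if the whole entry fits in buf_len, -1 otherwise.
 * The "+ 1" accounts for the NUL between name and value; without it
 * a crafted entry can make the parser read one byte past the buffer.
 */
static int ea_entry_fits(const struct ea_entry *ea, size_t buf_len)
{
        size_t need = sizeof(*ea) + ea->name_len + 1 + ea->value_len;

        return buf_len < need ? -1 : 0;
}

int main(void)
{
        struct ea_entry hdr = { .name_len = 4, .value_len = 8 };

        /* sizeof(hdr) is 4 on a typical ABI: 4 + 4 + 1 + 8 = 17 bytes needed */
        printf("fits in 16 bytes? %s\n", ea_entry_fits(&hdr, 16) ? "no" : "yes");
        printf("fits in 17 bytes? %s\n", ea_entry_fits(&hdr, 17) ? "no" : "yes");
        return 0;
}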
@@ -132,6 +132,14 @@ struct smbdirect_socket {
 
         struct smbdirect_socket_parameters parameters;
 
+       /*
+        * The state for connect/negotiation
+        */
+       struct {
+               spinlock_t lock;
+               struct work_struct work;
+       } connect;
+
        /*
         * The state for keepalive and timeout handling
         */
@@ -353,6 +361,10 @@ static __always_inline void smbdirect_socket_init(struct smbdirect_socket *sc)
        INIT_WORK(&sc->disconnect_work, __smbdirect_socket_disabled_work);
        disable_work_sync(&sc->disconnect_work);
 
+       spin_lock_init(&sc->connect.lock);
+       INIT_WORK(&sc->connect.work, __smbdirect_socket_disabled_work);
+       disable_work_sync(&sc->connect.work);
+
        INIT_WORK(&sc->idle.immediate_work, __smbdirect_socket_disabled_work);
        disable_work_sync(&sc->idle.immediate_work);
        INIT_DELAYED_WORK(&sc->idle.timer_work, __smbdirect_socket_disabled_work);
@@ -325,8 +325,10 @@ struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
        sess = ksmbd_session_lookup(conn, id);
        if (!sess && conn->binding)
                sess = ksmbd_session_lookup_slowpath(id);
-       if (sess && sess->state != SMB2_SESSION_VALID)
+       if (sess && sess->state != SMB2_SESSION_VALID) {
+               ksmbd_user_session_put(sess);
                sess = NULL;
+       }
        return sess;
 }
 
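
The session lookup hunk above follows the usual lookup/refcount contract: a successful lookup returns an object with its reference count already raised, so any path that decides not to hand the object back must drop that reference. A generic sketch of the rule, with hypothetical obj_lookup/obj_put helpers rather than ksmbd's session API:

#include <stdatomic.h>
#include <stdlib.h>

enum { STATE_VALID = 1 };

struct obj {
        atomic_int ref;
        int state;
};

static void obj_put(struct obj *o)
{
        /* free on the last reference */
        if (atomic_fetch_sub(&o->ref, 1) == 1)
                free(o);
}

/* Stand-in for a lookup that returns the object with a reference taken. */
static struct obj *obj_lookup(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
                atomic_store(&o->ref, 1);
        return o;
}

static struct obj *obj_lookup_valid(void)
{
        struct obj *o = obj_lookup();

        if (o && o->state != STATE_VALID) {
                obj_put(o);     /* drop the lookup reference...                  */
                o = NULL;       /* ...instead of leaking it while returning NULL */
        }
        return o;
}

int main(void)
{
        struct obj *o = obj_lookup_valid();

        if (o)
                obj_put(o);     /* caller drops its reference when done */
        return 0;
}

In the hunk above, ksmbd_user_session_put() plays the role of obj_put() here.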
@@ -2363,7 +2363,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
        int rc = 0;
        unsigned int next = 0;
 
-       if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+       if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + 1 +
            le16_to_cpu(eabuf->EaValueLength))
                return -EINVAL;
 
@@ -2440,7 +2440,7 @@ next:
                        break;
                }
 
-               if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+               if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + 1 +
                    le16_to_cpu(eabuf->EaValueLength)) {
                        rc = -EINVAL;
                        break;
@@ -1307,9 +1307,6 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
                        granted |= le32_to_cpu(ace->access_req);
                        ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
                }
-
-               if (!pdacl->num_aces)
-                       granted = GENERIC_ALL_FLAGS;
        }
 
        if (!uid)
@@ -242,6 +242,7 @@ static void smb_direct_disconnect_rdma_work(struct work_struct *work)
         * disable[_delayed]_work_sync()
         */
        disable_work(&sc->disconnect_work);
+       disable_work(&sc->connect.work);
        disable_work(&sc->recv_io.posted.refill_work);
        disable_delayed_work(&sc->idle.timer_work);
        disable_work(&sc->idle.immediate_work);
@@ -297,6 +298,7 @@ smb_direct_disconnect_rdma_connection(struct smbdirect_socket *sc)
         * not queued again but here we don't block and avoid
         * disable[_delayed]_work_sync()
         */
+       disable_work(&sc->connect.work);
        disable_work(&sc->recv_io.posted.refill_work);
        disable_work(&sc->idle.immediate_work);
        disable_delayed_work(&sc->idle.timer_work);
@@ -467,6 +469,7 @@ static void free_transport(struct smb_direct_transport *t)
         */
        smb_direct_disconnect_wake_up_all(sc);
 
+       disable_work_sync(&sc->connect.work);
        disable_work_sync(&sc->recv_io.posted.refill_work);
        disable_delayed_work_sync(&sc->idle.timer_work);
        disable_work_sync(&sc->idle.immediate_work);
@@ -635,28 +638,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
        switch (sc->recv_io.expected) {
        case SMBDIRECT_EXPECT_NEGOTIATE_REQ:
-               if (wc->byte_len < sizeof(struct smbdirect_negotiate_req)) {
-                       put_recvmsg(sc, recvmsg);
-                       smb_direct_disconnect_rdma_connection(sc);
-                       return;
-               }
-               sc->recv_io.reassembly.full_packet_received = true;
-               /*
-                * Some drivers (at least mlx5_ib) might post a
-                * recv completion before RDMA_CM_EVENT_ESTABLISHED,
-                * we need to adjust our expectation in that case.
-                */
-               if (!sc->first_error && sc->status == SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING)
-                       sc->status = SMBDIRECT_SOCKET_NEGOTIATE_NEEDED;
-               if (SMBDIRECT_CHECK_STATUS_WARN(sc, SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)) {
-                       put_recvmsg(sc, recvmsg);
-                       smb_direct_disconnect_rdma_connection(sc);
-                       return;
-               }
-               sc->status = SMBDIRECT_SOCKET_NEGOTIATE_RUNNING;
-               enqueue_reassembly(sc, recvmsg, 0);
-               wake_up(&sc->status_wait);
-               return;
+               /* see smb_direct_negotiate_recv_done */
+               break;
        case SMBDIRECT_EXPECT_DATA_TRANSFER: {
                struct smbdirect_data_transfer *data_transfer =
                        (struct smbdirect_data_transfer *)recvmsg->packet;
@@ -742,6 +725,126 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
        smb_direct_disconnect_rdma_connection(sc);
 }
 
+static void smb_direct_negotiate_recv_work(struct work_struct *work);
+
+static void smb_direct_negotiate_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       struct smbdirect_recv_io *recv_io =
+               container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe);
+       struct smbdirect_socket *sc = recv_io->socket;
+       unsigned long flags;
+
+       /*
+        * reset the common recv_done for later reuse.
+        */
+       recv_io->cqe.done = recv_done;
+
+       if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+               put_recvmsg(sc, recv_io);
+               if (wc->status != IB_WC_WR_FLUSH_ERR) {
+                       pr_err("Negotiate Recv error. status='%s (%d)' opcode=%d\n",
+                              ib_wc_status_msg(wc->status), wc->status,
+                              wc->opcode);
+                       smb_direct_disconnect_rdma_connection(sc);
+               }
+               return;
+       }
+
+       ksmbd_debug(RDMA, "Negotiate Recv completed. status='%s (%d)', opcode=%d\n",
+                   ib_wc_status_msg(wc->status), wc->status,
+                   wc->opcode);
+
+       ib_dma_sync_single_for_cpu(sc->ib.dev,
+                                  recv_io->sge.addr,
+                                  recv_io->sge.length,
+                                  DMA_FROM_DEVICE);
+
+       /*
+        * This is an internal error!
+        */
+       if (WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_NEGOTIATE_REQ)) {
+               put_recvmsg(sc, recv_io);
+               smb_direct_disconnect_rdma_connection(sc);
+               return;
+       }
+
+       /*
+        * Don't reset timer to the keepalive interval in
+        * this will be done in smb_direct_negotiate_recv_work.
+        */
+
+       /*
+        * Only remember the recv_io if it has enough bytes,
+        * this gives smb_direct_negotiate_recv_work enough
+        * information in order to disconnect if it was not
+        * valid.
+        */
+       sc->recv_io.reassembly.full_packet_received = true;
+       if (wc->byte_len >= sizeof(struct smbdirect_negotiate_req))
+               enqueue_reassembly(sc, recv_io, 0);
+       else
+               put_recvmsg(sc, recv_io);
+
+       /*
+        * Some drivers (at least mlx5_ib and irdma in roce mode)
+        * might post a recv completion before RDMA_CM_EVENT_ESTABLISHED,
+        * we need to adjust our expectation in that case.
+        *
+        * So we defer further processing of the negotiation
+        * to smb_direct_negotiate_recv_work().
+        *
+        * If we are already in SMBDIRECT_SOCKET_NEGOTIATE_NEEDED
+        * we queue the work directly otherwise
+        * smb_direct_cm_handler() will do it, when
+        * RDMA_CM_EVENT_ESTABLISHED arrived.
+        */
+       spin_lock_irqsave(&sc->connect.lock, flags);
+       if (!sc->first_error) {
+               INIT_WORK(&sc->connect.work, smb_direct_negotiate_recv_work);
+               if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)
+                       queue_work(sc->workqueue, &sc->connect.work);
+       }
+       spin_unlock_irqrestore(&sc->connect.lock, flags);
+}
+
+static void smb_direct_negotiate_recv_work(struct work_struct *work)
+{
+       struct smbdirect_socket *sc =
+               container_of(work, struct smbdirect_socket, connect.work);
+       const struct smbdirect_socket_parameters *sp = &sc->parameters;
+       struct smbdirect_recv_io *recv_io;
+
+       if (sc->first_error)
+               return;
+
+       ksmbd_debug(RDMA, "Negotiate Recv Work running\n");
+
+       /*
+        * Reset timer to the keepalive interval in
+        * order to trigger our next keepalive message.
+        */
+       sc->idle.keepalive = SMBDIRECT_KEEPALIVE_NONE;
+       mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
+                        msecs_to_jiffies(sp->keepalive_interval_msec));
+
+       /*
+        * If smb_direct_negotiate_recv_done() detected an
+        * invalid request we want to disconnect.
+        */
+       recv_io = get_first_reassembly(sc);
+       if (!recv_io) {
+               smb_direct_disconnect_rdma_connection(sc);
+               return;
+       }
+
+       if (SMBDIRECT_CHECK_STATUS_WARN(sc, SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)) {
+               smb_direct_disconnect_rdma_connection(sc);
+               return;
+       }
+       sc->status = SMBDIRECT_SOCKET_NEGOTIATE_RUNNING;
+       wake_up(&sc->status_wait);
+}
+
 static int smb_direct_post_recv(struct smbdirect_socket *sc,
                                struct smbdirect_recv_io *recvmsg)
 {
@@ -758,7 +861,6 @@ static int smb_direct_post_recv(struct smbdirect_socket *sc,
                return ret;
        recvmsg->sge.length = sp->max_recv_size;
        recvmsg->sge.lkey = sc->ib.pd->local_dma_lkey;
-       recvmsg->cqe.done = recv_done;
 
        wr.wr_cqe = &recvmsg->cqe;
        wr.next = NULL;
@@ -1732,6 +1834,7 @@ static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
                                 struct rdma_cm_event *event)
 {
        struct smbdirect_socket *sc = cm_id->context;
+       unsigned long flags;
 
        ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
                    cm_id, rdma_event_msg(event->event), event->event);
@@ -1739,18 +1842,27 @@ static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED: {
                /*
-                * Some drivers (at least mlx5_ib) might post a
-                * recv completion before RDMA_CM_EVENT_ESTABLISHED,
+                * Some drivers (at least mlx5_ib and irdma in roce mode)
+                * might post a recv completion before RDMA_CM_EVENT_ESTABLISHED,
                 * we need to adjust our expectation in that case.
                 *
-                * As we already started the negotiation, we just
-                * ignore RDMA_CM_EVENT_ESTABLISHED here.
+                * If smb_direct_negotiate_recv_done was called first
+                * it initialized sc->connect.work only for us to
+                * start, so that we turned into
+                * SMBDIRECT_SOCKET_NEGOTIATE_NEEDED, before
+                * smb_direct_negotiate_recv_work() runs.
+                *
+                * If smb_direct_negotiate_recv_done didn't happen
+                * yet. sc->connect.work is still be disabled and
+                * queue_work() is a no-op.
                 */
-               if (!sc->first_error && sc->status > SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING)
-                       break;
                if (SMBDIRECT_CHECK_STATUS_DISCONNECT(sc, SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING))
                        break;
                sc->status = SMBDIRECT_SOCKET_NEGOTIATE_NEEDED;
+               spin_lock_irqsave(&sc->connect.lock, flags);
+               if (!sc->first_error)
+                       queue_work(sc->workqueue, &sc->connect.work);
+               spin_unlock_irqrestore(&sc->connect.lock, flags);
                wake_up(&sc->status_wait);
                break;
        }
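
The connect.{lock,work} changes above implement a "run once, after both of two asynchronous events, in either order" pattern: the negotiate recv completion arms the work item, and whichever of the two events (recv completion or RDMA_CM_EVENT_ESTABLISHED) happens second queues it. A rough userspace analogy of that ordering rule follows; it uses a plain mutex and flags instead of the kernel's spinlock and workqueue machinery, and the names are illustrative only.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t connect_lock = PTHREAD_MUTEX_INITIALIZER;
static bool recv_seen;          /* negotiate request receive completed  */
static bool established_seen;   /* connection-established event arrived */
static bool work_queued;        /* negotiate work has been queued once  */

/* Stand-in for queue_work(): must run exactly once, only after both events. */
static void queue_negotiate_work(void)
{
        printf("negotiate work queued\n");
}

static void note_event(bool *seen)
{
        pthread_mutex_lock(&connect_lock);
        *seen = true;
        /* Whichever event arrives second queues the work; the first is a no-op. */
        if (recv_seen && established_seen && !work_queued) {
                work_queued = true;
                queue_negotiate_work();
        }
        pthread_mutex_unlock(&connect_lock);
}

int main(void)
{
        /* The two events may fire in either order; the work still runs once. */
        note_event(&recv_seen);         /* e.g. recv completion first...     */
        note_event(&established_seen);  /* ...then RDMA_CM_EVENT_ESTABLISHED */
        return 0;
}

In the kernel version the same role is played by connect.lock: smb_direct_negotiate_recv_done() arms connect.work under the lock, and the CM handler's queue_work() stays a no-op until that has happened.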
@@ -1921,6 +2033,7 @@ static int smb_direct_prepare_negotiation(struct smbdirect_socket *sc)
        recvmsg = get_free_recvmsg(sc);
        if (!recvmsg)
                return -ENOMEM;
+       recvmsg->cqe.done = smb_direct_negotiate_recv_done;
 
        ret = smb_direct_post_recv(sc, recvmsg);
        if (ret) {
@@ -2339,6 +2452,7 @@ respond:
 
 static int smb_direct_connect(struct smbdirect_socket *sc)
 {
+       struct smbdirect_recv_io *recv_io;
        int ret;
 
        ret = smb_direct_init_params(sc);
@@ -2353,6 +2467,9 @@ static int smb_direct_connect(struct smbdirect_socket *sc)
                return ret;
        }
 
+       list_for_each_entry(recv_io, &sc->recv_io.free.list, list)
+               recv_io->cqe.done = recv_done;
+
        ret = smb_direct_create_qpair(sc);
        if (ret) {
                pr_err("Can't accept RDMA client: %d\n", ret);
@@ -702,7 +702,7 @@ retry:
        rd.old_parent = NULL;
        rd.new_parent = new_path.dentry;
        rd.flags = flags;
-       rd.delegated_inode = NULL,
+       rd.delegated_inode = NULL;
        err = start_renaming_dentry(&rd, lookup_flags, old_child, &new_last);
        if (err)
                goto out_drop_write;