33 smb3/cifs client changesets, mostly smbdirect cleanup
-----BEGIN PGP SIGNATURE-----
iQGzBAABCgAdFiEE6fsu8pdIjtWE/DpLiiy9cAdyT1EFAmiWLjwACgkQiiy9cAdy
T1HvVAwAjW4BJ5QOGaDqRXaTtCZ5TvrdtlG3C9J7+YwGuUACT/m3+OffJEVsyWsA
/FP9r7oJ1T/4tNfD6V/4b8uEScVRMdSkdKedikFBH0UlV/Y2gWCdEXOgZOw19WKb
HBR35scMBmcFu/v+dJKpkAduNEJTQ35Is+RynY9PX9iJNTGYRWG3Oj1sod9tDHrA
suWAsNW6+xV6kQthyZmCWqVjz1lgkLp2MaYOGtoYBZZ5Z1RVoDu/+bwJBUhiGnzW
68PywL8ogqaszZLs3lv7vAXTjEeVzKOD43Tffs5/762eI7+VvMxJBR4aLfZGY5nE
uYK/9doGto3hlraQ5EHtOe11DRN3xqoHs6AbBlfvq95Ex2WksoWsmnRlwseUzuEy
0uvHxk7sptNiG2RP/yeCNGKKLPNJIXwPSHuedkk8K01tJf+J9JVZp2hllucGzrYb
3q7g623XJJUvCybBCq/gmPZMunhqxCxS+d5ZLwO3OL8xT9WB4ryRum7GoPotZUsp
HqP2NDug
=eX1a
-----END PGP SIGNATURE-----
Merge tag 'v6.17rc-part2-SMB3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6
Pull more smb client updates from Steve French:
"Non-smbdirect:
- Fix null ptr deref caused by delay in global spinlock
initialization
- Two fixes for native symlink creation with SMB3.1.1 POSIX
Extensions
- Fix for socket special file creation with SMB3.1.1 POSIX Extensions
- Reduce lock contention by splitting out mid_counter_lock
- Move SMB1 transport code to a separate file to reduce module size
when support for legacy servers is disabled
- Two cleanup patches: one to rename mid_lock to make it clearer what
it protects, and one to convert the mid flags to bools for clarity
Smbdirect/RDMA restructuring and fixes:
- Fix for error handling in send done
- Remove unneeded empty packet queue
- Fix put_receive_buffer error path
- Two fixes to recv_done error paths
- Remove unused variable
- Improve response and recvmsg type handling
- Fix handling of incoming message type
- Two cleanup fixes for better handling of smbdirect recv io
- Two cleanup fixes for socket spinlock
- Two patches that add socket reassembly struct
- Remove unused connection_status enum
- Use flag in common header for SMBDIRECT_RECV_IO_MAX_SGE
- Two cleanup patches to introduce and use the smbdirect send/recv io
memory caches and pools
- Two cleanup patches to introduce and use smbdirect send_io struct
- Fix to return error if rdma connect takes longer than 5 seconds
- Error logging improvements
- Fix redundant call to init_waitqueue_head
- Remove unneeded wait queue"
* tag 'v6.17rc-part2-SMB3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6: (33 commits)
smb: client: only use a single wait_queue to monitor smbdirect connection status
smb: client: don't call init_waitqueue_head(&info->conn_wait) twice in _smbd_get_connection
smb: client: improve logging in smbd_conn_upcall()
smb: client: return an error if rdma_connect does not return within 5 seconds
smb: client: make use of smbdirect_socket.{send,recv}_io.mem.{cache,pool}
smb: smbdirect: add smbdirect_socket.{send,recv}_io.mem.{cache,pool}
smb: client: make use of struct smbdirect_send_io
smb: smbdirect: introduce struct smbdirect_send_io
smb: client: make use of SMBDIRECT_RECV_IO_MAX_SGE
smb: smbdirect: add SMBDIRECT_RECV_IO_MAX_SGE
smb: client: remove unused enum smbd_connection_status
smb: client: make use of smbdirect_socket.recv_io.reassembly.*
smb: smbdirect: introduce smbdirect_socket.recv_io.reassembly.*
smb: client: make use of smb: smbdirect_socket.recv_io.free.{list,lock}
smb: smbdirect: introduce smbdirect_socket.recv_io.free.{list,lock}
smb: client: make use of struct smbdirect_recv_io
smb: smbdirect: introduce struct smbdirect_recv_io
smb: client: make use of smbdirect_socket->recv_io.expected
smb: smbdirect: introduce smbdirect_socket.recv_io.expected
smb: client: remove unused smbd_connection->fragment_reassembly_remaining
...
@@ -32,6 +32,6 @@ cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
 cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
-cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o
+cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o cifstransport.o
 cifs-$(CONFIG_CIFS_COMPRESSION) += compress.o compress/lz77.o
@@ -60,7 +60,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
		return;

	cifs_dbg(VFS, "Dump pending requests:\n");
-	spin_lock(&server->mid_lock);
+	spin_lock(&server->mid_queue_lock);
	list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
		cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n",
			 mid_entry->mid_state,
@@ -83,7 +83,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
				mid_entry->resp_buf, 62);
		}
	}
-	spin_unlock(&server->mid_lock);
+	spin_unlock(&server->mid_queue_lock);
 #endif /* CONFIG_CIFS_DEBUG2 */
 }
@@ -412,6 +412,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
 #ifdef CONFIG_CIFS_SMB_DIRECT
+		struct smbdirect_socket *sc;
		struct smbdirect_socket_parameters *sp;
 #endif

@@ -436,7 +437,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
			seq_printf(m, "\nSMBDirect transport not available");
			goto skip_rdma;
		}
-		sp = &server->smbd_conn->socket.parameters;
+		sc = &server->smbd_conn->socket;
+		sp = &sc->parameters;

		seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
			"transport status: %x",
@@ -465,15 +467,13 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
		seq_printf(m, "\nRead Queue count_reassembly_queue: %x "
			"count_enqueue_reassembly_queue: %x "
			"count_dequeue_reassembly_queue: %x "
-			"fragment_reassembly_remaining: %x "
			"reassembly_data_length: %x "
			"reassembly_queue_length: %x",
			server->smbd_conn->count_reassembly_queue,
			server->smbd_conn->count_enqueue_reassembly_queue,
			server->smbd_conn->count_dequeue_reassembly_queue,
-			server->smbd_conn->fragment_reassembly_remaining,
-			server->smbd_conn->reassembly_data_length,
-			server->smbd_conn->reassembly_queue_length);
+			sc->recv_io.reassembly.data_length,
+			sc->recv_io.reassembly.queue_length);
		seq_printf(m, "\nCurrent Credits send_credits: %x "
			"receive_credits: %x receive_credit_target: %x",
			atomic_read(&server->smbd_conn->send_credits),
@@ -481,10 +481,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
			server->smbd_conn->receive_credit_target);
		seq_printf(m, "\nPending send_pending: %x ",
			atomic_read(&server->smbd_conn->send_pending));
-		seq_printf(m, "\nReceive buffers count_receive_queue: %x "
-			"count_empty_packet_queue: %x",
-			server->smbd_conn->count_receive_queue,
-			server->smbd_conn->count_empty_packet_queue);
+		seq_printf(m, "\nReceive buffers count_receive_queue: %x ",
+			server->smbd_conn->count_receive_queue);
		seq_printf(m, "\nMR responder_resources: %x "
			"max_frmr_depth: %x mr_type: %x",
			server->smbd_conn->responder_resources,
@@ -672,7 +670,7 @@ skip_rdma:

			seq_printf(m, "\n\tServer ConnectionId: 0x%llx",
				   chan_server->conn_id);
-			spin_lock(&chan_server->mid_lock);
+			spin_lock(&chan_server->mid_queue_lock);
			list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) {
				seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu",
					   mid_entry->mid_state,
@@ -681,7 +679,7 @@ skip_rdma:
					   mid_entry->callback_data,
					   mid_entry->mid);
			}
-			spin_unlock(&chan_server->mid_lock);
+			spin_unlock(&chan_server->mid_queue_lock);
		}
		spin_unlock(&ses->chan_lock);
		seq_puts(m, "\n--\n");
@@ -77,7 +77,7 @@ unsigned int global_secflags = CIFSSEC_DEF;
 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
-spinlock_t GlobalMid_Lock;	/* protects above & list operations on midQ entries */
+DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */

 /*
  * Global counters, updated atomically
@@ -97,7 +97,7 @@ atomic_t total_buf_alloc_count;
 atomic_t total_small_buf_alloc_count;
 #endif/* STATS2 */
 struct list_head	cifs_tcp_ses_list;
-spinlock_t		cifs_tcp_ses_lock;
+DEFINE_SPINLOCK(cifs_tcp_ses_lock);
 static const struct super_operations cifs_super_ops;
 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
 module_param(CIFSMaxBufSize, uint, 0444);
@@ -723,7 +723,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
	else
		seq_puts(s, ",nativesocket");
	seq_show_option(s, "symlink",
-			cifs_symlink_type_str(get_cifs_symlink_type(cifs_sb)));
+			cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
@@ -1863,8 +1863,6 @@ init_cifs(void)
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
-	spin_lock_init(&cifs_tcp_ses_lock);
-	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();
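The DEFINE_SPINLOCK() conversions above are what close the window described in the pull message as a null pointer deref caused by a delay in global spinlock initialization: a statically defined lock is valid as soon as the object is linked in, while a lock initialized inside init_cifs() is unusable until that function has run. A minimal sketch of the two patterns (illustrative only; the function names here are hypothetical, not cifs.ko code):

#include <linux/init.h>
#include <linux/spinlock.h>

/* Compile-time initialization: usable from the moment the module is loaded. */
static DEFINE_SPINLOCK(example_lock);

/* Run-time initialization: invalid until example_init() has executed. */
static spinlock_t late_lock;

static int __init example_init(void)
{
        spin_lock_init(&late_lock);     /* anything taking late_lock before this races */
        return 0;
}

static void example_early_path(void)
{
        /*
         * Safe even if reached before example_init(), because example_lock
         * was initialized at compile time rather than in the init routine.
         */
        spin_lock(&example_lock);
        /* ... touch the globals this lock protects ... */
        spin_unlock(&example_lock);
}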
@@ -732,7 +732,8 @@ struct TCP_Server_Info {
 #endif
	wait_queue_head_t response_q;
	wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
-	spinlock_t mid_lock;  /* protect mid queue and it's entries */
+	spinlock_t mid_queue_lock;  /* protect mid queue */
+	spinlock_t mid_counter_lock;
	struct list_head pending_mid_q;
	bool noblocksnd;		/* use blocking sendmsg */
	bool noautotune;		/* do not autotune send buf sizes */
@@ -770,7 +771,7 @@ struct TCP_Server_Info {
	/* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
	unsigned int capabilities; /* selective disabling of caps by smb sess */
	int timeAdj;  /* Adjust for difference in server time zone in sec */
-	__u64 CurrentMid;         /* multiplex id - rotating counter, protected by GlobalMid_Lock */
+	__u64 current_mid;        /* multiplex id - rotating counter, protected by mid_counter_lock */
	char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
	/* 16th byte of RFC1001 workstation name is always null */
	char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
@@ -1729,9 +1730,10 @@ struct mid_q_entry {
	unsigned int resp_buf_size;
	int mid_state;	/* wish this were enum but can not pass to wait_event */
	int mid_rc;	/* rc for MID_RC */
-	unsigned int mid_flags;
	__le16 command;		/* smb command code */
	unsigned int optype;	/* operation type */
+	bool wait_cancelled:1;	/* Cancelled while waiting for response */
+	bool deleted_from_q:1;	/* Whether Mid has been dequeued frem pending_mid_q */
	bool large_buf:1;	/* if valid response, is pointer to large buf */
	bool multiRsp:1;	/* multiple trans2 responses for one request */
	bool multiEnd:1;	/* both received */
@@ -1893,10 +1895,6 @@ static inline bool is_replayable_error(int error)
 #define   MID_RESPONSE_READY 0x40 /* ready for other process handle the rsp */
 #define   MID_RC             0x80 /* mid_rc contains custom rc */

-/* Flags */
-#define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
-#define   MID_DELETED            2 /* Mid has been dequeued/deleted */
-
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
 #define   CIFS_SMALL_BUFFER     1
@@ -2007,9 +2005,9 @@ require use of the stronger protocol */
 * GlobalCurrentXid
 * GlobalTotalActiveXid
 * TCP_Server_Info->srv_lock (anything in struct not protected by another lock and can change)
- * TCP_Server_Info->mid_lock		TCP_Server_Info->pending_mid_q	cifs_get_tcp_session
- *				->CurrentMid
- *				(any changes in mid_q_entry fields)
+ * TCP_Server_Info->mid_queue_lock	TCP_Server_Info->pending_mid_q	cifs_get_tcp_session
+ *				mid_q_entry->deleted_from_q
+ * TCP_Server_Info->mid_counter_lock	TCP_Server_Info->current_mid	cifs_get_tcp_session
 * TCP_Server_Info->req_lock		TCP_Server_Info->in_flight	cifs_get_tcp_session
 *				->credits
 *				->echo_credits
@@ -2377,4 +2375,9 @@ static inline bool cifs_netbios_name(const char *name, size_t namelen)
	return ret;
 }

+#define CIFS_REPARSE_SUPPORT(tcon) \
+	((tcon)->posix_extensions || \
+	 (le32_to_cpu((tcon)->fsAttrInfo.Attributes) & \
+	  FILE_SUPPORTS_REPARSE_POINTS))
+
 #endif	/* _CIFS_GLOB_H */
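The mid_lock split above is the lock-contention change called out in the pull message: pending_mid_q membership and the rotating multiplex-id counter no longer serialize on one spinlock. A rough sketch of the resulting pattern, using simplified stand-in structures rather than the real TCP_Server_Info and mid_q_entry definitions:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>

struct example_server {
        spinlock_t mid_queue_lock;      /* protects pending_mid_q membership */
        spinlock_t mid_counter_lock;    /* protects current_mid only */
        struct list_head pending_mid_q;
        u64 current_mid;
};

struct example_mid {
        struct list_head qhead;
        u64 mid;
        bool deleted_from_q:1;          /* replaces the old MID_DELETED flag bit */
};

/* Allocating a new multiplex id only takes the counter lock ... */
static u64 example_next_mid(struct example_server *srv)
{
        u64 mid;

        spin_lock(&srv->mid_counter_lock);
        mid = ++srv->current_mid;
        spin_unlock(&srv->mid_counter_lock);
        return mid;
}

/* ... while queue removal only takes the queue lock, so the two paths no longer contend. */
static void example_dequeue_mid(struct example_server *srv, struct example_mid *m)
{
        spin_lock(&srv->mid_queue_lock);
        if (!m->deleted_from_q) {
                list_del_init(&m->qhead);
                m->deleted_from_q = true;
        }
        spin_unlock(&srv->mid_queue_lock);
}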
@@ -116,16 +116,31 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
			int * /* bytes returned */ , const int);
 extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
			    char *in_buf, int flags);
+int cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server);
 extern struct mid_q_entry *cifs_setup_request(struct cifs_ses *,
				struct TCP_Server_Info *,
				struct smb_rqst *);
 extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
						struct smb_rqst *);
+int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+		    struct smb_rqst *rqst);
 extern int cifs_check_receive(struct mid_q_entry *mid,
			struct TCP_Server_Info *server, bool log_error);
+int wait_for_free_request(struct TCP_Server_Info *server, const int flags,
+			  unsigned int *instance);
 extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server,
				 size_t size, size_t *num,
				 struct cifs_credits *credits);
+
+static inline int
+send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+	    struct mid_q_entry *mid)
+{
+	return server->ops->send_cancel ?
+				server->ops->send_cancel(server, rqst, mid) : 0;
+}
+
+int wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ);
 extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
			struct kvec *, int /* nvec to send */,
			int * /* type of buf returned */, const int flags,
@@ -2751,7 +2751,7 @@ int cifs_query_reparse_point(const unsigned int xid,
	if (cap_unix(tcon->ses))
		return -EOPNOTSUPP;

-	if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS))
+	if (!CIFS_REPARSE_SUPPORT(tcon))
		return -EOPNOTSUPP;

	oparms = (struct cifs_open_parms) {
@@ -2879,7 +2879,7 @@ struct inode *cifs_create_reparse_inode(struct cifs_open_info_data *data,
	 * attempt to create reparse point. This will prevent creating unusable
	 * empty object on the server.
	 */
-	if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS))
+	if (!CIFS_REPARSE_SUPPORT(tcon))
		return ERR_PTR(-EOPNOTSUPP);

 #ifndef CONFIG_CIFS_XATTR
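The CIFS_REPARSE_SUPPORT() conversions above (and the same change further down in cifs_symlink(), cifs_make_node() and smb2_create_reparse_inode()) are what let reparse-point based symlinks and special files work on SMB3.1.1 POSIX mounts even when the server does not advertise FILE_SUPPORTS_REPARSE_POINTS. A small self-contained sketch of the same predicate, with a hypothetical stand-in for the tcon fields it reads:

#include <stdbool.h>
#include <stdint.h>

#define FILE_SUPPORTS_REPARSE_POINTS 0x00000080  /* standard FS attribute bit */

/* Stand-in for the relevant fields of struct cifs_tcon. */
struct tcon_like {
        bool posix_extensions;          /* SMB3.1.1 POSIX extensions negotiated */
        uint32_t fs_attributes;         /* FileFsAttributeInformation attributes */
};

/* Mirrors the intent of CIFS_REPARSE_SUPPORT(tcon): either path qualifies. */
static bool reparse_supported(const struct tcon_like *tcon)
{
        return tcon->posix_extensions ||
               (tcon->fs_attributes & FILE_SUPPORTS_REPARSE_POINTS);
}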
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2008
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *              Jeremy Allison (jra@samba.org) 2006.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/gfp.h>
+#include <linux/wait.h>
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/tcp.h>
+#include <linux/bvec.h>
+#include <linux/highmem.h>
+#include <linux/uaccess.h>
+#include <linux/processor.h>
+#include <linux/mempool.h>
+#include <linux/sched/signal.h>
+#include <linux/task_io_accounting_ops.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "smb2proto.h"
+#include "smbdirect.h"
+#include "compress.h"
+
+/* Max number of iovectors we can use off the stack when sending requests. */
+#define CIFS_MAX_IOV_SIZE 8
[remainder of the new 566-line file: the SMB1 transport routines moved here from
transport.c -- alloc_mid(), smb_send(), allocate_mid(), cifs_setup_async_request(),
SendReceiveNoRsp(), cifs_check_receive(), cifs_setup_request(), SendReceive2(),
SendReceive(), send_lock_cancel() and SendReceiveBlockingLock() -- now taking
mid_queue_lock instead of the old mid_lock]
@@ -321,15 +321,15 @@ cifs_abort_connection(struct TCP_Server_Info *server)
	/* mark submitted MIDs for retry and issue callback */
	INIT_LIST_HEAD(&retry_list);
	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
-	spin_lock(&server->mid_lock);
+	spin_lock(&server->mid_queue_lock);
	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
		kref_get(&mid->refcount);
		if (mid->mid_state == MID_REQUEST_SUBMITTED)
			mid->mid_state = MID_RETRY_NEEDED;
		list_move(&mid->qhead, &retry_list);
-		mid->mid_flags |= MID_DELETED;
+		mid->deleted_from_q = true;
	}
-	spin_unlock(&server->mid_lock);
+	spin_unlock(&server->mid_queue_lock);
	cifs_server_unlock(server);

	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
@@ -358,7 +358,7 @@ static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num
	}

	cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
-	trace_smb3_reconnect(server->CurrentMid, server->conn_id,
+	trace_smb3_reconnect(server->current_mid, server->conn_id,
			     server->hostname);
	server->tcpStatus = CifsNeedReconnect;

@@ -884,13 +884,13 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
		 * server there should be exactly one pending mid
		 * corresponding to SMB1/SMB2 Negotiate packet.
		 */
-		spin_lock(&server->mid_lock);
+		spin_lock(&server->mid_queue_lock);
		list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
			kref_get(&mid->refcount);
			list_move(&mid->qhead, &dispose_list);
-			mid->mid_flags |= MID_DELETED;
+			mid->deleted_from_q = true;
		}
-		spin_unlock(&server->mid_lock);
+		spin_unlock(&server->mid_queue_lock);

		/* Now try to reconnect once with NetBIOS session. */
		server->with_rfc1001 = true;
@@ -957,7 +957,7 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
 #ifdef CONFIG_CIFS_STATS2
	mid->when_received = jiffies;
 #endif
-	spin_lock(&mid->server->mid_lock);
+	spin_lock(&mid->server->mid_queue_lock);
	if (!malformed)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
@@ -966,13 +966,13 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
	 * Trying to handle/dequeue a mid after the send_recv()
	 * function has finished processing it is a bug.
	 */
-	if (mid->mid_flags & MID_DELETED) {
-		spin_unlock(&mid->server->mid_lock);
+	if (mid->deleted_from_q == true) {
+		spin_unlock(&mid->server->mid_queue_lock);
		pr_warn_once("trying to dequeue a deleted mid\n");
	} else {
		list_del_init(&mid->qhead);
-		mid->mid_flags |= MID_DELETED;
-		spin_unlock(&mid->server->mid_lock);
+		mid->deleted_from_q = true;
+		spin_unlock(&mid->server->mid_queue_lock);
	}
 }
@@ -1101,16 +1101,16 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
		struct list_head *tmp, *tmp2;
		LIST_HEAD(dispose_list);

-		spin_lock(&server->mid_lock);
+		spin_lock(&server->mid_queue_lock);
		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
			kref_get(&mid_entry->refcount);
			mid_entry->mid_state = MID_SHUTDOWN;
			list_move(&mid_entry->qhead, &dispose_list);
-			mid_entry->mid_flags |= MID_DELETED;
+			mid_entry->deleted_from_q = true;
		}
-		spin_unlock(&server->mid_lock);
+		spin_unlock(&server->mid_queue_lock);

		/* now walk dispose list and issue callbacks */
		list_for_each_safe(tmp, tmp2, &dispose_list) {
@@ -1242,7 +1242,7 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

-	trace_smb3_hdr_credits(server->CurrentMid,
+	trace_smb3_hdr_credits(server->current_mid,
			server->conn_id, server->hostname, scredits,
			le16_to_cpu(shdr->CreditRequest), in_flight);
	cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
@@ -1822,7 +1822,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
	tcp_ses->compression.requested = ctx->compress;
	spin_lock_init(&tcp_ses->req_lock);
	spin_lock_init(&tcp_ses->srv_lock);
-	spin_lock_init(&tcp_ses->mid_lock);
+	spin_lock_init(&tcp_ses->mid_queue_lock);
+	spin_lock_init(&tcp_ses->mid_counter_lock);
	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
@@ -1652,6 +1652,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
				pr_warn_once("conflicting posix mount options specified\n");
			ctx->linux_ext = 1;
			ctx->no_linux_ext = 0;
+			ctx->nonativesocket = 1; /* POSIX mounts use NFS style reparse points */
		}
		break;
	case Opt_nocase:
@@ -1829,24 +1830,6 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
	return -EINVAL;
 }

-enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb)
-{
-	if (cifs_sb->ctx->symlink_type == CIFS_SYMLINK_TYPE_DEFAULT) {
-		if (cifs_sb->ctx->mfsymlinks)
-			return CIFS_SYMLINK_TYPE_MFSYMLINKS;
-		else if (cifs_sb->ctx->sfu_emul)
-			return CIFS_SYMLINK_TYPE_SFU;
-		else if (cifs_sb->ctx->linux_ext && !cifs_sb->ctx->no_linux_ext)
-			return CIFS_SYMLINK_TYPE_UNIX;
-		else if (cifs_sb->ctx->reparse_type != CIFS_REPARSE_TYPE_NONE)
-			return CIFS_SYMLINK_TYPE_NATIVE;
-		else
-			return CIFS_SYMLINK_TYPE_NONE;
-	} else {
-		return cifs_sb->ctx->symlink_type;
-	}
-}
-
 int smb3_init_fs_context(struct fs_context *fc)
 {
	struct smb3_fs_context *ctx;
@@ -341,7 +341,23 @@ struct smb3_fs_context {

 extern const struct fs_parameter_spec smb3_fs_parameters[];

-extern enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb);
+static inline enum cifs_symlink_type cifs_symlink_type(struct cifs_sb_info *cifs_sb)
+{
+	bool posix = cifs_sb_master_tcon(cifs_sb)->posix_extensions;
+
+	if (cifs_sb->ctx->symlink_type != CIFS_SYMLINK_TYPE_DEFAULT)
+		return cifs_sb->ctx->symlink_type;
+
+	if (cifs_sb->ctx->mfsymlinks)
+		return CIFS_SYMLINK_TYPE_MFSYMLINKS;
+	else if (cifs_sb->ctx->sfu_emul)
+		return CIFS_SYMLINK_TYPE_SFU;
+	else if (cifs_sb->ctx->linux_ext && !cifs_sb->ctx->no_linux_ext)
+		return posix ? CIFS_SYMLINK_TYPE_NATIVE : CIFS_SYMLINK_TYPE_UNIX;
+	else if (cifs_sb->ctx->reparse_type != CIFS_REPARSE_TYPE_NONE)
+		return CIFS_SYMLINK_TYPE_NATIVE;
+	return CIFS_SYMLINK_TYPE_NONE;
+}
+
 extern int smb3_init_fs_context(struct fs_context *fc);
 extern void smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx);
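The new cifs_symlink_type() inline above also carries one of the native-symlink fixes from the pull message: with the SMB3.1.1 POSIX extensions negotiated and no explicit symlink= mount option, the default now resolves to native (reparse point) symlinks rather than the old unix style. A hedged stand-alone sketch of the same decision ladder, with simplified types in place of the kernel structures:

#include <stdbool.h>

enum symlink_type { SYML_DEFAULT, SYML_NONE, SYML_NATIVE, SYML_UNIX,
                    SYML_MFSYMLINKS, SYML_SFU };

/* Stand-ins for the mount-context fields the kernel helper consults. */
struct ctx_like {
        enum symlink_type symlink_type; /* explicit symlink= option, or SYML_DEFAULT */
        bool mfsymlinks;
        bool sfu_emul;
        bool linux_ext;                 /* SMB3.1.1 POSIX extensions requested */
        bool no_linux_ext;
        bool reparse;                   /* reparse_type is not "none" */
};

/* Mirrors the decision ladder of the new cifs_symlink_type() helper. */
static enum symlink_type resolve_symlink_type(const struct ctx_like *ctx,
                                              bool posix_tcon)
{
        if (ctx->symlink_type != SYML_DEFAULT)
                return ctx->symlink_type;       /* explicit mount option wins */
        if (ctx->mfsymlinks)
                return SYML_MFSYMLINKS;
        if (ctx->sfu_emul)
                return SYML_SFU;
        if (ctx->linux_ext && !ctx->no_linux_ext)
                /* the change in this series: POSIX mounts default to native symlinks */
                return posix_tcon ? SYML_NATIVE : SYML_UNIX;
        if (ctx->reparse)
                return SYML_NATIVE;
        return SYML_NONE;
}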
@@ -605,14 +605,7 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,

	/* BB what if DFS and this volume is on different share? BB */
	rc = -EOPNOTSUPP;
-	switch (get_cifs_symlink_type(cifs_sb)) {
-	case CIFS_SYMLINK_TYPE_DEFAULT:
-		/* should not happen, get_cifs_symlink_type() resolves the default */
-		break;
-
-	case CIFS_SYMLINK_TYPE_NONE:
-		break;
-
+	switch (cifs_symlink_type(cifs_sb)) {
	case CIFS_SYMLINK_TYPE_UNIX:
 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
		if (pTcon->unix_ext) {
@@ -642,12 +635,14 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
	case CIFS_SYMLINK_TYPE_NATIVE:
	case CIFS_SYMLINK_TYPE_NFS:
	case CIFS_SYMLINK_TYPE_WSL:
-		if (le32_to_cpu(pTcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) {
+		if (CIFS_REPARSE_SUPPORT(pTcon)) {
			rc = create_reparse_symlink(xid, inode, direntry, pTcon,
						    full_path, symname);
			goto symlink_exit;
		}
		break;
+	default:
+		break;
	}

	if (rc == 0) {
@@ -38,7 +38,7 @@ int create_reparse_symlink(const unsigned int xid, struct inode *inode,
			   struct dentry *dentry, struct cifs_tcon *tcon,
			   const char *full_path, const char *symname)
 {
-	switch (get_cifs_symlink_type(CIFS_SB(inode->i_sb))) {
+	switch (cifs_symlink_type(CIFS_SB(inode->i_sb))) {
	case CIFS_SYMLINK_TYPE_NATIVE:
		return create_native_symlink(xid, inode, dentry, tcon, full_path, symname);
	case CIFS_SYMLINK_TYPE_NFS:
@@ -95,17 +95,17 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
 	struct smb_hdr *buf = (struct smb_hdr *)buffer;
 	struct mid_q_entry *mid;
 
-	spin_lock(&server->mid_lock);
+	spin_lock(&server->mid_queue_lock);
 	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
 		if (compare_mid(mid->mid, buf) &&
 		    mid->mid_state == MID_REQUEST_SUBMITTED &&
 		    le16_to_cpu(mid->command) == buf->Command) {
 			kref_get(&mid->refcount);
-			spin_unlock(&server->mid_lock);
+			spin_unlock(&server->mid_queue_lock);
 			return mid;
 		}
 	}
-	spin_unlock(&server->mid_lock);
+	spin_unlock(&server->mid_queue_lock);
 	return NULL;
 }
 
@@ -169,10 +169,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
 	__u16 last_mid, cur_mid;
 	bool collision, reconnect = false;
 
-	spin_lock(&server->mid_lock);
-
+	spin_lock(&server->mid_counter_lock);
 	/* mid is 16 bit only for CIFS/SMB */
-	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+	cur_mid = (__u16)((server->current_mid) & 0xffff);
 	/* we do not want to loop forever */
 	last_mid = cur_mid;
 	cur_mid++;
@@ -198,6 +197,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
 		cur_mid++;
 
 		num_mids = 0;
+		spin_lock(&server->mid_queue_lock);
 		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
 			++num_mids;
 			if (mid_entry->mid == cur_mid &&
@@ -207,6 +207,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
 				break;
 			}
 		}
+		spin_unlock(&server->mid_queue_lock);
 
 		/*
 		 * if we have more than 32k mids in the list, then something
@@ -223,12 +224,12 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
 
 		if (!collision) {
 			mid = (__u64)cur_mid;
-			server->CurrentMid = mid;
+			server->current_mid = mid;
 			break;
 		}
 		cur_mid++;
 	}
-	spin_unlock(&server->mid_lock);
+	spin_unlock(&server->mid_counter_lock);
 
 	if (reconnect) {
 		cifs_signal_cifsd_for_reconnect(server, false);
@@ -1272,7 +1273,7 @@ cifs_make_node(unsigned int xid, struct inode *inode,
 		 */
 		return cifs_sfu_make_node(xid, inode, dentry, tcon,
 					  full_path, mode, dev);
-	} else if (le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) {
+	} else if (CIFS_REPARSE_SUPPORT(tcon)) {
 		/*
 		 * mknod via reparse points requires server support for
 		 * storing reparse points, which is available since
@@ -1346,9 +1346,8 @@ struct inode *smb2_create_reparse_inode(struct cifs_open_info_data *data,
 	 * attempt to create reparse point. This will prevent creating unusable
 	 * empty object on the server.
 	 */
-	if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS))
-		if (!tcon->posix_extensions)
-			return ERR_PTR(-EOPNOTSUPP);
+	if (!CIFS_REPARSE_SUPPORT(tcon))
+		return ERR_PTR(-EOPNOTSUPP);
 
 	oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
 			     SYNCHRONIZE | DELETE |
@@ -91,7 +91,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
 	if (*val > 65000) {
 		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
 		pr_warn_once("server overflowed SMB3 credits\n");
-		trace_smb3_overflow_credits(server->CurrentMid,
+		trace_smb3_overflow_credits(server->current_mid,
 					    server->conn_id, server->hostname, *val,
 					    add, server->in_flight);
 	}
@@ -136,7 +136,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
 	wake_up(&server->request_q);
 
 	if (reconnect_detected) {
-		trace_smb3_reconnect_detected(server->CurrentMid,
+		trace_smb3_reconnect_detected(server->current_mid,
 			server->conn_id, server->hostname, scredits, add, in_flight);
 
 		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
@@ -144,7 +144,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
 	}
 
 	if (reconnect_with_invalid_credits) {
-		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
+		trace_smb3_reconnect_with_invalid_credits(server->current_mid,
 			server->conn_id, server->hostname, scredits, add, in_flight);
 		cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
 			 optype, scredits, add);
@@ -176,7 +176,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
 		break;
 	}
 
-	trace_smb3_add_credits(server->CurrentMid,
+	trace_smb3_add_credits(server->current_mid,
 			server->conn_id, server->hostname, scredits, add, in_flight);
 	cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
 }
@@ -203,7 +203,7 @@ smb2_set_credits(struct TCP_Server_Info *server, const int val)
 	in_flight = server->in_flight;
 	spin_unlock(&server->req_lock);
 
-	trace_smb3_set_credits(server->CurrentMid,
+	trace_smb3_set_credits(server->current_mid,
 			server->conn_id, server->hostname, scredits, val, in_flight);
 	cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
 
@@ -288,7 +288,7 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
 	in_flight = server->in_flight;
 	spin_unlock(&server->req_lock);
 
-	trace_smb3_wait_credits(server->CurrentMid,
+	trace_smb3_wait_credits(server->current_mid,
 			server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
 	cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
 			__func__, credits->value, scredits);
@@ -316,7 +316,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
 				      server->credits, server->in_flight,
 				      new_val - credits->value,
 				      cifs_trace_rw_credits_no_adjust_up);
-		trace_smb3_too_many_credits(server->CurrentMid,
+		trace_smb3_too_many_credits(server->current_mid,
 				server->conn_id, server->hostname, 0, credits->value - new_val, 0);
 		cifs_server_dbg(VFS, "R=%x[%x] request has less credits (%d) than required (%d)",
 				subreq->rreq->debug_id, subreq->subreq.debug_index,
@@ -338,7 +338,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
 				      server->credits, server->in_flight,
 				      new_val - credits->value,
 				      cifs_trace_rw_credits_old_session);
-		trace_smb3_reconnect_detected(server->CurrentMid,
+		trace_smb3_reconnect_detected(server->current_mid,
 			server->conn_id, server->hostname, scredits,
 			credits->value - new_val, in_flight);
 		cifs_server_dbg(VFS, "R=%x[%x] trying to return %d credits to old session\n",
@@ -358,7 +358,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
 	spin_unlock(&server->req_lock);
 	wake_up(&server->request_q);
 
-	trace_smb3_adj_credits(server->CurrentMid,
+	trace_smb3_adj_credits(server->current_mid,
 			server->conn_id, server->hostname, scredits,
 			credits->value - new_val, in_flight);
 	cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
@@ -374,19 +374,19 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
 {
 	__u64 mid;
 	/* for SMB2 we need the current value */
-	spin_lock(&server->mid_lock);
-	mid = server->CurrentMid++;
-	spin_unlock(&server->mid_lock);
+	spin_lock(&server->mid_counter_lock);
+	mid = server->current_mid++;
+	spin_unlock(&server->mid_counter_lock);
 	return mid;
 }
 
 static void
 smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
 {
-	spin_lock(&server->mid_lock);
-	if (server->CurrentMid >= val)
-		server->CurrentMid -= val;
-	spin_unlock(&server->mid_lock);
+	spin_lock(&server->mid_counter_lock);
+	if (server->current_mid >= val)
+		server->current_mid -= val;
+	spin_unlock(&server->mid_counter_lock);
 }
 
 static struct mid_q_entry *
@@ -401,7 +401,7 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
 		return NULL;
 	}
 
-	spin_lock(&server->mid_lock);
+	spin_lock(&server->mid_queue_lock);
 	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
 		if ((mid->mid == wire_mid) &&
 		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
@@ -409,13 +409,13 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
 			kref_get(&mid->refcount);
 			if (dequeue) {
 				list_del_init(&mid->qhead);
-				mid->mid_flags |= MID_DELETED;
+				mid->deleted_from_q = true;
 			}
-			spin_unlock(&server->mid_lock);
+			spin_unlock(&server->mid_queue_lock);
 			return mid;
 		}
 	}
-	spin_unlock(&server->mid_lock);
+	spin_unlock(&server->mid_queue_lock);
 	return NULL;
 }
 
@@ -460,9 +460,9 @@ smb2_negotiate(const unsigned int xid,
 {
 	int rc;
 
-	spin_lock(&server->mid_lock);
-	server->CurrentMid = 0;
-	spin_unlock(&server->mid_lock);
+	spin_lock(&server->mid_counter_lock);
+	server->current_mid = 0;
+	spin_unlock(&server->mid_counter_lock);
 	rc = SMB2_negotiate(xid, ses, server);
 	return rc;
 }
@@ -2498,7 +2498,7 @@ smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
 		spin_unlock(&server->req_lock);
 		wake_up(&server->request_q);
 
-		trace_smb3_pend_credits(server->CurrentMid,
+		trace_smb3_pend_credits(server->current_mid,
 				server->conn_id, server->hostname, scredits,
 				le16_to_cpu(shdr->CreditRequest), in_flight);
 		cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
@@ -4809,18 +4809,18 @@ static void smb2_decrypt_offload(struct work_struct *work)
 	} else {
 		spin_lock(&dw->server->srv_lock);
 		if (dw->server->tcpStatus == CifsNeedReconnect) {
-			spin_lock(&dw->server->mid_lock);
+			spin_lock(&dw->server->mid_queue_lock);
 			mid->mid_state = MID_RETRY_NEEDED;
-			spin_unlock(&dw->server->mid_lock);
+			spin_unlock(&dw->server->mid_queue_lock);
 			spin_unlock(&dw->server->srv_lock);
 			mid->callback(mid);
 		} else {
-			spin_lock(&dw->server->mid_lock);
+			spin_lock(&dw->server->mid_queue_lock);
 			mid->mid_state = MID_REQUEST_SUBMITTED;
-			mid->mid_flags &= ~(MID_DELETED);
+			mid->deleted_from_q = false;
 			list_add_tail(&mid->qhead,
 				      &dw->server->pending_mid_q);
-			spin_unlock(&dw->server->mid_lock);
+			spin_unlock(&dw->server->mid_queue_lock);
 			spin_unlock(&dw->server->srv_lock);
 		}
 	}
@@ -5260,10 +5260,9 @@ static int smb2_make_node(unsigned int xid, struct inode *inode,
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
 		rc = cifs_sfu_make_node(xid, inode, dentry, tcon,
 					full_path, mode, dev);
-	} else if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS)
-		   || (tcon->posix_extensions)) {
+	} else if (CIFS_REPARSE_SUPPORT(tcon)) {
 		rc = mknod_reparse(xid, inode, dentry, tcon,
 				   full_path, mode, dev);
 	}
 	return rc;
 }
@@ -840,9 +840,9 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
 	*mid = smb2_mid_entry_alloc(shdr, server);
 	if (*mid == NULL)
 		return -ENOMEM;
-	spin_lock(&server->mid_lock);
+	spin_lock(&server->mid_queue_lock);
 	list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
-	spin_unlock(&server->mid_lock);
+	spin_unlock(&server->mid_queue_lock);
 
 	return 0;
 }
@@ -13,27 +13,23 @@
 #include "cifsproto.h"
 #include "smb2proto.h"
 
-static struct smbd_response *get_empty_queue_buffer(
-		struct smbd_connection *info);
-static struct smbd_response *get_receive_buffer(
+static struct smbdirect_recv_io *get_receive_buffer(
 		struct smbd_connection *info);
 static void put_receive_buffer(
 		struct smbd_connection *info,
-		struct smbd_response *response);
+		struct smbdirect_recv_io *response);
 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
 static void destroy_receive_buffers(struct smbd_connection *info);
 
-static void put_empty_packet(
-		struct smbd_connection *info, struct smbd_response *response);
 static void enqueue_reassembly(
 		struct smbd_connection *info,
-		struct smbd_response *response, int data_length);
-static struct smbd_response *_get_first_reassembly(
+		struct smbdirect_recv_io *response, int data_length);
+static struct smbdirect_recv_io *_get_first_reassembly(
 		struct smbd_connection *info);
 
 static int smbd_post_recv(
 		struct smbd_connection *info,
-		struct smbd_response *response);
+		struct smbdirect_recv_io *response);
 
 static int smbd_post_send_empty(struct smbd_connection *info);
 
@@ -182,9 +178,10 @@ static int smbd_conn_upcall(
 {
 	struct smbd_connection *info = id->context;
 	struct smbdirect_socket *sc = &info->socket;
+	const char *event_name = rdma_event_msg(event->event);
 
-	log_rdma_event(INFO, "event=%d status=%d\n",
-		event->event, event->status);
+	log_rdma_event(INFO, "event=%s status=%d\n",
+		event_name, event->status);
 
 	switch (event->event) {
 	case RDMA_CM_EVENT_ADDR_RESOLVED:
@@ -194,45 +191,50 @@ static int smbd_conn_upcall(
 		break;
 
 	case RDMA_CM_EVENT_ADDR_ERROR:
+		log_rdma_event(ERR, "connecting failed event=%s\n", event_name);
 		info->ri_rc = -EHOSTUNREACH;
 		complete(&info->ri_done);
 		break;
 
 	case RDMA_CM_EVENT_ROUTE_ERROR:
+		log_rdma_event(ERR, "connecting failed event=%s\n", event_name);
 		info->ri_rc = -ENETUNREACH;
 		complete(&info->ri_done);
 		break;
 
 	case RDMA_CM_EVENT_ESTABLISHED:
-		log_rdma_event(INFO, "connected event=%d\n", event->event);
+		log_rdma_event(INFO, "connected event=%s\n", event_name);
 		sc->status = SMBDIRECT_SOCKET_CONNECTED;
-		wake_up_interruptible(&info->conn_wait);
+		wake_up_interruptible(&info->status_wait);
 		break;
 
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
 	case RDMA_CM_EVENT_REJECTED:
-		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
+		log_rdma_event(ERR, "connecting failed event=%s\n", event_name);
 		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
-		wake_up_interruptible(&info->conn_wait);
+		wake_up_interruptible(&info->status_wait);
 		break;
 
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 	case RDMA_CM_EVENT_DISCONNECTED:
 		/* This happens when we fail the negotiation */
 		if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
+			log_rdma_event(ERR, "event=%s during negotiation\n", event_name);
 			sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
-			wake_up(&info->conn_wait);
+			wake_up(&info->status_wait);
 			break;
 		}
 
 		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
-		wake_up_interruptible(&info->disconn_wait);
-		wake_up_interruptible(&info->wait_reassembly_queue);
+		wake_up_interruptible(&info->status_wait);
+		wake_up_interruptible(&sc->recv_io.reassembly.wait_queue);
 		wake_up_interruptible_all(&info->wait_send_queue);
 		break;
 
 	default:
+		log_rdma_event(ERR, "unexpected event=%s status=%d\n",
+			       event_name, event->status);
 		break;
 	}
 
@@ -259,12 +261,12 @@ smbd_qp_async_error_upcall(struct ib_event *event, void *context)
 	}
 }
 
-static inline void *smbd_request_payload(struct smbd_request *request)
+static inline void *smbdirect_send_io_payload(struct smbdirect_send_io *request)
 {
 	return (void *)request->packet;
 }
 
-static inline void *smbd_response_payload(struct smbd_response *response)
+static inline void *smbdirect_recv_io_payload(struct smbdirect_recv_io *response)
 {
 	return (void *)response->packet;
 }
@@ -273,32 +275,35 @@ static inline void *smbd_response_payload(struct smbd_response *response)
 static void send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	int i;
-	struct smbd_request *request =
-		container_of(wc->wr_cqe, struct smbd_request, cqe);
-	struct smbd_connection *info = request->info;
-	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_send_io *request =
+		container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
+	struct smbdirect_socket *sc = request->socket;
+	struct smbd_connection *info =
+		container_of(sc, struct smbd_connection, socket);
 
-	log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
+	log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%d\n",
 		request, wc->status);
 
-	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
-		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
-			wc->status, wc->opcode);
-		smbd_disconnect_rdma_connection(request->info);
-	}
-
 	for (i = 0; i < request->num_sge; i++)
 		ib_dma_unmap_single(sc->ib.dev,
 			request->sge[i].addr,
 			request->sge[i].length,
 			DMA_TO_DEVICE);
 
-	if (atomic_dec_and_test(&request->info->send_pending))
-		wake_up(&request->info->wait_send_pending);
+	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
+			wc->status, wc->opcode);
+		mempool_free(request, sc->send_io.mem.pool);
+		smbd_disconnect_rdma_connection(info);
+		return;
+	}
 
-	wake_up(&request->info->wait_post_send);
+	if (atomic_dec_and_test(&info->send_pending))
+		wake_up(&info->wait_send_pending);
 
-	mempool_free(request, request->info->request_mempool);
+	wake_up(&info->wait_post_send);
+
+	mempool_free(request, sc->send_io.mem.pool);
 }
 
 static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp)
@@ -317,12 +322,13 @@ static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp)
  * return value: true if negotiation is a success, false if failed
  */
 static bool process_negotiation_response(
-		struct smbd_response *response, int packet_length)
+		struct smbdirect_recv_io *response, int packet_length)
 {
-	struct smbd_connection *info = response->info;
-	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_socket *sc = response->socket;
+	struct smbd_connection *info =
+		container_of(sc, struct smbd_connection, socket);
 	struct smbdirect_socket_parameters *sp = &sc->parameters;
-	struct smbdirect_negotiate_resp *packet = smbd_response_payload(response);
+	struct smbdirect_negotiate_resp *packet = smbdirect_recv_io_payload(response);
 
 	if (packet_length < sizeof(struct smbdirect_negotiate_resp)) {
 		log_rdma_event(ERR,
@@ -385,15 +391,15 @@ static bool process_negotiation_response(
 			info->max_frmr_depth * PAGE_SIZE);
 	info->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
 
+	sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER;
 	return true;
 }
 
 static void smbd_post_send_credits(struct work_struct *work)
 {
 	int ret = 0;
-	int use_receive_queue = 1;
 	int rc;
-	struct smbd_response *response;
+	struct smbdirect_recv_io *response;
 	struct smbd_connection *info =
 		container_of(work, struct smbd_connection,
 			post_send_credits_work);
@@ -407,20 +413,10 @@ static void smbd_post_send_credits(struct work_struct *work)
 	if (info->receive_credit_target >
 		atomic_read(&info->receive_credits)) {
 		while (true) {
-			if (use_receive_queue)
-				response = get_receive_buffer(info);
-			else
-				response = get_empty_queue_buffer(info);
-			if (!response) {
-				/* now switch to empty packet queue */
-				if (use_receive_queue) {
-					use_receive_queue = 0;
-					continue;
-				} else
-					break;
-			}
+			response = get_receive_buffer(info);
+			if (!response)
+				break;
 
-			response->type = SMBD_TRANSFER_DATA;
 			response->first_segment = false;
 			rc = smbd_post_recv(info, response);
 			if (rc) {
@@ -454,19 +450,20 @@
 static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct smbdirect_data_transfer *data_transfer;
-	struct smbd_response *response =
-		container_of(wc->wr_cqe, struct smbd_response, cqe);
-	struct smbd_connection *info = response->info;
+	struct smbdirect_recv_io *response =
+		container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe);
+	struct smbdirect_socket *sc = response->socket;
+	struct smbd_connection *info =
+		container_of(sc, struct smbd_connection, socket);
 	int data_length = 0;
 
 	log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n",
-		      response, response->type, wc->status, wc->opcode,
+		      response, sc->recv_io.expected, wc->status, wc->opcode,
 		      wc->byte_len, wc->pkey_index);
 
 	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
 		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
 			wc->status, wc->opcode);
-		smbd_disconnect_rdma_connection(info);
 		goto error;
 	}
@@ -476,43 +473,31 @@
 			response->sge.length,
 			DMA_FROM_DEVICE);
 
-	switch (response->type) {
+	switch (sc->recv_io.expected) {
 	/* SMBD negotiation response */
-	case SMBD_NEGOTIATE_RESP:
-		dump_smbdirect_negotiate_resp(smbd_response_payload(response));
-		info->full_packet_received = true;
+	case SMBDIRECT_EXPECT_NEGOTIATE_REP:
+		dump_smbdirect_negotiate_resp(smbdirect_recv_io_payload(response));
+		sc->recv_io.reassembly.full_packet_received = true;
 		info->negotiate_done =
 			process_negotiation_response(response, wc->byte_len);
+		put_receive_buffer(info, response);
 		complete(&info->negotiate_completion);
-		break;
+		return;
 
 	/* SMBD data transfer packet */
-	case SMBD_TRANSFER_DATA:
-		data_transfer = smbd_response_payload(response);
+	case SMBDIRECT_EXPECT_DATA_TRANSFER:
+		data_transfer = smbdirect_recv_io_payload(response);
 		data_length = le32_to_cpu(data_transfer->data_length);
 
-		/*
-		 * If this is a packet with data playload place the data in
-		 * reassembly queue and wake up the reading thread
-		 */
 		if (data_length) {
-			if (info->full_packet_received)
+			if (sc->recv_io.reassembly.full_packet_received)
 				response->first_segment = true;
 
 			if (le32_to_cpu(data_transfer->remaining_data_length))
-				info->full_packet_received = false;
+				sc->recv_io.reassembly.full_packet_received = false;
 			else
-				info->full_packet_received = true;
-
-			enqueue_reassembly(
-				info,
-				response,
-				data_length);
-		} else
-			put_empty_packet(info, response);
-
-		if (data_length)
-			wake_up_interruptible(&info->wait_reassembly_queue);
+				sc->recv_io.reassembly.full_packet_received = true;
+		}
 
 		atomic_dec(&info->receive_credits);
 		info->receive_credit_target =
@@ -540,15 +525,31 @@
 			info->keep_alive_requested = KEEP_ALIVE_PENDING;
 		}
 
+		/*
+		 * If this is a packet with data playload place the data in
+		 * reassembly queue and wake up the reading thread
+		 */
+		if (data_length) {
+			enqueue_reassembly(info, response, data_length);
+			wake_up_interruptible(&sc->recv_io.reassembly.wait_queue);
+		} else
+			put_receive_buffer(info, response);
+
 		return;
 
-	default:
-		log_rdma_recv(ERR,
-			"unexpected response type=%d\n", response->type);
+	case SMBDIRECT_EXPECT_NEGOTIATE_REQ:
+		/* Only server... */
+		break;
 	}
 
+	/*
+	 * This is an internal error!
+	 */
+	log_rdma_recv(ERR, "unexpected response type=%d\n", sc->recv_io.expected);
+	WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_DATA_TRANSFER);
 error:
 	put_receive_buffer(info, response);
+	smbd_disconnect_rdma_connection(info);
 }
 
 static struct rdma_cm_id *smbd_create_id(
@@ -694,16 +695,16 @@
 	struct smbdirect_socket_parameters *sp = &sc->parameters;
 	struct ib_send_wr send_wr;
 	int rc = -ENOMEM;
-	struct smbd_request *request;
+	struct smbdirect_send_io *request;
 	struct smbdirect_negotiate_req *packet;
 
-	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+	request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL);
 	if (!request)
 		return rc;
 
-	request->info = info;
+	request->socket = sc;
 
-	packet = smbd_request_payload(request);
+	packet = smbdirect_send_io_payload(request);
 	packet->min_version = cpu_to_le16(SMBDIRECT_V1);
 	packet->max_version = cpu_to_le16(SMBDIRECT_V1);
 	packet->reserved = 0;
@@ -756,7 +757,7 @@
 	smbd_disconnect_rdma_connection(info);
 
 dma_mapping_failed:
-	mempool_free(request, info->request_mempool);
+	mempool_free(request, sc->send_io.mem.pool);
 	return rc;
 }
 
@@ -800,7 +801,7 @@
 
 /* Post the send request */
 static int smbd_post_send(struct smbd_connection *info,
-		struct smbd_request *request)
+		struct smbdirect_send_io *request)
 {
 	struct smbdirect_socket *sc = &info->socket;
 	struct smbdirect_socket_parameters *sp = &sc->parameters;
@@ -849,7 +850,7 @@
 	int i, rc;
 	int header_length;
 	int data_length;
-	struct smbd_request *request;
+	struct smbdirect_send_io *request;
 	struct smbdirect_data_transfer *packet;
 	int new_credits = 0;
 
@@ -888,20 +889,20 @@ wait_send_queue:
 		goto wait_send_queue;
 	}
 
-	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+	request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL);
 	if (!request) {
 		rc = -ENOMEM;
 		goto err_alloc;
 	}
 
-	request->info = info;
+	request->socket = sc;
 	memset(request->sge, 0, sizeof(request->sge));
 
 	/* Fill in the data payload to find out how much data we can add */
 	if (iter) {
 		struct smb_extract_to_rdma extract = {
 			.nr_sge = 1,
-			.max_sge = SMBDIRECT_MAX_SEND_SGE,
+			.max_sge = SMBDIRECT_SEND_IO_MAX_SGE,
 			.sge = request->sge,
 			.device = sc->ib.dev,
 			.local_dma_lkey = sc->ib.pd->local_dma_lkey,
@@ -923,7 +924,7 @@ wait_send_queue:
 	}
 
 	/* Fill in the packet header */
-	packet = smbd_request_payload(request);
+	packet = smbdirect_send_io_payload(request);
 	packet->credits_requested = cpu_to_le16(sp->send_credit_target);
 
 	new_credits = manage_credits_prior_sending(info);
@@ -982,7 +983,7 @@ err_dma:
 			request->sge[i].addr,
 			request->sge[i].length,
 			DMA_TO_DEVICE);
-	mempool_free(request, info->request_mempool);
+	mempool_free(request, sc->send_io.mem.pool);
 
 	/* roll back receive credits and credits to be offered */
 	spin_lock(&info->lock_new_credits_offered);
@@ -1042,7 +1043,7 @@
  * The interaction is controlled by send/receive credit system
  */
 static int smbd_post_recv(
-		struct smbd_connection *info, struct smbd_response *response)
+		struct smbd_connection *info, struct smbdirect_recv_io *response)
 {
 	struct smbdirect_socket *sc = &info->socket;
 	struct smbdirect_socket_parameters *sp = &sc->parameters;
@@ -1069,6 +1070,7 @@
 	if (rc) {
 		ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
 				    response->sge.length, DMA_FROM_DEVICE);
+		response->sge.length = 0;
 		smbd_disconnect_rdma_connection(info);
 		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
 	}
@@ -1079,10 +1081,11 @@
 /* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
 static int smbd_negotiate(struct smbd_connection *info)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	int rc;
-	struct smbd_response *response = get_receive_buffer(info);
+	struct smbdirect_recv_io *response = get_receive_buffer(info);
 
-	response->type = SMBD_NEGOTIATE_RESP;
+	sc->recv_io.expected = SMBDIRECT_EXPECT_NEGOTIATE_REP;
 	rc = smbd_post_recv(info, response);
 	log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
 		rc, response->sge.addr,
@@ -1113,17 +1116,6 @@
 	return rc;
 }
 
-static void put_empty_packet(
-		struct smbd_connection *info, struct smbd_response *response)
-{
-	spin_lock(&info->empty_packet_queue_lock);
-	list_add_tail(&response->list, &info->empty_packet_queue);
-	info->count_empty_packet_queue++;
-	spin_unlock(&info->empty_packet_queue_lock);
-
-	queue_work(info->workqueue, &info->post_send_credits_work);
-}
-
 /*
  * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
  * This is a queue for reassembling upper layer payload and present to upper
@@ -1136,12 +1128,14 @@
  */
 static void enqueue_reassembly(
 		struct smbd_connection *info,
-		struct smbd_response *response,
+		struct smbdirect_recv_io *response,
 		int data_length)
 {
-	spin_lock(&info->reassembly_queue_lock);
-	list_add_tail(&response->list, &info->reassembly_queue);
-	info->reassembly_queue_length++;
+	struct smbdirect_socket *sc = &info->socket;
+
+	spin_lock(&sc->recv_io.reassembly.lock);
+	list_add_tail(&response->list, &sc->recv_io.reassembly.list);
+	sc->recv_io.reassembly.queue_length++;
 	/*
 	 * Make sure reassembly_data_length is updated after list and
 	 * reassembly_queue_length are updated. On the dequeue side
@@ -1149,8 +1143,8 @@
 	 * if reassembly_queue_length and list is up to date
 	 */
 	virt_wmb();
-	info->reassembly_data_length += data_length;
-	spin_unlock(&info->reassembly_queue_lock);
+	sc->recv_io.reassembly.data_length += data_length;
+	spin_unlock(&sc->recv_io.reassembly.lock);
 	info->count_reassembly_queue++;
 	info->count_enqueue_reassembly_queue++;
 }
@@ -1160,58 +1154,41 @@
  * Caller is responsible for locking
  * return value: the first entry if any, NULL if queue is empty
  */
-static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
+static struct smbdirect_recv_io *_get_first_reassembly(struct smbd_connection *info)
 {
-	struct smbd_response *ret = NULL;
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_recv_io *ret = NULL;
 
-	if (!list_empty(&info->reassembly_queue)) {
+	if (!list_empty(&sc->recv_io.reassembly.list)) {
 		ret = list_first_entry(
-			&info->reassembly_queue,
-			struct smbd_response, list);
+			&sc->recv_io.reassembly.list,
+			struct smbdirect_recv_io, list);
 	}
 	return ret;
 }
 
-static struct smbd_response *get_empty_queue_buffer(
-		struct smbd_connection *info)
-{
-	struct smbd_response *ret = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
-	if (!list_empty(&info->empty_packet_queue)) {
-		ret = list_first_entry(
-			&info->empty_packet_queue,
-			struct smbd_response, list);
-		list_del(&ret->list);
-		info->count_empty_packet_queue--;
-	}
-	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
-
-	return ret;
-}
-
 /*
  * Get a receive buffer
  * For each remote send, we need to post a receive. The receive buffers are
  * pre-allocated in advance.
  * return value: the receive buffer, NULL if none is available
 */
-static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
+static struct smbdirect_recv_io *get_receive_buffer(struct smbd_connection *info)
 {
-	struct smbd_response *ret = NULL;
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_recv_io *ret = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&info->receive_queue_lock, flags);
-	if (!list_empty(&info->receive_queue)) {
+	spin_lock_irqsave(&sc->recv_io.free.lock, flags);
+	if (!list_empty(&sc->recv_io.free.list)) {
 		ret = list_first_entry(
-			&info->receive_queue,
-			struct smbd_response, list);
+			&sc->recv_io.free.list,
+			struct smbdirect_recv_io, list);
 		list_del(&ret->list);
 		info->count_receive_queue--;
 		info->count_get_receive_buffer++;
 	}
-	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+	spin_unlock_irqrestore(&sc->recv_io.free.lock, flags);
 
 	return ret;
 }
@@ -1223,19 +1200,24 @@
  * receive buffer is returned.
  */
 static void put_receive_buffer(
-	struct smbd_connection *info, struct smbd_response *response)
+	struct smbd_connection *info, struct smbdirect_recv_io *response)
 {
 	struct smbdirect_socket *sc = &info->socket;
 	unsigned long flags;
 
-	ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
-		response->sge.length, DMA_FROM_DEVICE);
+	if (likely(response->sge.length != 0)) {
+		ib_dma_unmap_single(sc->ib.dev,
+				    response->sge.addr,
+				    response->sge.length,
+				    DMA_FROM_DEVICE);
+		response->sge.length = 0;
+	}
 
-	spin_lock_irqsave(&info->receive_queue_lock, flags);
-	list_add_tail(&response->list, &info->receive_queue);
+	spin_lock_irqsave(&sc->recv_io.free.lock, flags);
+	list_add_tail(&response->list, &sc->recv_io.free.list);
 	info->count_receive_queue++;
 	info->count_put_receive_buffer++;
-	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+	spin_unlock_irqrestore(&sc->recv_io.free.lock, flags);
 
 	queue_work(info->workqueue, &info->post_send_credits_work);
 }
@@ -1243,58 +1225,54 @@
 /* Preallocate all receive buffer on transport establishment */
 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
 {
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_recv_io *response;
 	int i;
-	struct smbd_response *response;
 
-	INIT_LIST_HEAD(&info->reassembly_queue);
-	spin_lock_init(&info->reassembly_queue_lock);
-	info->reassembly_data_length = 0;
-	info->reassembly_queue_length = 0;
+	INIT_LIST_HEAD(&sc->recv_io.reassembly.list);
+	spin_lock_init(&sc->recv_io.reassembly.lock);
+	sc->recv_io.reassembly.data_length = 0;
+	sc->recv_io.reassembly.queue_length = 0;
 
-	INIT_LIST_HEAD(&info->receive_queue);
-	spin_lock_init(&info->receive_queue_lock);
+	INIT_LIST_HEAD(&sc->recv_io.free.list);
+	spin_lock_init(&sc->recv_io.free.lock);
 	info->count_receive_queue = 0;
 
-	INIT_LIST_HEAD(&info->empty_packet_queue);
-	spin_lock_init(&info->empty_packet_queue_lock);
-	info->count_empty_packet_queue = 0;
-
 	init_waitqueue_head(&info->wait_receive_queues);
 
 	for (i = 0; i < num_buf; i++) {
-		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
+		response = mempool_alloc(sc->recv_io.mem.pool, GFP_KERNEL);
 		if (!response)
 			goto allocate_failed;
 
-		response->info = info;
-		list_add_tail(&response->list, &info->receive_queue);
+		response->socket = sc;
+		response->sge.length = 0;
+		list_add_tail(&response->list, &sc->recv_io.free.list);
 		info->count_receive_queue++;
 	}
 
 	return 0;
 
 allocate_failed:
-	while (!list_empty(&info->receive_queue)) {
+	while (!list_empty(&sc->recv_io.free.list)) {
 		response = list_first_entry(
-				&info->receive_queue,
-				struct smbd_response, list);
+				&sc->recv_io.free.list,
+				struct smbdirect_recv_io, list);
 		list_del(&response->list);
 		info->count_receive_queue--;
 
-		mempool_free(response, info->response_mempool);
+		mempool_free(response, sc->recv_io.mem.pool);
 	}
 	return -ENOMEM;
 }
 
 static void destroy_receive_buffers(struct smbd_connection *info)
 {
-	struct smbd_response *response;
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_recv_io *response;
 
 	while ((response = get_receive_buffer(info)))
-		mempool_free(response, info->response_mempool);
-
-	while ((response = get_empty_queue_buffer(info)))
-		mempool_free(response, info->response_mempool);
+		mempool_free(response, sc->recv_io.mem.pool);
 }
 
 /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
@@ -1332,7 +1310,7 @@
 	struct smbd_connection *info = server->smbd_conn;
 	struct smbdirect_socket *sc;
 	struct smbdirect_socket_parameters *sp;
-	struct smbd_response *response;
+	struct smbdirect_recv_io *response;
 	unsigned long flags;
 
 	if (!info) {
@@ -1347,7 +1325,7 @@
 		rdma_disconnect(sc->rdma.cm_id);
 		log_rdma_event(INFO, "wait for transport being disconnected\n");
 		wait_event_interruptible(
-			info->disconn_wait,
+			info->status_wait,
 			sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
 	}
 
@@ -1366,23 +1344,22 @@
 	/* It's not possible for upper layer to get to reassembly */
 	log_rdma_event(INFO, "drain the reassembly queue\n");
 	do {
-		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
+		spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
 		response = _get_first_reassembly(info);
 		if (response) {
 			list_del(&response->list);
 			spin_unlock_irqrestore(
-				&info->reassembly_queue_lock, flags);
+				&sc->recv_io.reassembly.lock, flags);
 			put_receive_buffer(info, response);
 		} else
 			spin_unlock_irqrestore(
-				&info->reassembly_queue_lock, flags);
+				&sc->recv_io.reassembly.lock, flags);
 	} while (response);
-	info->reassembly_data_length = 0;
+	sc->recv_io.reassembly.data_length = 0;
 
 	log_rdma_event(INFO, "free receive buffers\n");
 	wait_event(info->wait_receive_queues,
-		info->count_receive_queue + info->count_empty_packet_queue
-			== sp->recv_credit_max);
+		info->count_receive_queue == sp->recv_credit_max);
 	destroy_receive_buffers(info);
 
 	/*
@@ -1407,11 +1384,11 @@
 	rdma_destroy_id(sc->rdma.cm_id);
 
 	/* free mempools */
-	mempool_destroy(info->request_mempool);
-	kmem_cache_destroy(info->request_cache);
+	mempool_destroy(sc->send_io.mem.pool);
+	kmem_cache_destroy(sc->send_io.mem.cache);
 
-	mempool_destroy(info->response_mempool);
-	kmem_cache_destroy(info->response_cache);
+	mempool_destroy(sc->recv_io.mem.pool);
+	kmem_cache_destroy(sc->recv_io.mem.cache);
 
 	sc->status = SMBDIRECT_SOCKET_DESTROYED;
 
@ -1459,12 +1436,14 @@ create_conn:
|
||||||
|
|
||||||
static void destroy_caches_and_workqueue(struct smbd_connection *info)
|
static void destroy_caches_and_workqueue(struct smbd_connection *info)
|
||||||
{
|
{
|
||||||
|
struct smbdirect_socket *sc = &info->socket;
|
||||||
|
|
||||||
destroy_receive_buffers(info);
|
destroy_receive_buffers(info);
|
||||||
destroy_workqueue(info->workqueue);
|
destroy_workqueue(info->workqueue);
|
||||||
mempool_destroy(info->response_mempool);
|
mempool_destroy(sc->recv_io.mem.pool);
|
||||||
kmem_cache_destroy(info->response_cache);
|
kmem_cache_destroy(sc->recv_io.mem.cache);
|
||||||
mempool_destroy(info->request_mempool);
|
mempool_destroy(sc->send_io.mem.pool);
|
||||||
kmem_cache_destroy(info->request_cache);
|
kmem_cache_destroy(sc->send_io.mem.cache);
|
||||||
}
|
}
|
||||||
|
|
||||||
#define MAX_NAME_LEN 80
|
#define MAX_NAME_LEN 80
|
||||||
|
|
@@ -1478,41 +1457,41 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
 	if (WARN_ON_ONCE(sp->max_recv_size < sizeof(struct smbdirect_data_transfer)))
 		return -ENOMEM;

-	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
-	info->request_cache =
+	scnprintf(name, MAX_NAME_LEN, "smbdirect_send_io_%p", info);
+	sc->send_io.mem.cache =
 		kmem_cache_create(
 			name,
-			sizeof(struct smbd_request) +
+			sizeof(struct smbdirect_send_io) +
 				sizeof(struct smbdirect_data_transfer),
 			0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!info->request_cache)
+	if (!sc->send_io.mem.cache)
 		return -ENOMEM;

-	info->request_mempool =
+	sc->send_io.mem.pool =
 		mempool_create(sp->send_credit_target, mempool_alloc_slab,
-			mempool_free_slab, info->request_cache);
-	if (!info->request_mempool)
+			mempool_free_slab, sc->send_io.mem.cache);
+	if (!sc->send_io.mem.pool)
 		goto out1;

-	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
+	scnprintf(name, MAX_NAME_LEN, "smbdirect_recv_io_%p", info);

 	struct kmem_cache_args response_args = {
-		.align = __alignof__(struct smbd_response),
-		.useroffset = (offsetof(struct smbd_response, packet) +
+		.align = __alignof__(struct smbdirect_recv_io),
+		.useroffset = (offsetof(struct smbdirect_recv_io, packet) +
 			sizeof(struct smbdirect_data_transfer)),
 		.usersize = sp->max_recv_size - sizeof(struct smbdirect_data_transfer),
 	};
-	info->response_cache =
+	sc->recv_io.mem.cache =
 		kmem_cache_create(name,
-			sizeof(struct smbd_response) + sp->max_recv_size,
+			sizeof(struct smbdirect_recv_io) + sp->max_recv_size,
 			&response_args, SLAB_HWCACHE_ALIGN);
-	if (!info->response_cache)
+	if (!sc->recv_io.mem.cache)
 		goto out2;

-	info->response_mempool =
+	sc->recv_io.mem.pool =
 		mempool_create(sp->recv_credit_max, mempool_alloc_slab,
-			mempool_free_slab, info->response_cache);
-	if (!info->response_mempool)
+			mempool_free_slab, sc->recv_io.mem.cache);
+	if (!sc->recv_io.mem.pool)
 		goto out3;

 	scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);

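The allocation hunk above pairs each kmem_cache with a mempool so send and receive buffers can still be carved out when the system is under memory pressure. As a rough, self-contained sketch of that slab-plus-mempool pattern (the struct name, payload size and minimum reserve below are hypothetical, not taken from the patch):

	/* Illustrative only: "my_io", payload_size and min_nr are hypothetical. */
	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/mempool.h>

	struct my_io {
		struct list_head list;
		u8 packet[];		/* header + payload stored inline */
	};

	static struct kmem_cache *my_cache;
	static mempool_t *my_pool;

	static int my_pool_init(size_t payload_size, int min_nr)
	{
		/* one slab object = fixed header plus inline payload */
		my_cache = kmem_cache_create("my_io",
					     sizeof(struct my_io) + payload_size,
					     0, SLAB_HWCACHE_ALIGN, NULL);
		if (!my_cache)
			return -ENOMEM;

		/* mempool keeps min_nr objects in reserve for low-memory cases */
		my_pool = mempool_create(min_nr, mempool_alloc_slab,
					 mempool_free_slab, my_cache);
		if (!my_pool) {
			kmem_cache_destroy(my_cache);
			return -ENOMEM;
		}
		return 0;
	}

Callers would then take buffers with mempool_alloc(my_pool, GFP_KERNEL) and return them with mempool_free(), exactly as the smbdirect send/recv paths do with sc->send_io.mem.pool and sc->recv_io.mem.pool.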
@@ -1531,13 +1510,13 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
 out5:
 	destroy_workqueue(info->workqueue);
 out4:
-	mempool_destroy(info->response_mempool);
+	mempool_destroy(sc->recv_io.mem.pool);
 out3:
-	kmem_cache_destroy(info->response_cache);
+	kmem_cache_destroy(sc->recv_io.mem.cache);
 out2:
-	mempool_destroy(info->request_mempool);
+	mempool_destroy(sc->send_io.mem.pool);
 out1:
-	kmem_cache_destroy(info->request_cache);
+	kmem_cache_destroy(sc->send_io.mem.cache);
 	return -ENOMEM;
 }

@@ -1593,8 +1572,8 @@ static struct smbd_connection *_smbd_get_connection(
 	sp->max_recv_size = smbd_max_receive_size;
 	sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000;

-	if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
-	    sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+	if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE ||
+	    sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_RECV_IO_MAX_SGE) {
 		log_rdma_event(ERR,
 			"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
 			IB_DEVICE_NAME_MAX,

@@ -1625,8 +1604,8 @@ static struct smbd_connection *_smbd_get_connection(
 	qp_attr.qp_context = info;
 	qp_attr.cap.max_send_wr = sp->send_credit_target;
 	qp_attr.cap.max_recv_wr = sp->recv_credit_max;
-	qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE;
-	qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE;
+	qp_attr.cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
+	qp_attr.cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
 	qp_attr.cap.max_inline_data = 0;
 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	qp_attr.qp_type = IB_QPT_RC;

@@ -1671,17 +1650,18 @@ static struct smbd_connection *_smbd_get_connection(
 	log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
 		&addr_in->sin_addr, port);

-	init_waitqueue_head(&info->conn_wait);
-	init_waitqueue_head(&info->disconn_wait);
-	init_waitqueue_head(&info->wait_reassembly_queue);
+	init_waitqueue_head(&info->status_wait);
+	init_waitqueue_head(&sc->recv_io.reassembly.wait_queue);
 	rc = rdma_connect(sc->rdma.cm_id, &conn_param);
 	if (rc) {
 		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
 		goto rdma_connect_failed;
 	}

-	wait_event_interruptible(
-		info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING);
+	wait_event_interruptible_timeout(
+		info->status_wait,
+		sc->status != SMBDIRECT_SOCKET_CONNECTING,
+		msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));

 	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
 		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);

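The connect path above now bounds the wait: wait_event_interruptible_timeout() returns 0 when the timeout expires with the condition still false, a positive count of remaining jiffies once the condition becomes true, and -ERESTARTSYS if a signal arrives, after which the code re-checks sc->status. A condensed sketch of that return-value handling (the specific error codes here are illustrative, not lifted from the patch):

	long t;

	/* wake-ups come from the cm_id event handler that updates sc->status */
	t = wait_event_interruptible_timeout(info->status_wait,
					     sc->status != SMBDIRECT_SOCKET_CONNECTING,
					     msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	if (t == 0)
		rc = -ETIMEDOUT;	/* timer expired, still CONNECTING */
	else if (t < 0)
		rc = t;			/* -ERESTARTSYS: interrupted by a signal */
	else if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
		rc = -ECONNREFUSED;	/* woke up, but the connect did not succeed */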
@@ -1735,9 +1715,8 @@ negotiation_failed:
 	cancel_delayed_work_sync(&info->idle_timer_work);
 	destroy_caches_and_workqueue(info);
 	sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
-	init_waitqueue_head(&info->conn_wait);
 	rdma_disconnect(sc->rdma.cm_id);
-	wait_event(info->conn_wait,
+	wait_event(info->status_wait,
 		sc->status == SMBDIRECT_SOCKET_DISCONNECTED);

 allocate_cache_failed:

@@ -1794,7 +1773,7 @@ try_again:
 int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
 {
 	struct smbdirect_socket *sc = &info->socket;
-	struct smbd_response *response;
+	struct smbdirect_recv_io *response;
 	struct smbdirect_data_transfer *data_transfer;
 	size_t size = iov_iter_count(&msg->msg_iter);
 	int to_copy, to_read, data_read, offset;

@@ -1810,9 +1789,9 @@ again:
 	 * the only one reading from the front of the queue. The transport
 	 * may add more entries to the back of the queue at the same time
 	 */
-	log_read(INFO, "size=%zd info->reassembly_data_length=%d\n", size,
-		info->reassembly_data_length);
-	if (info->reassembly_data_length >= size) {
+	log_read(INFO, "size=%zd sc->recv_io.reassembly.data_length=%d\n", size,
+		sc->recv_io.reassembly.data_length);
+	if (sc->recv_io.reassembly.data_length >= size) {
 		int queue_length;
 		int queue_removed = 0;

@@ -1824,13 +1803,13 @@ again:
 		 * updated in SOFTIRQ as more data is received
 		 */
 		virt_rmb();
-		queue_length = info->reassembly_queue_length;
+		queue_length = sc->recv_io.reassembly.queue_length;
 		data_read = 0;
 		to_read = size;
-		offset = info->first_entry_offset;
+		offset = sc->recv_io.reassembly.first_entry_offset;
 		while (data_read < size) {
 			response = _get_first_reassembly(info);
-			data_transfer = smbd_response_payload(response);
+			data_transfer = smbdirect_recv_io_payload(response);
 			data_length = le32_to_cpu(data_transfer->data_length);
 			remaining_data_length =
 				le32_to_cpu(

@@ -1875,10 +1854,10 @@ again:
 				list_del(&response->list);
 			else {
 				spin_lock_irq(
-					&info->reassembly_queue_lock);
+					&sc->recv_io.reassembly.lock);
 				list_del(&response->list);
 				spin_unlock_irq(
-					&info->reassembly_queue_lock);
+					&sc->recv_io.reassembly.lock);
 			}
 			queue_removed++;
 			info->count_reassembly_queue--;

@@ -1897,23 +1876,23 @@ again:
 				to_read, data_read, offset);
 		}

-		spin_lock_irq(&info->reassembly_queue_lock);
-		info->reassembly_data_length -= data_read;
-		info->reassembly_queue_length -= queue_removed;
-		spin_unlock_irq(&info->reassembly_queue_lock);
+		spin_lock_irq(&sc->recv_io.reassembly.lock);
+		sc->recv_io.reassembly.data_length -= data_read;
+		sc->recv_io.reassembly.queue_length -= queue_removed;
+		spin_unlock_irq(&sc->recv_io.reassembly.lock);

-		info->first_entry_offset = offset;
+		sc->recv_io.reassembly.first_entry_offset = offset;
 		log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
-			data_read, info->reassembly_data_length,
-			info->first_entry_offset);
+			data_read, sc->recv_io.reassembly.data_length,
+			sc->recv_io.reassembly.first_entry_offset);
 read_rfc1002_done:
 		return data_read;
 	}

 	log_read(INFO, "wait_event on more data\n");
 	rc = wait_event_interruptible(
-		info->wait_reassembly_queue,
-		info->reassembly_data_length >= size ||
+		sc->recv_io.reassembly.wait_queue,
+		sc->recv_io.reassembly.data_length >= size ||
 		sc->status != SMBDIRECT_SOCKET_CONNECTED);
 	/* Don't return any data if interrupted */
 	if (rc)

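On the consumer side, smbd_recv() now sleeps on sc->recv_io.reassembly.wait_queue until enough reassembled bytes are queued or the socket leaves the connected state; the receive completion path is expected to wake that queue after appending data and bumping data_length. A condensed restatement of the wait shown above:

	/* block until the transport has queued at least "size" bytes, or the
	 * connection drops; the recv completion handler is assumed to call
	 * wake_up(&sc->recv_io.reassembly.wait_queue) after queueing data */
	rc = wait_event_interruptible(sc->recv_io.reassembly.wait_queue,
				      sc->recv_io.reassembly.data_length >= size ||
				      sc->status != SMBDIRECT_SOCKET_CONNECTED);
	if (rc)
		return rc;	/* interrupted by a signal: return no data */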
@@ -33,16 +33,6 @@ enum keep_alive_status {
 	KEEP_ALIVE_SENT,
 };

-enum smbd_connection_status {
-	SMBD_CREATED,
-	SMBD_CONNECTING,
-	SMBD_CONNECTED,
-	SMBD_NEGOTIATE_FAILED,
-	SMBD_DISCONNECTING,
-	SMBD_DISCONNECTED,
-	SMBD_DESTROYED
-};
-
 /*
  * The context for the SMBDirect transport
  * Everything related to the transport is here. It has several logical parts

@@ -57,8 +47,7 @@ struct smbd_connection {

 	int ri_rc;
 	struct completion ri_done;
-	wait_queue_head_t conn_wait;
-	wait_queue_head_t disconn_wait;
+	wait_queue_head_t status_wait;

 	struct completion negotiate_completion;
 	bool negotiate_done;

@@ -75,7 +64,6 @@ struct smbd_connection {
 	atomic_t send_credits;
 	atomic_t receive_credits;
 	int receive_credit_target;
-	int fragment_reassembly_remaining;

 	/* Memory registrations */
 	/* Maximum number of RDMA read/write outstanding on this connection */

@@ -106,52 +94,16 @@ struct smbd_connection {
 	wait_queue_head_t wait_post_send;

 	/* Receive queue */
-	struct list_head receive_queue;
 	int count_receive_queue;
-	spinlock_t receive_queue_lock;
-
-	struct list_head empty_packet_queue;
-	int count_empty_packet_queue;
-	spinlock_t empty_packet_queue_lock;

 	wait_queue_head_t wait_receive_queues;

-	/* Reassembly queue */
-	struct list_head reassembly_queue;
-	spinlock_t reassembly_queue_lock;
-	wait_queue_head_t wait_reassembly_queue;
-
-	/* total data length of reassembly queue */
-	int reassembly_data_length;
-	int reassembly_queue_length;
-	/* the offset to first buffer in reassembly queue */
-	int first_entry_offset;
-
 	bool send_immediate;

 	wait_queue_head_t wait_send_queue;

-	/*
-	 * Indicate if we have received a full packet on the connection
-	 * This is used to identify the first SMBD packet of a assembled
-	 * payload (SMB packet) in reassembly queue so we can return a
-	 * RFC1002 length to upper layer to indicate the length of the SMB
-	 * packet received
-	 */
-	bool full_packet_received;
-
 	struct workqueue_struct *workqueue;
 	struct delayed_work idle_timer_work;

-	/* Memory pool for preallocating buffers */
-	/* request pool for RDMA send */
-	struct kmem_cache *request_cache;
-	mempool_t *request_mempool;
-
-	/* response pool for RDMA receive */
-	struct kmem_cache *response_cache;
-	mempool_t *response_mempool;
-
 	/* for debug purposes */
 	unsigned int count_get_receive_buffer;
 	unsigned int count_put_receive_buffer;

@@ -161,48 +113,6 @@ struct smbd_connection {
 	unsigned int count_send_empty;
 };

-enum smbd_message_type {
-	SMBD_NEGOTIATE_RESP,
-	SMBD_TRANSFER_DATA,
-};
-
-/* Maximum number of SGEs used by smbdirect.c in any send work request */
-#define SMBDIRECT_MAX_SEND_SGE 6
-
-/* The context for a SMBD request */
-struct smbd_request {
-	struct smbd_connection *info;
-	struct ib_cqe cqe;
-
-	/* the SGE entries for this work request */
-	struct ib_sge sge[SMBDIRECT_MAX_SEND_SGE];
-	int num_sge;
-
-	/* SMBD packet header follows this structure */
-	u8 packet[];
-};
-
-/* Maximum number of SGEs used by smbdirect.c in any receive work request */
-#define SMBDIRECT_MAX_RECV_SGE 1
-
-/* The context for a SMBD response */
-struct smbd_response {
-	struct smbd_connection *info;
-	struct ib_cqe cqe;
-	struct ib_sge sge;
-
-	enum smbd_message_type type;
-
-	/* Link to receive queue or reassembly queue */
-	struct list_head list;
-
-	/* Indicate if this is the 1st packet of a payload */
-	bool first_segment;
-
-	/* SMBD packet header and payload follows this structure */
-	u8 packet[];
-};
-
 /* Create a SMBDirect session */
 struct smbd_connection *smbd_get_connection(
 	struct TCP_Server_Info *server, struct sockaddr *dstaddr);

@@ -30,9 +30,6 @@
 #include "smbdirect.h"
 #include "compress.h"

-/* Max number of iovectors we can use off the stack when sending requests. */
-#define CIFS_MAX_IOV_SIZE 8
-
 void
 cifs_wake_up_task(struct mid_q_entry *mid)
 {

|
||||||
wake_up_process(mid->callback_data);
|
wake_up_process(mid->callback_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct mid_q_entry *
|
|
||||||
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
|
|
||||||
{
|
|
||||||
struct mid_q_entry *temp;
|
|
||||||
|
|
||||||
if (server == NULL) {
|
|
||||||
cifs_dbg(VFS, "%s: null TCP session\n", __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
|
|
||||||
memset(temp, 0, sizeof(struct mid_q_entry));
|
|
||||||
kref_init(&temp->refcount);
|
|
||||||
temp->mid = get_mid(smb_buffer);
|
|
||||||
temp->pid = current->pid;
|
|
||||||
temp->command = cpu_to_le16(smb_buffer->Command);
|
|
||||||
cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
|
|
||||||
/* easier to use jiffies */
|
|
||||||
/* when mid allocated can be before when sent */
|
|
||||||
temp->when_alloc = jiffies;
|
|
||||||
temp->server = server;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The default is for the mid to be synchronous, so the
|
|
||||||
* default callback just wakes up the current task.
|
|
||||||
*/
|
|
||||||
get_task_struct(current);
|
|
||||||
temp->creator = current;
|
|
||||||
temp->callback = cifs_wake_up_task;
|
|
||||||
temp->callback_data = current;
|
|
||||||
|
|
||||||
atomic_inc(&mid_count);
|
|
||||||
temp->mid_state = MID_REQUEST_ALLOCATED;
|
|
||||||
return temp;
|
|
||||||
}
|
|
||||||
|
|
||||||
void __release_mid(struct kref *refcount)
|
void __release_mid(struct kref *refcount)
|
||||||
{
|
{
|
||||||
struct mid_q_entry *midEntry =
|
struct mid_q_entry *midEntry =
|
||||||
|
|
@@ -89,7 +50,7 @@ void __release_mid(struct kref *refcount)
 #endif
 	struct TCP_Server_Info *server = midEntry->server;

-	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
+	if (midEntry->resp_buf && (midEntry->wait_cancelled) &&
 	    (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
 	     midEntry->mid_state == MID_RESPONSE_READY) &&
 	    server->ops->handle_cancelled_mid)

@@ -160,12 +121,12 @@ void __release_mid(struct kref *refcount)
 void
 delete_mid(struct mid_q_entry *mid)
 {
-	spin_lock(&mid->server->mid_lock);
-	if (!(mid->mid_flags & MID_DELETED)) {
+	spin_lock(&mid->server->mid_queue_lock);
+	if (mid->deleted_from_q == false) {
 		list_del_init(&mid->qhead);
-		mid->mid_flags |= MID_DELETED;
+		mid->deleted_from_q = true;
 	}
-	spin_unlock(&mid->server->mid_lock);
+	spin_unlock(&mid->server->mid_queue_lock);

 	release_mid(mid);
 }

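With mid_lock split into mid_queue_lock, queue membership is tracked by the deleted_from_q flag rather than a MID_DELETED bit, which keeps the remove-from-pending_mid_q-exactly-once rule explicit and narrows what the lock protects. A minimal sketch of that idiom (a standalone illustration of the pattern, not the full cifs path):

	/* remove the mid from pending_mid_q at most once, under the queue lock */
	spin_lock(&server->mid_queue_lock);
	if (!mid->deleted_from_q) {
		list_del_init(&mid->qhead);
		mid->deleted_from_q = true;
	}
	spin_unlock(&server->mid_queue_lock);

	release_mid(mid);	/* drop the reference after leaving the queue */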
@@ -269,9 +230,8 @@ smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 	return buflen;
 }

-static int
-__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
-		struct smb_rqst *rqst)
+int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+		    struct smb_rqst *rqst)
 {
 	int rc;
 	struct kvec *iov;

@@ -397,7 +357,7 @@ unmask:
 		 * socket so the server throws away the partial SMB
 		 */
 		cifs_signal_cifsd_for_reconnect(server, false);
-		trace_smb3_partial_send_reconnect(server->CurrentMid,
+		trace_smb3_partial_send_reconnect(server->current_mid,
 			server->conn_id, server->hostname);
 	}
 smbd_done:

@ -456,22 +416,6 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
|
||||||
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
|
|
||||||
unsigned int smb_buf_length)
|
|
||||||
{
|
|
||||||
struct kvec iov[2];
|
|
||||||
struct smb_rqst rqst = { .rq_iov = iov,
|
|
||||||
.rq_nvec = 2 };
|
|
||||||
|
|
||||||
iov[0].iov_base = smb_buffer;
|
|
||||||
iov[0].iov_len = 4;
|
|
||||||
iov[1].iov_base = (char *)smb_buffer + 4;
|
|
||||||
iov[1].iov_len = smb_buf_length;
|
|
||||||
|
|
||||||
return __smb_send_rqst(server, 1, &rqst);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
static int
|
||||||
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
|
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
|
||||||
const int timeout, const int flags,
|
const int timeout, const int flags,
|
||||||
|
|
@ -509,7 +453,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
|
||||||
in_flight = server->in_flight;
|
in_flight = server->in_flight;
|
||||||
spin_unlock(&server->req_lock);
|
spin_unlock(&server->req_lock);
|
||||||
|
|
||||||
trace_smb3_nblk_credits(server->CurrentMid,
|
trace_smb3_nblk_credits(server->current_mid,
|
||||||
server->conn_id, server->hostname, scredits, -1, in_flight);
|
server->conn_id, server->hostname, scredits, -1, in_flight);
|
||||||
cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
|
cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
|
||||||
__func__, 1, scredits);
|
__func__, 1, scredits);
|
||||||
|
|
@ -542,7 +486,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
|
||||||
in_flight = server->in_flight;
|
in_flight = server->in_flight;
|
||||||
spin_unlock(&server->req_lock);
|
spin_unlock(&server->req_lock);
|
||||||
|
|
||||||
trace_smb3_credit_timeout(server->CurrentMid,
|
trace_smb3_credit_timeout(server->current_mid,
|
||||||
server->conn_id, server->hostname, scredits,
|
server->conn_id, server->hostname, scredits,
|
||||||
num_credits, in_flight);
|
num_credits, in_flight);
|
||||||
cifs_server_dbg(VFS, "wait timed out after %d ms\n",
|
cifs_server_dbg(VFS, "wait timed out after %d ms\n",
|
||||||
|
|
@ -585,7 +529,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
|
||||||
spin_unlock(&server->req_lock);
|
spin_unlock(&server->req_lock);
|
||||||
|
|
||||||
trace_smb3_credit_timeout(
|
trace_smb3_credit_timeout(
|
||||||
server->CurrentMid,
|
server->current_mid,
|
||||||
server->conn_id, server->hostname,
|
server->conn_id, server->hostname,
|
||||||
scredits, num_credits, in_flight);
|
scredits, num_credits, in_flight);
|
||||||
cifs_server_dbg(VFS, "wait timed out after %d ms\n",
|
cifs_server_dbg(VFS, "wait timed out after %d ms\n",
|
||||||
|
|
@ -615,7 +559,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
|
||||||
in_flight = server->in_flight;
|
in_flight = server->in_flight;
|
||||||
spin_unlock(&server->req_lock);
|
spin_unlock(&server->req_lock);
|
||||||
|
|
||||||
trace_smb3_waitff_credits(server->CurrentMid,
|
trace_smb3_waitff_credits(server->current_mid,
|
||||||
server->conn_id, server->hostname, scredits,
|
server->conn_id, server->hostname, scredits,
|
||||||
-(num_credits), in_flight);
|
-(num_credits), in_flight);
|
||||||
cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
|
cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
|
||||||
|
|
@ -626,9 +570,8 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
int wait_for_free_request(struct TCP_Server_Info *server, const int flags,
|
||||||
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
|
unsigned int *instance)
|
||||||
unsigned int *instance)
|
|
||||||
{
|
{
|
||||||
return wait_for_free_credits(server, 1, -1, flags,
|
return wait_for_free_credits(server, 1, -1, flags,
|
||||||
instance);
|
instance);
|
||||||
|
|
@ -666,7 +609,7 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num,
|
||||||
*/
|
*/
|
||||||
if (server->in_flight == 0) {
|
if (server->in_flight == 0) {
|
||||||
spin_unlock(&server->req_lock);
|
spin_unlock(&server->req_lock);
|
||||||
trace_smb3_insufficient_credits(server->CurrentMid,
|
trace_smb3_insufficient_credits(server->current_mid,
|
||||||
server->conn_id, server->hostname, scredits,
|
server->conn_id, server->hostname, scredits,
|
||||||
num, in_flight);
|
num, in_flight);
|
||||||
cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
|
cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
|
||||||
|
|
@ -690,40 +633,7 @@ cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
|
int wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
|
||||||
struct mid_q_entry **ppmidQ)
|
|
||||||
{
|
|
||||||
spin_lock(&ses->ses_lock);
|
|
||||||
if (ses->ses_status == SES_NEW) {
|
|
||||||
if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
|
|
||||||
(in_buf->Command != SMB_COM_NEGOTIATE)) {
|
|
||||||
spin_unlock(&ses->ses_lock);
|
|
||||||
return -EAGAIN;
|
|
||||||
}
|
|
||||||
/* else ok - we are setting up session */
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ses->ses_status == SES_EXITING) {
|
|
||||||
/* check if SMB session is bad because we are setting it up */
|
|
||||||
if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
|
|
||||||
spin_unlock(&ses->ses_lock);
|
|
||||||
return -EAGAIN;
|
|
||||||
}
|
|
||||||
/* else ok - we are shutting down session */
|
|
||||||
}
|
|
||||||
spin_unlock(&ses->ses_lock);
|
|
||||||
|
|
||||||
*ppmidQ = alloc_mid(in_buf, ses->server);
|
|
||||||
if (*ppmidQ == NULL)
|
|
||||||
return -ENOMEM;
|
|
||||||
spin_lock(&ses->server->mid_lock);
|
|
||||||
list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
|
|
||||||
spin_unlock(&ses->server->mid_lock);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
|
|
||||||
{
|
{
|
||||||
int error;
|
int error;
|
||||||
|
|
||||||
|
|
@ -737,34 +647,6 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct mid_q_entry *
|
|
||||||
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
|
|
||||||
{
|
|
||||||
int rc;
|
|
||||||
struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
|
|
||||||
struct mid_q_entry *mid;
|
|
||||||
|
|
||||||
if (rqst->rq_iov[0].iov_len != 4 ||
|
|
||||||
rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
|
|
||||||
return ERR_PTR(-EIO);
|
|
||||||
|
|
||||||
/* enable signing if server requires it */
|
|
||||||
if (server->sign)
|
|
||||||
hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
|
|
||||||
|
|
||||||
mid = alloc_mid(hdr, server);
|
|
||||||
if (mid == NULL)
|
|
||||||
return ERR_PTR(-ENOMEM);
|
|
||||||
|
|
||||||
rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
|
|
||||||
if (rc) {
|
|
||||||
release_mid(mid);
|
|
||||||
return ERR_PTR(rc);
|
|
||||||
}
|
|
||||||
|
|
||||||
return mid;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Send a SMB request and set the callback function in the mid to handle
|
* Send a SMB request and set the callback function in the mid to handle
|
||||||
* the result. Caller is responsible for dealing with timeouts.
|
* the result. Caller is responsible for dealing with timeouts.
|
||||||
|
|
@@ -819,9 +701,9 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 	mid->mid_state = MID_REQUEST_SUBMITTED;

 	/* put it on the pending_mid_q */
-	spin_lock(&server->mid_lock);
+	spin_lock(&server->mid_queue_lock);
 	list_add_tail(&mid->qhead, &server->pending_mid_q);
-	spin_unlock(&server->mid_lock);
+	spin_unlock(&server->mid_queue_lock);

 	/*
 	 * Need to store the time in mid before calling I/O. For call_async,

@ -845,45 +727,17 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
int cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
|
||||||
*
|
|
||||||
* Send an SMB Request. No response info (other than return code)
|
|
||||||
* needs to be parsed.
|
|
||||||
*
|
|
||||||
* flags indicate the type of request buffer and how long to wait
|
|
||||||
* and whether to log NT STATUS code (error) before mapping it to POSIX error
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
int
|
|
||||||
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
|
|
||||||
char *in_buf, int flags)
|
|
||||||
{
|
|
||||||
int rc;
|
|
||||||
struct kvec iov[1];
|
|
||||||
struct kvec rsp_iov;
|
|
||||||
int resp_buf_type;
|
|
||||||
|
|
||||||
iov[0].iov_base = in_buf;
|
|
||||||
iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
|
|
||||||
flags |= CIFS_NO_RSP_BUF;
|
|
||||||
rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
|
|
||||||
cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
|
|
||||||
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
|
|
||||||
{
|
{
|
||||||
int rc = 0;
|
int rc = 0;
|
||||||
|
|
||||||
cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
|
cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
|
||||||
__func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
|
__func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
|
||||||
|
|
||||||
spin_lock(&server->mid_lock);
|
spin_lock(&server->mid_queue_lock);
|
||||||
switch (mid->mid_state) {
|
switch (mid->mid_state) {
|
||||||
case MID_RESPONSE_READY:
|
case MID_RESPONSE_READY:
|
||||||
spin_unlock(&server->mid_lock);
|
spin_unlock(&server->mid_queue_lock);
|
||||||
return rc;
|
return rc;
|
||||||
case MID_RETRY_NEEDED:
|
case MID_RETRY_NEEDED:
|
||||||
rc = -EAGAIN;
|
rc = -EAGAIN;
|
||||||
|
|
@ -898,85 +752,23 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
|
||||||
rc = mid->mid_rc;
|
rc = mid->mid_rc;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
if (!(mid->mid_flags & MID_DELETED)) {
|
if (mid->deleted_from_q == false) {
|
||||||
list_del_init(&mid->qhead);
|
list_del_init(&mid->qhead);
|
||||||
mid->mid_flags |= MID_DELETED;
|
mid->deleted_from_q = true;
|
||||||
}
|
}
|
||||||
spin_unlock(&server->mid_lock);
|
spin_unlock(&server->mid_queue_lock);
|
||||||
cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
|
cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
|
||||||
__func__, mid->mid, mid->mid_state);
|
__func__, mid->mid, mid->mid_state);
|
||||||
rc = -EIO;
|
rc = -EIO;
|
||||||
goto sync_mid_done;
|
goto sync_mid_done;
|
||||||
}
|
}
|
||||||
spin_unlock(&server->mid_lock);
|
spin_unlock(&server->mid_queue_lock);
|
||||||
|
|
||||||
sync_mid_done:
|
sync_mid_done:
|
||||||
release_mid(mid);
|
release_mid(mid);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int
|
|
||||||
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
|
|
||||||
struct mid_q_entry *mid)
|
|
||||||
{
|
|
||||||
return server->ops->send_cancel ?
|
|
||||||
server->ops->send_cancel(server, rqst, mid) : 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int
|
|
||||||
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
|
|
||||||
bool log_error)
|
|
||||||
{
|
|
||||||
unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
|
|
||||||
|
|
||||||
dump_smb(mid->resp_buf, min_t(u32, 92, len));
|
|
||||||
|
|
||||||
/* convert the length into a more usable form */
|
|
||||||
if (server->sign) {
|
|
||||||
struct kvec iov[2];
|
|
||||||
int rc = 0;
|
|
||||||
struct smb_rqst rqst = { .rq_iov = iov,
|
|
||||||
.rq_nvec = 2 };
|
|
||||||
|
|
||||||
iov[0].iov_base = mid->resp_buf;
|
|
||||||
iov[0].iov_len = 4;
|
|
||||||
iov[1].iov_base = (char *)mid->resp_buf + 4;
|
|
||||||
iov[1].iov_len = len - 4;
|
|
||||||
/* FIXME: add code to kill session */
|
|
||||||
rc = cifs_verify_signature(&rqst, server,
|
|
||||||
mid->sequence_number);
|
|
||||||
if (rc)
|
|
||||||
cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
|
|
||||||
rc);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* BB special case reconnect tid and uid here? */
|
|
||||||
return map_and_check_smb_error(mid, log_error);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct mid_q_entry *
|
|
||||||
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
|
|
||||||
struct smb_rqst *rqst)
|
|
||||||
{
|
|
||||||
int rc;
|
|
||||||
struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
|
|
||||||
struct mid_q_entry *mid;
|
|
||||||
|
|
||||||
if (rqst->rq_iov[0].iov_len != 4 ||
|
|
||||||
rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
|
|
||||||
return ERR_PTR(-EIO);
|
|
||||||
|
|
||||||
rc = allocate_mid(ses, hdr, &mid);
|
|
||||||
if (rc)
|
|
||||||
return ERR_PTR(rc);
|
|
||||||
rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
|
|
||||||
if (rc) {
|
|
||||||
delete_mid(mid);
|
|
||||||
return ERR_PTR(rc);
|
|
||||||
}
|
|
||||||
return mid;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void
|
static void
|
||||||
cifs_compound_callback(struct mid_q_entry *mid)
|
cifs_compound_callback(struct mid_q_entry *mid)
|
||||||
{
|
{
|
||||||
|
|
@@ -1213,15 +1005,15 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
 			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
 					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
 			send_cancel(server, &rqst[i], midQ[i]);
-			spin_lock(&server->mid_lock);
-			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+			spin_lock(&server->mid_queue_lock);
+			midQ[i]->wait_cancelled = true;
 			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
 			    midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
 				midQ[i]->callback = cifs_cancelled_callback;
 				cancelled_mid[i] = true;
 				credits[i].value = 0;
 			}
-			spin_unlock(&server->mid_lock);
+			spin_unlock(&server->mid_queue_lock);
 		}
 	}

@ -1304,344 +1096,6 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
||||||
rqst, resp_buf_type, resp_iov);
|
rqst, resp_buf_type, resp_iov);
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
|
||||||
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
|
|
||||||
struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
|
|
||||||
const int flags, struct kvec *resp_iov)
|
|
||||||
{
|
|
||||||
struct smb_rqst rqst;
|
|
||||||
struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
|
|
||||||
int rc;
|
|
||||||
|
|
||||||
if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
|
|
||||||
new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
|
|
||||||
GFP_KERNEL);
|
|
||||||
if (!new_iov) {
|
|
||||||
/* otherwise cifs_send_recv below sets resp_buf_type */
|
|
||||||
*resp_buf_type = CIFS_NO_BUFFER;
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
} else
|
|
||||||
new_iov = s_iov;
|
|
||||||
|
|
||||||
/* 1st iov is a RFC1001 length followed by the rest of the packet */
|
|
||||||
memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
|
|
||||||
|
|
||||||
new_iov[0].iov_base = new_iov[1].iov_base;
|
|
||||||
new_iov[0].iov_len = 4;
|
|
||||||
new_iov[1].iov_base += 4;
|
|
||||||
new_iov[1].iov_len -= 4;
|
|
||||||
|
|
||||||
memset(&rqst, 0, sizeof(struct smb_rqst));
|
|
||||||
rqst.rq_iov = new_iov;
|
|
||||||
rqst.rq_nvec = n_vec + 1;
|
|
||||||
|
|
||||||
rc = cifs_send_recv(xid, ses, ses->server,
|
|
||||||
&rqst, resp_buf_type, flags, resp_iov);
|
|
||||||
if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
|
|
||||||
kfree(new_iov);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
int
|
|
||||||
SendReceive(const unsigned int xid, struct cifs_ses *ses,
|
|
||||||
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
|
|
||||||
int *pbytes_returned, const int flags)
|
|
||||||
{
|
|
||||||
int rc = 0;
|
|
||||||
struct mid_q_entry *midQ;
|
|
||||||
unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
|
|
||||||
struct kvec iov = { .iov_base = in_buf, .iov_len = len };
|
|
||||||
struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
|
|
||||||
struct cifs_credits credits = { .value = 1, .instance = 0 };
|
|
||||||
struct TCP_Server_Info *server;
|
|
||||||
|
|
||||||
if (ses == NULL) {
|
|
||||||
cifs_dbg(VFS, "Null smb session\n");
|
|
||||||
return -EIO;
|
|
||||||
}
|
|
||||||
server = ses->server;
|
|
||||||
if (server == NULL) {
|
|
||||||
cifs_dbg(VFS, "Null tcp session\n");
|
|
||||||
return -EIO;
|
|
||||||
}
|
|
||||||
|
|
||||||
spin_lock(&server->srv_lock);
|
|
||||||
if (server->tcpStatus == CifsExiting) {
|
|
||||||
spin_unlock(&server->srv_lock);
|
|
||||||
return -ENOENT;
|
|
||||||
}
|
|
||||||
spin_unlock(&server->srv_lock);
|
|
||||||
|
|
||||||
/* Ensure that we do not send more than 50 overlapping requests
|
|
||||||
to the same server. We may make this configurable later or
|
|
||||||
use ses->maxReq */
|
|
||||||
|
|
||||||
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
|
|
||||||
cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
|
|
||||||
len);
|
|
||||||
return -EIO;
|
|
||||||
}
|
|
||||||
|
|
||||||
rc = wait_for_free_request(server, flags, &credits.instance);
|
|
||||||
if (rc)
|
|
||||||
return rc;
|
|
||||||
|
|
||||||
/* make sure that we sign in the same order that we send on this socket
|
|
||||||
and avoid races inside tcp sendmsg code that could cause corruption
|
|
||||||
of smb data */
|
|
||||||
|
|
||||||
cifs_server_lock(server);
|
|
||||||
|
|
||||||
rc = allocate_mid(ses, in_buf, &midQ);
|
|
||||||
if (rc) {
|
|
||||||
cifs_server_unlock(server);
|
|
||||||
/* Update # of requests on wire to server */
|
|
||||||
add_credits(server, &credits, 0);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
|
|
||||||
if (rc) {
|
|
||||||
cifs_server_unlock(server);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
midQ->mid_state = MID_REQUEST_SUBMITTED;
|
|
||||||
|
|
||||||
rc = smb_send(server, in_buf, len);
|
|
||||||
cifs_save_when_sent(midQ);
|
|
||||||
|
|
||||||
if (rc < 0)
|
|
||||||
server->sequence_number -= 2;
|
|
||||||
|
|
||||||
cifs_server_unlock(server);
|
|
||||||
|
|
||||||
if (rc < 0)
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
rc = wait_for_response(server, midQ);
|
|
||||||
if (rc != 0) {
|
|
||||||
send_cancel(server, &rqst, midQ);
|
|
||||||
spin_lock(&server->mid_lock);
|
|
||||||
if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
|
|
||||||
midQ->mid_state == MID_RESPONSE_RECEIVED) {
|
|
||||||
/* no longer considered to be "in-flight" */
|
|
||||||
midQ->callback = release_mid;
|
|
||||||
spin_unlock(&server->mid_lock);
|
|
||||||
add_credits(server, &credits, 0);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
spin_unlock(&server->mid_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
rc = cifs_sync_mid_result(midQ, server);
|
|
||||||
if (rc != 0) {
|
|
||||||
add_credits(server, &credits, 0);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!midQ->resp_buf || !out_buf ||
|
|
||||||
midQ->mid_state != MID_RESPONSE_READY) {
|
|
||||||
rc = -EIO;
|
|
||||||
cifs_server_dbg(VFS, "Bad MID state?\n");
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
|
|
||||||
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
|
|
||||||
rc = cifs_check_receive(midQ, server, 0);
|
|
||||||
out:
|
|
||||||
delete_mid(midQ);
|
|
||||||
add_credits(server, &credits, 0);
|
|
||||||
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
|
|
||||||
blocking lock to return. */
|
|
||||||
|
|
||||||
static int
|
|
||||||
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
|
|
||||||
struct smb_hdr *in_buf,
|
|
||||||
struct smb_hdr *out_buf)
|
|
||||||
{
|
|
||||||
int bytes_returned;
|
|
||||||
struct cifs_ses *ses = tcon->ses;
|
|
||||||
LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
|
|
||||||
|
|
||||||
/* We just modify the current in_buf to change
|
|
||||||
the type of lock from LOCKING_ANDX_SHARED_LOCK
|
|
||||||
or LOCKING_ANDX_EXCLUSIVE_LOCK to
|
|
||||||
LOCKING_ANDX_CANCEL_LOCK. */
|
|
||||||
|
|
||||||
pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
|
|
||||||
pSMB->Timeout = 0;
|
|
||||||
pSMB->hdr.Mid = get_next_mid(ses->server);
|
|
||||||
|
|
||||||
return SendReceive(xid, ses, in_buf, out_buf,
|
|
||||||
&bytes_returned, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
int
|
|
||||||
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
|
|
||||||
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
|
|
||||||
int *pbytes_returned)
|
|
||||||
{
|
|
||||||
int rc = 0;
|
|
||||||
int rstart = 0;
|
|
||||||
struct mid_q_entry *midQ;
|
|
||||||
struct cifs_ses *ses;
|
|
||||||
unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
|
|
||||||
struct kvec iov = { .iov_base = in_buf, .iov_len = len };
|
|
||||||
struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
|
|
||||||
unsigned int instance;
|
|
||||||
struct TCP_Server_Info *server;
|
|
||||||
|
|
||||||
if (tcon == NULL || tcon->ses == NULL) {
|
|
||||||
cifs_dbg(VFS, "Null smb session\n");
|
|
||||||
return -EIO;
|
|
||||||
}
|
|
||||||
ses = tcon->ses;
|
|
||||||
server = ses->server;
|
|
||||||
|
|
||||||
if (server == NULL) {
|
|
||||||
cifs_dbg(VFS, "Null tcp session\n");
|
|
||||||
return -EIO;
|
|
||||||
}
|
|
||||||
|
|
||||||
spin_lock(&server->srv_lock);
|
|
||||||
if (server->tcpStatus == CifsExiting) {
|
|
||||||
spin_unlock(&server->srv_lock);
|
|
||||||
return -ENOENT;
|
|
||||||
}
|
|
||||||
spin_unlock(&server->srv_lock);
|
|
||||||
|
|
||||||
/* Ensure that we do not send more than 50 overlapping requests
|
|
||||||
to the same server. We may make this configurable later or
|
|
||||||
use ses->maxReq */
|
|
||||||
|
|
||||||
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
|
|
||||||
cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
|
|
||||||
len);
|
|
||||||
return -EIO;
|
|
||||||
}
|
|
||||||
|
|
||||||
rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
|
|
||||||
if (rc)
|
|
||||||
return rc;
|
|
||||||
|
|
||||||
/* make sure that we sign in the same order that we send on this socket
|
|
||||||
and avoid races inside tcp sendmsg code that could cause corruption
|
|
||||||
of smb data */
|
|
||||||
|
|
||||||
cifs_server_lock(server);
|
|
||||||
|
|
||||||
rc = allocate_mid(ses, in_buf, &midQ);
|
|
||||||
if (rc) {
|
|
||||||
cifs_server_unlock(server);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
|
|
||||||
if (rc) {
|
|
||||||
delete_mid(midQ);
|
|
||||||
cifs_server_unlock(server);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
midQ->mid_state = MID_REQUEST_SUBMITTED;
|
|
||||||
rc = smb_send(server, in_buf, len);
|
|
||||||
cifs_save_when_sent(midQ);
|
|
||||||
|
|
||||||
if (rc < 0)
|
|
||||||
server->sequence_number -= 2;
|
|
||||||
|
|
||||||
cifs_server_unlock(server);
|
|
||||||
|
|
||||||
if (rc < 0) {
|
|
||||||
delete_mid(midQ);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Wait for a reply - allow signals to interrupt. */
|
|
||||||
rc = wait_event_interruptible(server->response_q,
|
|
||||||
(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
|
|
||||||
midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
|
|
||||||
((server->tcpStatus != CifsGood) &&
|
|
||||||
(server->tcpStatus != CifsNew)));
|
|
||||||
|
|
||||||
/* Were we interrupted by a signal ? */
|
|
||||||
spin_lock(&server->srv_lock);
|
|
||||||
if ((rc == -ERESTARTSYS) &&
|
|
||||||
(midQ->mid_state == MID_REQUEST_SUBMITTED ||
|
|
||||||
midQ->mid_state == MID_RESPONSE_RECEIVED) &&
|
|
||||||
((server->tcpStatus == CifsGood) ||
|
|
||||||
(server->tcpStatus == CifsNew))) {
|
|
||||||
spin_unlock(&server->srv_lock);
|
|
||||||
|
|
||||||
if (in_buf->Command == SMB_COM_TRANSACTION2) {
|
|
||||||
/* POSIX lock. We send a NT_CANCEL SMB to cause the
|
|
||||||
blocking lock to return. */
|
|
||||||
rc = send_cancel(server, &rqst, midQ);
|
|
||||||
if (rc) {
|
|
||||||
delete_mid(midQ);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
|
|
||||||
to cause the blocking lock to return. */
|
|
||||||
|
|
||||||
rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
|
|
||||||
|
|
||||||
/* If we get -ENOLCK back the lock may have
|
|
||||||
already been removed. Don't exit in this case. */
|
|
||||||
if (rc && rc != -ENOLCK) {
|
|
||||||
delete_mid(midQ);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rc = wait_for_response(server, midQ);
|
|
||||||
if (rc) {
|
|
||||||
send_cancel(server, &rqst, midQ);
|
|
||||||
spin_lock(&server->mid_lock);
|
|
||||||
if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
|
|
||||||
midQ->mid_state == MID_RESPONSE_RECEIVED) {
|
|
||||||
/* no longer considered to be "in-flight" */
|
|
||||||
midQ->callback = release_mid;
|
|
||||||
spin_unlock(&server->mid_lock);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
spin_unlock(&server->mid_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* We got the response - restart system call. */
|
|
||||||
rstart = 1;
|
|
||||||
spin_lock(&server->srv_lock);
|
|
||||||
}
|
|
||||||
spin_unlock(&server->srv_lock);
|
|
||||||
|
|
||||||
rc = cifs_sync_mid_result(midQ, server);
|
|
||||||
if (rc != 0)
|
|
||||||
return rc;
|
|
||||||
|
|
||||||
/* rcvd frame is ok */
|
|
||||||
if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
|
|
||||||
rc = -EIO;
|
|
||||||
cifs_tcon_dbg(VFS, "Bad MID state?\n");
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
|
|
||||||
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
|
|
||||||
rc = cifs_check_receive(midQ, server, 0);
|
|
||||||
out:
|
|
||||||
delete_mid(midQ);
|
|
||||||
if (rstart && rc == -EACCES)
|
|
||||||
return -ERESTARTSYS;
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Discard any remaining data in the current SMB. To do this, we borrow the
|
* Discard any remaining data in the current SMB. To do this, we borrow the
|
||||||
|
|
|
||||||
|
|
@@ -38,6 +38,124 @@ struct smbdirect_socket {
 	} ib;

 	struct smbdirect_socket_parameters parameters;
+
+	/*
+	 * The state for posted send buffers
+	 */
+	struct {
+		/*
+		 * Memory pools for preallocating
+		 * smbdirect_send_io buffers
+		 */
+		struct {
+			struct kmem_cache *cache;
+			mempool_t *pool;
+		} mem;
+	} send_io;
+
+	/*
+	 * The state for posted receive buffers
+	 */
+	struct {
+		/*
+		 * The type of PDU we are expecting
+		 */
+		enum {
+			SMBDIRECT_EXPECT_NEGOTIATE_REQ = 1,
+			SMBDIRECT_EXPECT_NEGOTIATE_REP = 2,
+			SMBDIRECT_EXPECT_DATA_TRANSFER = 3,
+		} expected;
+
+		/*
+		 * Memory pools for preallocating
+		 * smbdirect_recv_io buffers
+		 */
+		struct {
+			struct kmem_cache *cache;
+			mempool_t *pool;
+		} mem;
+
+		/*
+		 * The list of free smbdirect_recv_io
+		 * structures
+		 */
+		struct {
+			struct list_head list;
+			spinlock_t lock;
+		} free;
+
+		/*
+		 * The list of arrived non-empty smbdirect_recv_io
+		 * structures
+		 *
+		 * This represents the reassembly queue.
+		 */
+		struct {
+			struct list_head list;
+			spinlock_t lock;
+			wait_queue_head_t wait_queue;
+			/* total data length of reassembly queue */
+			int data_length;
+			int queue_length;
+			/* the offset to first buffer in reassembly queue */
+			int first_entry_offset;
+			/*
+			 * Indicate if we have received a full packet on the
+			 * connection This is used to identify the first SMBD
+			 * packet of a assembled payload (SMB packet) in
+			 * reassembly queue so we can return a RFC1002 length to
+			 * upper layer to indicate the length of the SMB packet
+			 * received
+			 */
+			bool full_packet_received;
+		} reassembly;
+	} recv_io;
+};
+
+struct smbdirect_send_io {
+	struct smbdirect_socket *socket;
+	struct ib_cqe cqe;
+
+	/*
+	 * The SGE entries for this work request
+	 *
+	 * The first points to the packet header
+	 */
+#define SMBDIRECT_SEND_IO_MAX_SGE 6
+	size_t num_sge;
+	struct ib_sge sge[SMBDIRECT_SEND_IO_MAX_SGE];
+
+	/*
+	 * Link to the list of sibling smbdirect_send_io
+	 * messages.
+	 */
+	struct list_head sibling_list;
+	struct ib_send_wr wr;
+
+	/* SMBD packet header follows this structure */
+	u8 packet[];
+};
+
+struct smbdirect_recv_io {
+	struct smbdirect_socket *socket;
+	struct ib_cqe cqe;
+
+	/*
+	 * For now we only use a single SGE
+	 * as we have just one large buffer
+	 * per posted recv.
+	 */
+#define SMBDIRECT_RECV_IO_MAX_SGE 1
+	struct ib_sge sge;
+
+	/* Link to free or reassembly list */
+	struct list_head list;
+
+	/* Indicate if this is the 1st packet of a payload */
+	bool first_segment;
+
+	/* SMBD packet header and payload follows this structure */
+	u8 packet[];
 };

 #endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
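Before the first receive is posted, the lists, locks and wait queue added above have to be initialized; a plausible initialization order is sketched below (the helper name is hypothetical, and the initial "expected" value assumes a client-side socket that will wait for the negotiate reply):

	static void smbdirect_socket_init_recv_io(struct smbdirect_socket *sc)
	{
		/* illustrative only: field names follow the header above */
		sc->recv_io.expected = SMBDIRECT_EXPECT_NEGOTIATE_REP;

		INIT_LIST_HEAD(&sc->recv_io.free.list);
		spin_lock_init(&sc->recv_io.free.lock);

		INIT_LIST_HEAD(&sc->recv_io.reassembly.list);
		spin_lock_init(&sc->recv_io.reassembly.lock);
		init_waitqueue_head(&sc->recv_io.reassembly.wait_queue);
		sc->recv_io.reassembly.data_length = 0;
		sc->recv_io.reassembly.queue_length = 0;
		sc->recv_io.reassembly.first_entry_offset = 0;
		sc->recv_io.reassembly.full_packet_received = false;
	}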