Commit 78d9affb authored by Linus Torvalds

Merge tag '5.2-smb3' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs fixes from Steve French:
 "CIFS/SMB3 changes:

   - three fixes for stable

   - add fiemap support

   - improve zero-range support

   - various RDMA (smbdirect) fixes

  I have an additional set of fixes (for improved handling of sparse
  files, mode bits, and POSIX extensions) that are still being tested
  and are not included in this pull request; I expect to send those in
  the next week"

* tag '5.2-smb3' of git://git.samba.org/sfrench/cifs-2.6: (29 commits)
  cifs: update module internal version number
  SMB3: Clean up query symlink when reparse point
  cifs: fix strcat buffer overflow and reduce raciness in smb21_set_oplock_level()
  Negotiate and save preferred compression algorithms
  cifs: rename and clarify CIFS_ASYNC_OP and CIFS_NO_RESP
  cifs: fix credits leak for SMB1 oplock breaks
  smb3: Add protocol structs for change notify support
  cifs: fix smb3_zero_range for Azure
  cifs: zero-range does not require the file is sparse
  Add new flag on SMB3.1.1 read
  cifs: add fiemap support
  SMB3: Add defines for new negotiate contexts
  cifs: fix bi-directional fsctl passthrough calls
  cifs: smbd: take an array of requests when sending upper layer data
  SMB3: Add handling for different FSCTL access flags
  cifs: Add support for FSCTL passthrough that write data to the server
  cifs: remove superfluous inode_lock in cifs_{strict_}fsync
  cifs: Call MID callback before destroying transport
  cifs: smbd: Retry on memory registration failure
  cifs: smbd: Indicate to retry on transport sending failure
  ...
parents 8c79f4cd cb4f7bf6
......@@ -312,12 +312,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
atomic_read(&server->smbd_conn->send_credits),
atomic_read(&server->smbd_conn->receive_credits),
server->smbd_conn->receive_credit_target);
seq_printf(m, "\nPending send_pending: %x send_payload_pending:"
" %x smbd_send_pending: %x smbd_recv_pending: %x",
seq_printf(m, "\nPending send_pending: %x "
"send_payload_pending: %x",
atomic_read(&server->smbd_conn->send_pending),
atomic_read(&server->smbd_conn->send_payload_pending),
server->smbd_conn->smbd_send_pending,
server->smbd_conn->smbd_recv_pending);
atomic_read(&server->smbd_conn->send_payload_pending));
seq_printf(m, "\nReceive buffers count_receive_queue: %x "
"count_empty_packet_queue: %x",
server->smbd_conn->count_receive_queue,
......@@ -334,6 +332,12 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
#endif
seq_printf(m, "\nNumber of credits: %d Dialect 0x%x",
server->credits, server->dialect);
if (server->compress_algorithm == SMB3_COMPRESS_LZNT1)
seq_printf(m, " COMPRESS_LZNT1");
else if (server->compress_algorithm == SMB3_COMPRESS_LZ77)
seq_printf(m, " COMPRESS_LZ77");
else if (server->compress_algorithm == SMB3_COMPRESS_LZ77_HUFF)
seq_printf(m, " COMPRESS_LZ77_HUFF");
if (server->sign)
seq_printf(m, " signed");
if (server->posix_ext_supported)
......@@ -462,8 +466,13 @@ static ssize_t cifs_stats_proc_write(struct file *file,
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
#ifdef CONFIG_CIFS_STATS2
for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++)
for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
atomic_set(&server->num_cmds[i], 0);
atomic_set(&server->smb2slowcmd[i], 0);
server->time_per_cmd[i] = 0;
server->slowest_cmd[i] = 0;
server->fastest_cmd[i] = 0;
}
#endif /* CONFIG_CIFS_STATS2 */
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
......@@ -531,9 +540,19 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
#ifdef CONFIG_CIFS_STATS2
seq_puts(m, "\nTotal time spent processing by command. Time ");
seq_printf(m, "units are jiffies (%d per second)\n", HZ);
seq_puts(m, " SMB3 CMD\tNumber\tTotal Time\tFastest\tSlowest\n");
seq_puts(m, " --------\t------\t----------\t-------\t-------\n");
for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
seq_printf(m, " %d\t\t%d\t%llu\t\t%u\t%u\n", j,
atomic_read(&server->num_cmds[j]),
server->time_per_cmd[j],
server->fastest_cmd[j],
server->slowest_cmd[j]);
for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
if (atomic_read(&server->smb2slowcmd[j]))
seq_printf(m, "%d slow responses from %s for command %d\n",
seq_printf(m, " %d slow responses from %s for command %d\n",
atomic_read(&server->smb2slowcmd[j]),
server->hostname, j);
#endif /* STATS2 */
......
......@@ -483,6 +483,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",seal");
if (tcon->nocase)
seq_puts(s, ",nocase");
if (tcon->local_lease)
seq_puts(s, ",locallease");
if (tcon->retry)
seq_puts(s, ",hard");
else
......@@ -984,6 +986,7 @@ const struct inode_operations cifs_file_inode_ops = {
.getattr = cifs_getattr,
.permission = cifs_permission,
.listxattr = cifs_listxattr,
.fiemap = cifs_fiemap,
};
const struct inode_operations cifs_symlink_inode_ops = {
......
......@@ -84,6 +84,8 @@ extern int cifs_revalidate_mapping(struct inode *inode);
extern int cifs_zap_mapping(struct inode *inode);
extern int cifs_getattr(const struct path *, struct kstat *, u32, unsigned int);
extern int cifs_setattr(struct dentry *, struct iattr *);
extern int cifs_fiemap(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
extern const struct inode_operations cifs_file_inode_ops;
extern const struct inode_operations cifs_symlink_inode_ops;
......@@ -150,5 +152,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
#define CIFS_VERSION "2.19"
#define CIFS_VERSION "2.20"
#endif /* _CIFSFS_H */
......@@ -355,7 +355,8 @@ struct smb_version_operations {
struct cifs_sb_info *);
/* query symlink target */
int (*query_symlink)(const unsigned int, struct cifs_tcon *,
const char *, char **, struct cifs_sb_info *);
struct cifs_sb_info *, const char *,
char **, bool);
/* open a file for non-posix mounts */
int (*open)(const unsigned int, struct cifs_open_parms *,
__u32 *, FILE_ALL_INFO *);
......@@ -493,6 +494,9 @@ struct smb_version_operations {
char *full_path,
umode_t mode,
dev_t device_number);
/* version specific fiemap implementation */
int (*fiemap)(struct cifs_tcon *tcon, struct cifsFileInfo *,
struct fiemap_extent_info *, u64, u64);
};
struct smb_version_values {
......@@ -596,6 +600,10 @@ struct smb_vol {
unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
};
/**
* CIFS superblock mount flags (mnt_cifs_flags) to consider when
* trying to reuse existing superblock for a new mount
*/
#define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
CIFS_MOUNT_SERVER_INUM | CIFS_MOUNT_DIRECT_IO | \
CIFS_MOUNT_NO_XATTR | CIFS_MOUNT_MAP_SPECIAL_CHR | \
......@@ -606,8 +614,13 @@ struct smb_vol {
CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID)
CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID | \
CIFS_MOUNT_NO_DFS)
/**
* Generic VFS superblock mount flags (s_flags) to consider when
* trying to reuse existing superblock for a new mount
*/
#define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \
SB_NODEV | SB_SYNCHRONOUS)
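To make the two mask comments concrete: a new mount may reuse an existing cifs superblock only when both flag words agree under the masks above. A simplified sketch of that comparison (the helper name is hypothetical; the real check lives in cifs_match_super()):

/* Hypothetical illustration of the superblock-reuse flag check. */
static bool cifs_sb_flags_match(const struct cifs_sb_info *old,
				const struct cifs_sb_info *new,
				unsigned long old_s_flags,
				unsigned long new_s_flags)
{
	if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
	    (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
		return false;
	return (old_s_flags & CIFS_MS_MASK) == (new_s_flags & CIFS_MS_MASK);
}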
......@@ -714,10 +727,15 @@ struct TCP_Server_Info {
#ifdef CONFIG_CIFS_STATS2
atomic_t in_send; /* requests trying to send */
atomic_t num_waiters; /* blocked waiting to get in sendrecv */
atomic_t num_cmds[NUMBER_OF_SMB2_COMMANDS]; /* total requests by cmd */
atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */
__u64 time_per_cmd[NUMBER_OF_SMB2_COMMANDS]; /* total time per cmd */
__u32 slowest_cmd[NUMBER_OF_SMB2_COMMANDS];
__u32 fastest_cmd[NUMBER_OF_SMB2_COMMANDS];
#endif /* STATS2 */
unsigned int max_read;
unsigned int max_write;
__le16 compress_algorithm;
__le16 cipher_type;
/* save initial negprot hash */
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
......@@ -1673,11 +1691,11 @@ static inline bool is_retryable_error(int error)
/* Type of Request to SendReceive2 */
#define CIFS_BLOCKING_OP 1 /* operation can block */
#define CIFS_ASYNC_OP 2 /* do not wait for response */
#define CIFS_NON_BLOCKING 2 /* do not block waiting for credits */
#define CIFS_TIMEOUT_MASK 0x003 /* only one of above set in req */
#define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */
#define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */
#define CIFS_NO_RESP 0x040 /* no response buffer required */
#define CIFS_NO_RSP_BUF 0x040 /* no response buffer required */
/* Type of request operation */
#define CIFS_ECHO_OP 0x080 /* echo request */
......@@ -1687,6 +1705,7 @@ static inline bool is_retryable_error(int error)
#define CIFS_HAS_CREDITS 0x0400 /* already has credits */
#define CIFS_TRANSFORM_REQ 0x0800 /* transform request before sending */
#define CIFS_NO_SRV_RSP 0x1000 /* there is no server response */
/* Security Flags: indicate type of session setup needed */
#define CIFSSEC_MAY_SIGN 0x00001
......
......@@ -526,12 +526,21 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
const struct nls_table *codepage);
extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
unsigned char *p24);
extern int
cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
const char *devname, bool is_smb3);
extern void
cifs_cleanup_volume_info_contents(struct smb_vol *volume_info);
extern struct TCP_Server_Info *
cifs_find_tcp_session(struct smb_vol *vol);
extern void cifs_put_smb_ses(struct cifs_ses *ses);
extern struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info);
void cifs_readdata_release(struct kref *refcount);
int cifs_async_readv(struct cifs_readdata *rdata);
int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
......
......@@ -860,7 +860,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
iov[1].iov_base = (char *)smb + 4;
rc = cifs_call_async(server, &rqst, NULL, cifs_echo_callback, NULL,
server, CIFS_ASYNC_OP | CIFS_ECHO_OP, NULL);
server, CIFS_NON_BLOCKING | CIFS_ECHO_OP, NULL);
if (rc)
cifs_dbg(FYI, "Echo request failed: %d\n", rc);
......@@ -2508,8 +2508,8 @@ int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon,
iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP,
&rsp_iov);
rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type,
CIFS_NO_RSP_BUF, &rsp_iov);
cifs_small_buf_release(pSMB);
if (rc)
cifs_dbg(FYI, "Send error in cifs_lockv = %d\n", rc);
......@@ -2540,7 +2540,7 @@ CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon,
if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
/* no response expected */
flags = CIFS_ASYNC_OP | CIFS_OBREAK_OP;
flags = CIFS_NO_SRV_RSP | CIFS_NON_BLOCKING | CIFS_OBREAK_OP;
pSMB->Timeout = 0;
} else if (waitFlag) {
flags = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
......@@ -6567,93 +6567,3 @@ CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
#endif
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* BB unused temporarily */
/*
* Years ago the kernel added a "dnotify" function for Samba server,
* to allow network clients (such as Windows) to display updated
* lists of files in directory listings automatically when
* files are added by one user when another user has the
* same directory open on their desktop. The Linux cifs kernel
* client hooked into the kernel side of this interface for
* the same reason, but ironically when the VFS moved from
* "dnotify" to "inotify" it became harder to plug in Linux
* network file system clients (the most obvious use case
* for notify interfaces is when multiple users can update
* the contents of the same directory - exactly what network
* file systems can do) although the server (Samba) could
* still use it. For the short term we leave the worker
* function ifdeffed out (below) until inotify is fixed
* in the VFS to make it easier to plug in network file
* system clients. If inotify turns out to be permanently
* incompatible for network fs clients, we could instead simply
* expose this config flag by adding a future cifs (and smb2) notify ioctl.
*/
int CIFSSMBNotify(const unsigned int xid, struct cifs_tcon *tcon,
const int notify_subdirs, const __u16 netfid,
__u32 filter, struct file *pfile, int multishot,
const struct nls_table *nls_codepage)
{
int rc = 0;
struct smb_com_transaction_change_notify_req *pSMB = NULL;
struct smb_com_ntransaction_change_notify_rsp *pSMBr = NULL;
struct dir_notify_req *dnotify_req;
int bytes_returned;
cifs_dbg(FYI, "In CIFSSMBNotify for file handle %d\n", (int)netfid);
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->TotalParameterCount = 0;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le32(2);
pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
pSMB->MaxSetupCount = 4;
pSMB->Reserved = 0;
pSMB->ParameterOffset = 0;
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 4; /* single byte does not need le conversion */
pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE);
pSMB->ParameterCount = pSMB->TotalParameterCount;
if (notify_subdirs)
pSMB->WatchTree = 1; /* one byte - no le conversion needed */
pSMB->Reserved2 = 0;
pSMB->CompletionFilter = cpu_to_le32(filter);
pSMB->Fid = netfid; /* file handle always le */
pSMB->ByteCount = 0;
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *)pSMBr, &bytes_returned,
CIFS_ASYNC_OP);
if (rc) {
cifs_dbg(FYI, "Error in Notify = %d\n", rc);
} else {
/* Add file to outstanding requests */
/* BB change to kmem cache alloc */
dnotify_req = kmalloc(
sizeof(struct dir_notify_req),
GFP_KERNEL);
if (dnotify_req) {
dnotify_req->Pid = pSMB->hdr.Pid;
dnotify_req->PidHigh = pSMB->hdr.PidHigh;
dnotify_req->Mid = pSMB->hdr.Mid;
dnotify_req->Tid = pSMB->hdr.Tid;
dnotify_req->Uid = pSMB->hdr.Uid;
dnotify_req->netfid = netfid;
dnotify_req->pfile = pfile;
dnotify_req->filter = filter;
dnotify_req->multishot = multishot;
spin_lock(&GlobalMid_Lock);
list_add_tail(&dnotify_req->lhead,
&GlobalDnotifyReqList);
spin_unlock(&GlobalMid_Lock);
} else
rc = -ENOMEM;
}
cifs_buf_release(pSMB);
return rc;
}
#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
......@@ -323,8 +323,6 @@ static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);
static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
const char *devname, bool is_smb3);
static char *extract_hostname(const char *unc);
/*
......@@ -530,21 +528,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* do not want to be sending data on a socket we are freeing */
cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n",
server->ssocket->state, server->ssocket->flags);
kernel_sock_shutdown(server->ssocket, SHUT_WR);
cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n",
server->ssocket->state, server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
}
server->sequence_number = 0;
server->session_estab = false;
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
server->lstrp = jiffies;
/* mark submitted MIDs for retry and issue callback */
INIT_LIST_HEAD(&retry_list);
......@@ -557,7 +540,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
list_move(&mid_entry->qhead, &retry_list);
}
spin_unlock(&GlobalMid_Lock);
mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
list_for_each_safe(tmp, tmp2, &retry_list) {
......@@ -566,6 +548,25 @@ cifs_reconnect(struct TCP_Server_Info *server)
mid_entry->callback(mid_entry);
}
if (server->ssocket) {
cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n",
server->ssocket->state, server->ssocket->flags);
kernel_sock_shutdown(server->ssocket, SHUT_WR);
cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n",
server->ssocket->state, server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
} else if (cifs_rdma_enabled(server))
smbd_destroy(server);
server->sequence_number = 0;
server->session_estab = false;
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
server->lstrp = jiffies;
mutex_unlock(&server->srv_mutex);
do {
try_to_freeze();
......@@ -931,10 +932,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
wake_up_all(&server->request_q);
/* give those requests time to exit */
msleep(125);
if (cifs_rdma_enabled(server) && server->smbd_conn) {
smbd_destroy(server->smbd_conn);
server->smbd_conn = NULL;
}
if (cifs_rdma_enabled(server))
smbd_destroy(server);
if (server->ssocket) {
sock_release(server->ssocket);
server->ssocket = NULL;
......@@ -2904,8 +2903,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
return NULL;
}
static void
cifs_put_smb_ses(struct cifs_ses *ses)
void cifs_put_smb_ses(struct cifs_ses *ses)
{
unsigned int rc, xid;
struct TCP_Server_Info *server = ses->server;
......@@ -3082,7 +3080,7 @@ cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
* already got a server reference (server refcount +1). See
* cifs_get_tcon() for refcount explanations.
*/
static struct cifs_ses *
struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
int rc = -ENOMEM;
......@@ -4389,7 +4387,7 @@ static int mount_do_dfs_failover(const char *path,
}
#endif
static int
int
cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
const char *devname, bool is_smb3)
{
......@@ -4543,7 +4541,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
struct cifs_tcon *tcon = NULL;
struct TCP_Server_Info *server;
char *root_path = NULL, *full_path = NULL;
char *old_mountdata;
char *old_mountdata, *origin_mountdata = NULL;
int count;
rc = mount_get_conns(vol, cifs_sb, &xid, &server, &ses, &tcon);
......@@ -4602,6 +4600,14 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
goto error;
}
/* Save DFS root volume information for DFS refresh worker */
origin_mountdata = kstrndup(cifs_sb->mountdata,
strlen(cifs_sb->mountdata), GFP_KERNEL);
if (!origin_mountdata) {
rc = -ENOMEM;
goto error;
}
if (cifs_sb->mountdata != old_mountdata) {
/* If we were redirected, reconnect to new target server */
mount_put_conns(cifs_sb, xid, server, ses, tcon);
......@@ -4710,7 +4716,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
}
spin_unlock(&cifs_tcp_ses_lock);
rc = dfs_cache_add_vol(vol, cifs_sb->origin_fullpath);
rc = dfs_cache_add_vol(origin_mountdata, vol, cifs_sb->origin_fullpath);
if (rc) {
kfree(cifs_sb->origin_fullpath);
goto error;
......@@ -4728,6 +4734,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
error:
kfree(full_path);
kfree(root_path);
kfree(origin_mountdata);
mount_put_conns(cifs_sb, xid, server, ses, tcon);
return rc;
}
......
......@@ -2,7 +2,7 @@
/*
* DFS referral cache routines
*
* Copyright (c) 2018 Paulo Alcantara <palcantara@suse.de>
* Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
*/
#include <linux/rcupdate.h>
......@@ -52,6 +52,7 @@ static struct kmem_cache *dfs_cache_slab __read_mostly;
struct dfs_cache_vol_info {
char *vi_fullpath;
struct smb_vol vi_vol;
char *vi_mntdata;
struct list_head vi_list;
};
......@@ -529,6 +530,7 @@ static inline void free_vol(struct dfs_cache_vol_info *vi)
{
list_del(&vi->vi_list);
kfree(vi->vi_fullpath);
kfree(vi->vi_mntdata);
cifs_cleanup_volume_info_contents(&vi->vi_vol);
kfree(vi);
}
......@@ -1139,17 +1141,18 @@ static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
* dfs_cache_add_vol - add a cifs volume during mount() that will be handled by
* DFS cache refresh worker.
*
* @mntdata: mount data.
* @vol: cifs volume.
* @fullpath: origin full path.
*
* Return zero if volume was set up correctly, otherwise non-zero.
*/
int dfs_cache_add_vol(struct smb_vol *vol, const char *fullpath)
int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
{
int rc;
struct dfs_cache_vol_info *vi;
if (!vol || !fullpath)
if (!vol || !fullpath || !mntdata)
return -EINVAL;
cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
......@@ -1168,6 +1171,8 @@ int dfs_cache_add_vol(struct smb_vol *vol, const char *fullpath)
if (rc)
goto err_free_fullpath;
vi->vi_mntdata = mntdata;
mutex_lock(&dfs_cache.dc_lock);
list_add_tail(&vi->vi_list, &dfs_cache.dc_vol_list);
mutex_unlock(&dfs_cache.dc_lock);
......@@ -1275,8 +1280,102 @@ static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
spin_unlock(&cifs_tcp_ses_lock);
}
static inline bool is_dfs_link(const char *path)
{
char *s;
s = strchr(path + 1, '\\');
if (!s)
return false;
return !!strchr(s + 1, '\\');
}
static inline char *get_dfs_root(const char *path)
{
char *s, *npath;
s = strchr(path + 1, '\\');
if (!s)
return ERR_PTR(-EINVAL);
s = strchr(s + 1, '\\');
if (!s)
return ERR_PTR(-EINVAL);
npath = kstrndup(path, s - path, GFP_KERNEL);
if (!npath)
return ERR_PTR(-ENOMEM);
return npath;
}
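Concretely, for the cache-normalized DFS paths these helpers expect (a leading separator, backslash-delimited components), their behavior works out as below; the example paths are invented:

/* Illustrative behavior of the two helpers above:
 *
 *   is_dfs_link("\server\share")        -> false (two components: a root)
 *   is_dfs_link("\server\share\link")   -> true  (a third component exists)
 *   get_dfs_root("\server\share\link")  -> "\server\share" (kstrndup'd;
 *                                          the caller must kfree it)
 *   get_dfs_root("\server")             -> ERR_PTR(-EINVAL)
 */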
/* Find root SMB session out of a DFS link path */
static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
struct cifs_tcon *tcon, const char *path)
{
char *rpath;
int rc;
struct dfs_info3_param ref = {0};
char *mdata = NULL, *devname = NULL;
bool is_smb3 = tcon->ses->server->vals->header_preamble_size == 0;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct smb_vol vol;
rpath = get_dfs_root(path);
if (IS_ERR(rpath))
return ERR_CAST(rpath);
memset(&vol, 0, sizeof(vol));
rc = dfs_cache_noreq_find(rpath, &ref, NULL);
if (rc) {
ses = ERR_PTR(rc);
goto out;
}
mdata = cifs_compose_mount_options(vi->vi_mntdata, rpath, &ref,
&devname);
free_dfs_info_param(&ref);
if (IS_ERR(mdata)) {
ses = ERR_CAST(mdata);
mdata = NULL;
goto out;
}
rc = cifs_setup_volume_info(&vol, mdata, devname, is_smb3);
kfree(devname);
if (rc) {
ses = ERR_PTR(rc);
goto out;
}
server = cifs_find_tcp_session(&vol);
if (IS_ERR_OR_NULL(server)) {
ses = ERR_PTR(-EHOSTDOWN);
goto out;
}
if (server->tcpStatus != CifsGood) {
cifs_put_tcp_session(server, 0);
ses = ERR_PTR(-EHOSTDOWN);
goto out;
}
ses = cifs_get_smb_ses(server, &vol);
out:
cifs_cleanup_volume_info_contents(&vol);
kfree(mdata);
kfree(rpath);
return ses;
}
/* Refresh DFS cache entry from a given tcon */
static void do_refresh_tcon(struct dfs_cache *dc, struct cifs_tcon *tcon)
static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
struct cifs_tcon *tcon)
{
int rc = 0;
unsigned int xid;
......@@ -1285,6 +1384,7 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct cifs_tcon *tcon)
struct dfs_cache_entry *ce;
struct dfs_info3_param *refs = NULL;
int numrefs = 0;
struct cifs_ses *root_ses = NULL, *ses;
xid = get_xid();
......@@ -1306,12 +1406,23 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct cifs_tcon *tcon)
if (!cache_entry_expired(ce))
goto out;
if (unlikely(!tcon->ses->server->ops->get_dfs_refer)) {
/* If it's a DFS Link, then use root SMB session for refreshing it */
if (is_dfs_link(npath)) {
ses = root_ses = find_root_ses(vi, tcon, npath);
if (IS_ERR(ses)) {
rc = PTR_ERR(ses);
root_ses = NULL;
goto out;
}
} else {
ses = tcon->ses;
}
if (unlikely(!ses->server->ops->get_dfs_refer)) {
rc = -EOPNOTSUPP;
} else {
rc = tcon->ses->server->ops->get_dfs_refer(xid, tcon->ses, path,
&refs, &numrefs,
dc->dc_nlsc,
rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs,
&numrefs, dc->dc_nlsc,
tcon->remap);
if (!rc) {
mutex_lock(&dfs_cache_list_lock);
......@@ -1323,9 +1434,11 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct cifs_tcon *tcon)
rc = PTR_ERR(ce);
}
}
if (rc)
cifs_dbg(FYI, "%s: failed to update expired entry\n", __func__);
out:
if (root_ses)
cifs_put_smb_ses(root_ses);
free_xid(xid);
free_normalized_path(path, npath);
}
......@@ -1333,9 +1446,6 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct cifs_tcon *tcon)
/*
* Worker that will refresh DFS cache based on lowest TTL value from a DFS
* referral.
*
* FIXME: ensure that all requests are sent to DFS root for refreshing the
* cache.
*/
static void refresh_cache_worker(struct work_struct *work)
{
......@@ -1356,7 +1466,7 @@ static void refresh_cache_worker(struct work_struct *work)
goto next;
get_tcons(server, &list);
list_for_each_entry_safe(tcon, ntcon, &list, ulist) {
do_refresh_tcon(dc, tcon);
do_refresh_tcon(dc, vi, tcon);
list_del_init(&tcon->ulist);
cifs_put_tcon(tcon);
}
......
......@@ -2,7 +2,7 @@
/*
* DFS referral cache routines
*
* Copyright (c) 2018 Paulo Alcantara <palcantara@suse.de>
* Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
*/
#ifndef _CIFS_DFS_CACHE_H
......@@ -43,7 +43,8 @@ dfs_cache_noreq_update_tgthint(const char *path,
extern int dfs_cache_get_tgt_referral(const char *path,
const struct dfs_cache_tgt_iterator *it,
struct dfs_info3_param *ref);
extern int dfs_cache_add_vol(struct smb_vol *vol, const char *fullpath);
extern int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol,
const char *fullpath);
extern int dfs_cache_update_vol(const char *fullpath,
struct TCP_Server_Info *server);
extern void dfs_cache_del_vol(const char *fullpath);
......
......@@ -2443,7 +2443,6 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
rc = file_write_and_wait_range(file, start, end);
if (rc)
return rc;
inode_lock(inode);
xid = get_xid();
......@@ -2468,7 +2467,6 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
}
free_xid(xid);
inode_unlock(inode);
return rc;
}
......@@ -2480,12 +2478,10 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct TCP_Server_Info *server;
struct cifsFileInfo *smbfile = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
struct inode *inode = file->f_mapping->host;
rc = file_write_and_wait_range(file, start, end);
if (rc)
return rc;
inode_lock(inode);
xid = get_xid();
......@@ -2502,7 +2498,6 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
free_xid(xid);
inode_unlock(inode);
return rc;
}
......
......@@ -2116,6 +2116,43 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
return rc;
}
int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
u64 len)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->vfs_inode.i_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct TCP_Server_Info *server = tcon->ses->server;
struct cifsFileInfo *cfile;
int rc;
/*
* We need to be sure that all dirty pages are written as they
* might fill holes on the server.
*/
if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
inode->i_mapping->nrpages != 0) {
rc = filemap_fdatawait(inode->i_mapping);
if (rc) {
mapping_set_error(inode->i_mapping, rc);
return rc;
}
}
cfile = find_readable_file(cifs_i, false);
if (cfile == NULL)
return -EINVAL;
if (server->ops->fiemap) {
rc = server->ops->fiemap(tcon, cfile, fei, start, len);
cifsFileInfo_put(cfile);
return rc;
}
cifsFileInfo_put(cfile);
return -ENOTSUPP;
}
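With this hooked up (see the .fiemap entries added to the smb_version_operations tables below), extents on a cifs mount become visible through the standard FIEMAP ioctl. A minimal userspace sketch using the generic Linux fiemap API (error handling trimmed):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i;
	int fd = open(argv[1], O_RDONLY);

	if (fd < 0)
		return 1;
	/* room for the header plus up to 32 extents */
	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;
	fm->fm_flags = FIEMAP_FLAG_SYNC;  /* the only flag smb3_fiemap allows */
	fm->fm_extent_count = 32;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("extent %u: logical %llu len %llu flags 0x%x\n", i,
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	free(fm);
	close(fd);
	return 0;
}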
static int cifs_truncate_page(struct address_space *mapping, loff_t from)
{
pgoff_t index = from >> PAGE_SHIFT;
......
......@@ -648,9 +648,16 @@ cifs_get_link(struct dentry *direntry, struct inode *inode,
rc = query_mf_symlink(xid, tcon, cifs_sb, full_path,
&target_path);
if (rc != 0 && server->ops->query_symlink)
rc = server->ops->query_symlink(xid, tcon, full_path,
&target_path, cifs_sb);
if (rc != 0 && server->ops->query_symlink) {
struct cifsInodeInfo *cifsi = CIFS_I(inode);
bool reparse_point = false;
if (cifsi->cifsAttrs & ATTR_REPARSE)
reparse_point = true;
rc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path,
&target_path, reparse_point);
}
kfree(full_path);
free_xid(xid);
......
......@@ -950,8 +950,8 @@ cifs_unix_dfs_readlink(const unsigned int xid, struct cifs_tcon *tcon,
static int
cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
const char *full_path, char **target_path,
struct cifs_sb_info *cifs_sb)
struct cifs_sb_info *cifs_sb, const char *full_path,
char **target_path, bool is_reparse_point)
{
int rc;
int oplock = 0;
......@@ -960,6 +960,11 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
if (is_reparse_point) {
cifs_dbg(VFS, "reparse points not handled for SMB1 symlinks\n");
return -EOPNOTSUPP;
}
/* Check for unix extensions */
if (cap_unix(tcon->ses)) {
rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
......
......@@ -1382,6 +1382,26 @@ smb2_ioctl_query_info(const unsigned int xid,
oparms.fid = &fid;
oparms.reconnect = false;
/*
* FSCTL codes encode the special access they need in the fsctl code.
*/
if (qi.flags & PASSTHRU_FSCTL) {
switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
break;
case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
oparms.desired_access = GENERIC_ALL;
break;
case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
oparms.desired_access = GENERIC_READ;
break;
case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
oparms.desired_access = GENERIC_WRITE;
break;
}
}
rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
if (rc)
goto iqinf_exit;
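The access switch just above works because of how Windows control codes are laid out: in the CTL_CODE() encoding, the access the caller must hold occupies bits 14-15, between the 16-bit device type and the function number. A sketch of that decode (bit positions per the Windows ioctl layout; the FSCTL_DEVICE_ACCESS_* values used above are the kernel's named equivalents, so this raw-shift form is for illustration only):

/* CTL_CODE layout:
 *   bits 31..16  device type
 *   bits 15..14  required access  (FILE_ANY_ACCESS = 0, READ = 1,
 *                                  WRITE = 2, READ|WRITE = 3)
 *   bits 13..2   function number
 *   bits  1..0   transfer method
 */
static inline unsigned int fsctl_required_access(unsigned int fsctl_code)
{
	return (fsctl_code >> 14) & 0x3;
}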
......@@ -1399,8 +1419,9 @@ smb2_ioctl_query_info(const unsigned int xid,
rc = SMB2_ioctl_init(tcon, &rqst[1],
COMPOUND_FID, COMPOUND_FID,
qi.info_type, true, NULL,
0, CIFSMaxBufSize);
qi.info_type, true, buffer,
qi.output_buffer_length,
CIFSMaxBufSize);
}
} else if (qi.flags == PASSTHRU_QUERY_INFO) {
memset(&qi_iov, 0, sizeof(qi_iov));
......@@ -1441,12 +1462,19 @@ smb2_ioctl_query_info(const unsigned int xid,
io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
if (qi.input_buffer_length > 0 &&
le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) {
rc = -EFAULT;
goto iqinf_exit;
}
if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
sizeof(qi.input_buffer_length))) {
rc = -EFAULT;
goto iqinf_exit;
}
if (copy_to_user(pqi + 1, &io_rsp[1], qi.input_buffer_length)) {
if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
(const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
qi.input_buffer_length)) {
rc = -EFAULT;
goto iqinf_exit;
}
......@@ -1821,6 +1849,14 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
u32 max_response_size;
struct smb_snapshot_array snapshot_in;
/*
* On the first query to enumerate the list of snapshots available
* for this volume the buffer begins with 0 (number of snapshots
* which can be returned is zero since at that point we do not know
* how big the buffer needs to be). On the second query,
* it (ret_data_len) is set to number of snapshots so we can
* know to set the maximum response size larger (see below).
*/
if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
return -EFAULT;
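From userspace the two-pass pattern the comment describes looks roughly like the fragment below. CIFS_ENUMERATE_SNAPSHOTS and struct smb_snapshot_array come from the cifs ioctl headers; the size field name used here is an assumption for illustration:

/* Sketch (fragment) of two-pass snapshot enumeration; field names assumed. */
struct smb_snapshot_array hdr;
char *buf;

memset(&hdr, 0, sizeof(hdr));        /* pass 1: snapshot count starts at 0 */
ioctl(fd, CIFS_ENUMERATE_SNAPSHOTS, &hdr);

/* pass 2: hdr now reports how many snapshots exist, so the kernel knows
 * to request a response large enough to hold the full array */
buf = calloc(1, sizeof(hdr) + hdr.snapshot_array_size);
memcpy(buf, &hdr, sizeof(hdr));
ioctl(fd, CIFS_ENUMERATE_SNAPSHOTS, buf);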
......@@ -2354,46 +2390,129 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
const char *full_path, char **target_path,
struct cifs_sb_info *cifs_sb)
struct cifs_sb_info *cifs_sb, const char *full_path,
char **target_path, bool is_reparse_point)
{
int rc;
__le16 *utf16_path;
__le16 *utf16_path = NULL;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
struct cifs_fid fid;
struct kvec err_iov = {NULL, 0};
struct smb2_err_rsp *err_buf = NULL;
int resp_buftype;
struct smb2_symlink_err_rsp *symlink;
unsigned int sub_len;
unsigned int sub_offset;
unsigned int print_len;
unsigned int print_offset;
int flags = 0;
struct smb_rqst rqst[3];
int resp_buftype[3];
struct kvec rsp_iov[3];
struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
struct kvec close_iov[1];
struct smb2_create_rsp *create_rsp;
struct smb2_ioctl_rsp *ioctl_rsp;
char *ioctl_buf;
u32 plen;
cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(rqst, 0, sizeof(rqst));
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
if (!utf16_path)
return -ENOMEM;
/* Open */
memset(&open_iov, 0, sizeof(open_iov));
rqst[0].rq_iov = open_iov;
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
memset(&oparms, 0, sizeof(oparms));
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
if (backup_cred(cifs_sb))
oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
else
oparms.create_options = 0;
if (is_reparse_point)
oparms.create_options = OPEN_REPARSE_POINT;
oparms.fid = &fid;
oparms.reconnect = false;
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
&resp_buftype);
if (!rc)
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto querty_exit;
smb2_set_next_command(tcon, &rqst[0]);
/* IOCTL */
memset(&io_iov, 0, sizeof(io_iov));
rqst[1].rq_iov = io_iov;
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
true /* is_fctl */, NULL, 0, CIFSMaxBufSize);
if (rc)
goto querty_exit;
smb2_set_next_command(tcon, &rqst[1]);
smb2_set_related(&rqst[1]);
/* Close */
memset(&close_iov, 0, sizeof(close_iov));
rqst[2].rq_iov = close_iov;
rqst[2].rq_nvec = 1;
rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
if (rc)
goto querty_exit;
smb2_set_related(&rqst[2]);
rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
resp_buftype, rsp_iov);
create_rsp = rsp_iov[0].iov_base;
if (create_rsp && create_rsp->sync_hdr.Status)
err_iov = rsp_iov[0];
ioctl_rsp = rsp_iov[1].iov_base;
/*
* Open was successful and we got an ioctl response.
*/
if ((rc == 0) && (is_reparse_point)) {
/* See MS-FSCC 2.3.23 */
ioctl_buf = (char *)ioctl_rsp + le32_to_cpu(ioctl_rsp->OutputOffset);
plen = le32_to_cpu(ioctl_rsp->OutputCount);
if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
rsp_iov[1].iov_len) {
cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", plen);
rc = -EIO;
goto querty_exit;
}
/* Do stuff with ioctl_buf/plen */
goto querty_exit;
}
if (!rc || !err_iov.iov_base) {
rc = -ENOENT;
goto free_path;
goto querty_exit;
}
err_buf = err_iov.iov_base;
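In the reparse-point branch above, what lands in ioctl_buf is a reparse data buffer; for the symlink tag, MS-FSCC 2.1.2.4 lays it out as sketched below. This is what the "Do stuff with ioctl_buf/plen" step would parse once completed (layout per the spec, not code this commit ships):

/* Symbolic Link Reparse Data Buffer, per MS-FSCC 2.1.2.4 (sketch). */
struct reparse_symlink_data_buffer {
	__le32 ReparseTag;		/* 0xA000000C for symlinks */
	__le16 ReparseDataLength;
	__u16  Reserved;
	__le16 SubstituteNameOffset;	/* byte offsets into PathBuffer */
	__le16 SubstituteNameLength;
	__le16 PrintNameOffset;
	__le16 PrintNameLength;
	__le32 Flags;			/* SYMLINK_FLAG_RELATIVE = 1 */
	__u8   PathBuffer[];		/* UTF-16LE names */
} __packed;

/* After bounds-checking against plen, the target is the UTF-16LE string
 * at PathBuffer + SubstituteNameOffset (SubstituteNameLength bytes),
 * converted to the local charset to fill *target_path. */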
......@@ -2433,9 +2552,14 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
querty_exit:
free_rsp_buf(resp_buftype, err_buf);
free_path:
cifs_dbg(FYI, "query symlink rc %d\n", rc);
kfree(utf16_path);
SMB2_open_free(&rqst[0]);
SMB2_ioctl_free(&rqst[1]);
SMB2_close_free(&rqst[2]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
return rc;
}
......@@ -2612,16 +2736,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
struct cifsInodeInfo *cifsi;
struct cifsFileInfo *cfile = file->private_data;
struct file_zero_data_information fsctl_buf;
struct smb_rqst rqst[2];
int resp_buftype[2];
struct kvec rsp_iov[2];
struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
struct kvec si_iov[1];
unsigned int size[1];
void *data[1];
long rc;
unsigned int xid;
int num = 0, flags = 0;
__le64 eof;
xid = get_xid();
......@@ -2643,39 +2759,16 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
return rc;
}
/*
* Must check if file sparse since fallocate -z (zero range) assumes
* non-sparse allocation
*/
if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
rc = -EOPNOTSUPP;
trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
ses->Suid, offset, len, rc);
free_xid(xid);
return rc;
}
cifs_dbg(FYI, "offset %lld len %lld", offset, len);
fsctl_buf.FileOffset = cpu_to_le64(offset);
fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(rqst, 0, sizeof(rqst));
resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
memset(&io_iov, 0, sizeof(io_iov));
rqst[num].rq_iov = io_iov;
rqst[num].rq_nvec = SMB2_IOCTL_IOV_SIZE;
rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
true /* is_fctl */, (char *)&fsctl_buf,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
(char *)&fsctl_buf,
sizeof(struct file_zero_data_information),
CIFSMaxBufSize);
0, NULL, NULL);
if (rc)
goto zero_range_exit;
......@@ -2683,33 +2776,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
* do we also need to change the size of the file?
*/
if (keep_size == false && i_size_read(inode) < offset + len) {
smb2_set_next_command(tcon, &rqst[0]);
memset(&si_iov, 0, sizeof(si_iov));
rqst[num].rq_iov = si_iov;
rqst[num].rq_nvec = 1;
eof = cpu_to_le64(offset + len);
size[0] = 8; /* sizeof __le64 */
data[0] = &eof;
rc = SMB2_set_info_init(tcon, &rqst[num++],
cfile->fid.persistent_fid,
cfile->fid.persistent_fid,
current->tgid,
FILE_END_OF_FILE_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
smb2_set_related(&rqst[1]);
rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->pid, &eof);
}
rc = compound_send_recv(xid, ses, flags, num, rqst,
resp_buftype, rsp_iov);
zero_range_exit:
SMB2_ioctl_free(&rqst[0]);
SMB2_set_info_free(&rqst[1]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
free_xid(xid);
if (rc)
trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
......@@ -2850,6 +2922,79 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
return rc;
}
static int smb3_fiemap(struct cifs_tcon *tcon,
struct cifsFileInfo *cfile,
struct fiemap_extent_info *fei, u64 start, u64 len)
{
unsigned int xid;
struct file_allocated_range_buffer in_data, *out_data;
u32 out_data_len;
int i, num, rc, flags, last_blob;
u64 next;
if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
return -EBADR;
xid = get_xid();
again:
in_data.file_offset = cpu_to_le64(start);
in_data.length = cpu_to_le64(len);
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_QUERY_ALLOCATED_RANGES, true,
(char *)&in_data, sizeof(in_data),
1024 * sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len);
if (rc == -E2BIG) {
last_blob = 0;
rc = 0;
} else
last_blob = 1;
if (rc)
goto out;
if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
rc = -EINVAL;
goto out;
}
if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
rc = -EINVAL;
goto out;
}
num = out_data_len / sizeof(struct file_allocated_range_buffer);
for (i = 0; i < num; i++) {
flags = 0;
if (i == num - 1 && last_blob)
flags |= FIEMAP_EXTENT_LAST;
rc = fiemap_fill_next_extent(fei,
le64_to_cpu(out_data[i].file_offset),
le64_to_cpu(out_data[i].file_offset),
le64_to_cpu(out_data[i].length),
flags);
if (rc < 0)
goto out;
if (rc == 1) {
rc = 0;
goto out;
}
}
if (!last_blob) {
next = le64_to_cpu(out_data[num - 1].file_offset) +
le64_to_cpu(out_data[num - 1].length);
len = len - (next - start);
start = next;
goto again;
}
out:
free_xid(xid);
kfree(out_data);
return rc;
}
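The -E2BIG continuation arithmetic deserves a worked example. Suppose the caller asks for start = 0, len = 1 GiB, and the server can only return 1024 ranges, the last of which is {file_offset = 200 MiB, length = 50 MiB} (numbers invented for illustration):

/* Worked example of the continuation math above:
 *
 *   next  = 200 MiB + 50 MiB = 250 MiB     (first byte not yet covered)
 *   len   = 1024 MiB - (250 MiB - 0) = 774 MiB remaining to query
 *   start = 250 MiB
 *
 * and the loop reissues FSCTL_QUERY_ALLOCATED_RANGES from there until
 * the server returns a final (non -E2BIG) blob. */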
static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
loff_t off, loff_t len)
......@@ -2917,26 +3062,28 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int epoch, bool *purge_cache)
{
char message[5] = {0};
unsigned int new_oplock = 0;
oplock &= 0xFF;
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
cinode->oplock = 0;
if (oplock & SMB2_LEASE_READ_CACHING_HE) {
cinode->oplock |= CIFS_CACHE_READ_FLG;
new_oplock |= CIFS_CACHE_READ_FLG;
strcat(message, "R");
}
if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
new_oplock |= CIFS_CACHE_HANDLE_FLG;
strcat(message, "H");
}
if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
cinode->oplock |= CIFS_CACHE_WRITE_FLG;
new_oplock |= CIFS_CACHE_WRITE_FLG;
strcat(message, "W");
}
if (!cinode->oplock)
strcat(message, "None");
if (!new_oplock)
strncpy(message, "None", sizeof(message));
cinode->oplock = new_oplock;
cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
&cinode->vfs_inode);
}
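The rewrite fixes two related problems in the old version, which updated cinode->oplock incrementally and then re-read it for the final check. A worked failure case under the old code (assuming a concurrent oplock break, which is exactly the race this guards against):

/* Failure mode of the old code, for illustration:
 *
 *   char message[5] = {0};   5 bytes: worst case "RHW" + NUL just fits
 *
 *   - this thread sets the READ/HANDLE/WRITE cache flags in
 *     cinode->oplock and strcat()s "R", "H", "W" -> message = "RHW"
 *   - another thread clears cinode->oplock (oplock break)
 *   - the old `if (!cinode->oplock) strcat(message, "None")` then fires,
 *     producing "RHWNone": 8 bytes written into a 5-byte buffer.
 *
 * Building new_oplock locally and storing it once avoids both the stale
 * re-read and the overflow; strncpy() bounds the "None" case as well. */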
......@@ -4018,6 +4165,7 @@ struct smb_version_operations smb20_operations = {
.next_header = smb2_next_header,
.ioctl_query_info = smb2_ioctl_query_info,
.make_node = smb2_make_node,
.fiemap = smb3_fiemap,
};
struct smb_version_operations smb21_operations = {
......@@ -4117,6 +4265,7 @@ struct smb_version_operations smb21_operations = {
.next_header = smb2_next_header,
.ioctl_query_info = smb2_ioctl_query_info,
.make_node = smb2_make_node,
.fiemap = smb3_fiemap,
};
struct smb_version_operations smb30_operations = {
......@@ -4225,6 +4374,7 @@ struct smb_version_operations smb30_operations = {
.next_header = smb2_next_header,
.ioctl_query_info = smb2_ioctl_query_info,
.make_node = smb2_make_node,
.fiemap = smb3_fiemap,
};
struct smb_version_operations smb311_operations = {
......@@ -4334,6 +4484,7 @@ struct smb_version_operations smb311_operations = {
.next_header = smb2_next_header,
.ioctl_query_info = smb2_ioctl_query_info,
.make_node = smb2_make_node,
.fiemap = smb3_fiemap,
};
struct smb_version_values smb20_values = {
......
......@@ -459,10 +459,7 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
return rc;
}
#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
#define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2)
#define SMB2_POSIX_EXTENSIONS_AVAILABLE cpu_to_le16(0x100)
/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
......@@ -475,6 +472,19 @@ build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}
static void
build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
{
pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
pneg_ctxt->DataLength =
cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
- sizeof(struct smb2_neg_context));
pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
}
static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
......@@ -541,10 +551,17 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
*total_len += ctxt_len;
pneg_ctxt += ctxt_len;
build_compression_ctxt((struct smb2_compression_capabilities_context *)
pneg_ctxt);
ctxt_len = DIV_ROUND_UP(
sizeof(struct smb2_compression_capabilities_context), 8) * 8;
*total_len += ctxt_len;
pneg_ctxt += ctxt_len;
build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
*total_len += sizeof(struct smb2_posix_neg_context);
req->NegotiateContextCount = cpu_to_le16(3);
req->NegotiateContextCount = cpu_to_le16(4);
}
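The DIV_ROUND_UP(..., 8) * 8 step exists because negotiate contexts must start on 8-byte boundaries (MS-SMB2 2.2.3.1). For the compression context the arithmetic works out as follows, using the field sizes from the struct added to smb2pdu.h below:

/* 8-byte alignment of the compression context, worked out:
 *
 *   sizeof(struct smb2_compression_capabilities_context)
 *     = ContextType(2) + DataLength(2) + Reserved(4)
 *     + CompressionAlgorithmCount(2) + Padding(2) + Reserved1(4)
 *     + CompressionAlgorithms[3](6)
 *     = 22 bytes
 *
 *   ctxt_len = DIV_ROUND_UP(22, 8) * 8 = 3 * 8 = 24
 *
 * so pneg_ctxt advances 24 bytes, leaving the following POSIX context
 * 8-byte aligned as the spec requires. */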
static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
......@@ -562,6 +579,27 @@ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
printk_once(KERN_WARNING "unknown SMB3 hash algorithm\n");
}
static void decode_compress_ctx(struct TCP_Server_Info *server,
struct smb2_compression_capabilities_context *ctxt)
{
unsigned int len = le16_to_cpu(ctxt->DataLength);
/* data length of a compression context with a single algorithm is 10 bytes */
if (len < 10) {
printk_once(KERN_WARNING "server sent bad compression cntxt\n");
return;
}
if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
printk_once(KERN_WARNING "illegal SMB3 compress algorithm count\n");
return;
}
if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
printk_once(KERN_WARNING "unknown compression algorithm\n");
return;
}
server->compress_algorithm = ctxt->CompressionAlgorithms[0];
}
static int decode_encrypt_ctx(struct TCP_Server_Info *server,
struct smb2_encryption_neg_context *ctxt)
{
......@@ -626,6 +664,9 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
rc = decode_encrypt_ctx(server,
(struct smb2_encryption_neg_context *)pctx);
else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
decode_compress_ctx(server,
(struct smb2_compression_capabilities_context *)pctx);
else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
server->posix_ext_supported = true;
else
......@@ -1541,7 +1582,7 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
else if (server->sign)
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
flags |= CIFS_NO_RESP;
flags |= CIFS_NO_RSP_BUF;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
......@@ -1742,7 +1783,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
flags |= CIFS_NO_RESP;
flags |= CIFS_NO_RSP_BUF;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
......@@ -2625,7 +2666,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
ses->Suid, 0, opcode, rc);
if ((rc != 0) && (rc != -EINVAL)) {
if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
} else if (rc == -EINVAL) {
......@@ -2634,6 +2675,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
}
} else if (rc == -E2BIG) {
if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
}
}
/* check if caller wants to look at return data or just return rc */
......@@ -3223,7 +3269,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
rdata->nr_pages, rdata->page_offset,
rdata->tailsz, true, need_invalidate);
if (!rdata->mr)
return -ENOBUFS;
return -EAGAIN;
req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
if (need_invalidate)
......@@ -3628,7 +3674,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
wdata->nr_pages, wdata->page_offset,
wdata->tailsz, false, need_invalidate);
if (!wdata->mr) {
rc = -ENOBUFS;
rc = -EAGAIN;
goto async_writev_out;
}
req->Length = 0;
......@@ -4164,7 +4210,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
req->OplockLevel = oplock_level;
req->sync_hdr.CreditRequest = cpu_to_le16(1);
flags |= CIFS_NO_RESP;
flags |= CIFS_NO_RSP_BUF;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
......@@ -4438,7 +4484,7 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec rsp_iov;
int resp_buf_type;
unsigned int count;
int flags = CIFS_NO_RESP;
int flags = CIFS_NO_RSP_BUF;
unsigned int total_len;
cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
......@@ -4531,7 +4577,7 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
memcpy(req->LeaseKey, lease_key, 16);
req->LeaseState = lease_state;
flags |= CIFS_NO_RESP;
flags |= CIFS_NO_RSP_BUF;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
......
......@@ -251,6 +251,14 @@ struct smb2_negotiate_req {
#define SMB2_NT_FIND 0x00100000
#define SMB2_LARGE_FILES 0x00200000
/* Negotiate Contexts - ContextTypes. See MS-SMB2 section 2.2.3.1 for details */
#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
#define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2)
#define SMB2_COMPRESSION_CAPABILITIES cpu_to_le16(3)
#define SMB2_NETNAME_NEGOTIATE_CONTEXT_ID cpu_to_le16(5)
#define SMB2_POSIX_EXTENSIONS_AVAILABLE cpu_to_le16(0x100)
struct smb2_neg_context {
__le16 ContextType;
__le16 DataLength;
......@@ -288,6 +296,27 @@ struct smb2_encryption_neg_context {
__le16 Ciphers[1]; /* Ciphers[0] since only one used now */
} __packed;
/* See MS-SMB2 2.2.3.1.3 */
#define SMB3_COMPRESS_NONE cpu_to_le16(0x0000)
#define SMB3_COMPRESS_LZNT1 cpu_to_le16(0x0001)
#define SMB3_COMPRESS_LZ77 cpu_to_le16(0x0002)
#define SMB3_COMPRESS_LZ77_HUFF cpu_to_le16(0x0003)
struct smb2_compression_capabilities_context {
__le16 ContextType; /* 3 */
__le16 DataLength;
__u32 Reserved;
__le16 CompressionAlgorithmCount;
__u16 Padding;
__u32 Reserved1;
__le16 CompressionAlgorithms[3];
} __packed;
/*
* For smb2_netname_negotiate_context_id See MS-SMB2 2.2.3.1.4.
* Its struct simply contains NetName, an array of Unicode characters
*/
#define POSIX_CTXT_DATA_LEN 16
struct smb2_posix_neg_context {
__le16 ContextType; /* 0x100 */
......@@ -842,6 +871,11 @@ struct fsctl_get_integrity_information_rsp {
__le32 ClusterSizeInBytes;
} __packed;
struct file_allocated_range_buffer {
__le64 file_offset;
__le64 length;
} __packed;
/* Integrity ChecksumAlgorithm choices for above */
#define CHECKSUM_TYPE_NONE 0x0000
#define CHECKSUM_TYPE_CRC64 0x0002
......@@ -1047,6 +1081,7 @@ struct smb2_flush_rsp {
/* For read request Flags field below, following flag is defined for SMB3.02 */
#define SMB2_READFLAG_READ_UNBUFFERED 0x01
#define SMB2_READFLAG_REQUEST_COMPRESSED 0x02 /* See MS-SMB2 2.2.19 */
/* Channel field for read and write: exactly one of following flags can be set */
#define SMB2_CHANNEL_NONE cpu_to_le32(0x00000000)
......@@ -1113,6 +1148,42 @@ struct smb2_write_rsp {
__u8 Buffer[1];
} __packed;
/* notify flags */
#define SMB2_WATCH_TREE 0x0001
/* notify completion filter flags. See MS-FSCC 2.6 and MS-SMB2 2.2.35 */
#define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001
#define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002
#define FILE_NOTIFY_CHANGE_ATTRIBUTES 0x00000004
#define FILE_NOTIFY_CHANGE_SIZE 0x00000008
#define FILE_NOTIFY_CHANGE_LAST_WRITE 0x00000010
#define FILE_NOTIFY_CHANGE_LAST_ACCESS 0x00000020
#define FILE_NOTIFY_CHANGE_CREATION 0x00000040
#define FILE_NOTIFY_CHANGE_EA 0x00000080
#define FILE_NOTIFY_CHANGE_SECURITY 0x00000100
#define FILE_NOTIFY_CHANGE_STREAM_NAME 0x00000200
#define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400
#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800
struct smb2_change_notify_req {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize;
__le16 Flags;
__le32 OutputBufferLength;
__u64 PersistentFileId; /* opaque endianness */
__u64 VolatileFileId; /* opaque endianness */
__le32 CompletionFilter;
__u32 Reserved;
} __packed;
struct smb2_change_notify_rsp {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 9 */
__le16 OutputBufferOffset;
__le32 OutputBufferLength;
__u8 Buffer[1]; /* array of file notify structs */
} __packed;
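These are protocol definitions only; per the commit list above, the change notify implementation itself comes later. Still, filling a request is mechanical once a handler exists. A sketch, assuming the same smb2_plain_req_init() helper the other request builders in smb2pdu.c use (send/receive plumbing omitted; this is not code this commit ships):

/* Sketch (fragment): populating a change notify request for a watch
 * on an already-opened directory handle. */
struct smb2_change_notify_req *req;
unsigned int total_len;
int rc;

rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, (void **)&req,
			 &total_len);
if (rc)
	return rc;

req->PersistentFileId = persistent_fid;	/* from the SMB2_open */
req->VolatileFileId = volatile_fid;
req->OutputBufferLength = cpu_to_le32(SMB2_MAX_BUFFER_SIZE);
req->Flags = cpu_to_le16(SMB2_WATCH_TREE);	/* recurse into subdirs */
req->CompletionFilter = cpu_to_le32(FILE_NOTIFY_CHANGE_FILE_NAME |
				    FILE_NOTIFY_CHANGE_DIR_NAME);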
#define SMB2_LOCKFLAG_SHARED_LOCK 0x0001
#define SMB2_LOCKFLAG_EXCLUSIVE_LOCK 0x0002
#define SMB2_LOCKFLAG_UNLOCK 0x0004
......
[diff omitted: too large to display]
......@@ -164,95 +164,6 @@ do { \
#define log_rdma_mr(level, fmt, args...) \
log_rdma(level, LOG_RDMA_MR, fmt, ##args)
/*
* Destroy the transport and related RDMA and memory resources
* Need to go through all the pending counters and make sure no one is using
* the transport while it is destroyed
*/
static void smbd_destroy_rdma_work(struct work_struct *work)
{
struct smbd_response *response;
struct smbd_connection *info =
container_of(work, struct smbd_connection, destroy_work);
unsigned long flags;
log_rdma_event(INFO, "destroying qp\n");
ib_drain_qp(info->id->qp);
rdma_destroy_qp(info->id);
/* Unblock all I/O waiting on the send queue */
wake_up_interruptible_all(&info->wait_send_queue);
log_rdma_event(INFO, "cancelling idle timer\n");
cancel_delayed_work_sync(&info->idle_timer_work);
log_rdma_event(INFO, "cancelling send immediate work\n");
cancel_delayed_work_sync(&info->send_immediate_work);
log_rdma_event(INFO, "wait for all send to finish\n");
wait_event(info->wait_smbd_send_pending,
info->smbd_send_pending == 0);
log_rdma_event(INFO, "wait for all recv to finish\n");
wake_up_interruptible(&info->wait_reassembly_queue);
wait_event(info->wait_smbd_recv_pending,
info->smbd_recv_pending == 0);
log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
wait_event(info->wait_send_pending,
atomic_read(&info->send_pending) == 0);
wait_event(info->wait_send_payload_pending,
atomic_read(&info->send_payload_pending) == 0);
log_rdma_event(INFO, "freeing mr list\n");
wake_up_interruptible_all(&info->wait_mr);
wait_event(info->wait_for_mr_cleanup,
atomic_read(&info->mr_used_count) == 0);
destroy_mr_list(info);
/* It's not possible for upper layer to get to reassembly */
log_rdma_event(INFO, "drain the reassembly queue\n");
do {
spin_lock_irqsave(&info->reassembly_queue_lock, flags);
response = _get_first_reassembly(info);
if (response) {
list_del(&response->list);
spin_unlock_irqrestore(
&info->reassembly_queue_lock, flags);
put_receive_buffer(info, response);
} else
spin_unlock_irqrestore(&info->reassembly_queue_lock, flags);
} while (response);
info->reassembly_data_length = 0;
log_rdma_event(INFO, "free receive buffers\n");
wait_event(info->wait_receive_queues,
info->count_receive_queue + info->count_empty_packet_queue
== info->receive_credit_max);
destroy_receive_buffers(info);
ib_free_cq(info->send_cq);
ib_free_cq(info->recv_cq);
ib_dealloc_pd(info->pd);
rdma_destroy_id(info->id);
/* free mempools */
mempool_destroy(info->request_mempool);
kmem_cache_destroy(info->request_cache);
mempool_destroy(info->response_mempool);
kmem_cache_destroy(info->response_cache);
info->transport_status = SMBD_DESTROYED;
wake_up_all(&info->wait_destroy);
}
static int smbd_process_disconnected(struct smbd_connection *info)
{
schedule_work(&info->destroy_work);
return 0;
}
static void smbd_disconnect_rdma_work(struct work_struct *work)
{
struct smbd_connection *info =
......@@ -319,7 +230,9 @@ static int smbd_conn_upcall(
}
info->transport_status = SMBD_DISCONNECTED;
smbd_process_disconnected(info);
wake_up_interruptible(&info->disconn_wait);
wake_up_interruptible(&info->wait_reassembly_queue);
wake_up_interruptible_all(&info->wait_send_queue);
break;
default:
......@@ -940,7 +853,7 @@ static int smbd_create_header(struct smbd_connection *info,
if (info->transport_status != SMBD_CONNECTED) {
log_outgoing(ERR, "disconnected not sending\n");
return -ENOENT;
return -EAGAIN;
}
atomic_dec(&info->send_credits);
......@@ -1066,6 +979,7 @@ static int smbd_post_send(struct smbd_connection *info,
wake_up(&info->wait_send_pending);
}
smbd_disconnect_rdma_connection(info);
rc = -EAGAIN;
} else
/* Reset timer for idle connection after packet is sent */
mod_delayed_work(info->workqueue, &info->idle_timer_work,
......@@ -1478,17 +1392,97 @@ static void idle_connection_timer(struct work_struct *work)
info->keep_alive_interval*HZ);
}
/* Destroy this SMBD connection, called from upper layer */
void smbd_destroy(struct smbd_connection *info)
/*
* Destroy the transport and related RDMA and memory resources
* Need to go through all the pending counters and make sure no one is using
* the transport while it is destroyed
*/
void smbd_destroy(struct TCP_Server_Info *server)
{
struct smbd_connection *info = server->smbd_conn;
struct smbd_response *response;
unsigned long flags;
if (!info) {
log_rdma_event(INFO, "rdma session already destroyed\n");
return;
}
log_rdma_event(INFO, "destroying rdma session\n");
if (info->transport_status != SMBD_DISCONNECTED) {
rdma_disconnect(server->smbd_conn->id);
log_rdma_event(INFO, "wait for transport being disconnected\n");
wait_event_interruptible(
info->disconn_wait,
info->transport_status == SMBD_DISCONNECTED);
}
/* Kick off the disconnection process */
smbd_disconnect_rdma_connection(info);
log_rdma_event(INFO, "destroying qp\n");
ib_drain_qp(info->id->qp);
rdma_destroy_qp(info->id);
log_rdma_event(INFO, "wait for transport being destroyed\n");
wait_event(info->wait_destroy,
info->transport_status == SMBD_DESTROYED);
log_rdma_event(INFO, "cancelling idle timer\n");
cancel_delayed_work_sync(&info->idle_timer_work);
log_rdma_event(INFO, "cancelling send immediate work\n");
cancel_delayed_work_sync(&info->send_immediate_work);
log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
wait_event(info->wait_send_pending,
atomic_read(&info->send_pending) == 0);
wait_event(info->wait_send_payload_pending,
atomic_read(&info->send_payload_pending) == 0);
/* It's not possible for the upper layer to get to reassembly */
log_rdma_event(INFO, "drain the reassembly queue\n");
do {
spin_lock_irqsave(&info->reassembly_queue_lock, flags);
response = _get_first_reassembly(info);
if (response) {
list_del(&response->list);
spin_unlock_irqrestore(
&info->reassembly_queue_lock, flags);
put_receive_buffer(info, response);
} else
spin_unlock_irqrestore(
&info->reassembly_queue_lock, flags);
} while (response);
info->reassembly_data_length = 0;
log_rdma_event(INFO, "free receive buffers\n");
wait_event(info->wait_receive_queues,
info->count_receive_queue + info->count_empty_packet_queue
== info->receive_credit_max);
destroy_receive_buffers(info);
/*
* For performance reasons, memory registration and deregistration
* are not locked by srv_mutex. It is possible some processes are
* blocked on the transport srv_mutex while holding memory registrations.
* Release the transport srv_mutex to allow them to hit the failure
* path when sending data, and then release the memory registrations.
*/
log_rdma_event(INFO, "freeing mr list\n");
wake_up_interruptible_all(&info->wait_mr);
while (atomic_read(&info->mr_used_count)) {
mutex_unlock(&server->srv_mutex);
msleep(1000);
mutex_lock(&server->srv_mutex);
}
destroy_mr_list(info);
ib_free_cq(info->send_cq);
ib_free_cq(info->recv_cq);
ib_dealloc_pd(info->pd);
rdma_destroy_id(info->id);
/* free mempools */
mempool_destroy(info->request_mempool);
kmem_cache_destroy(info->request_cache);
mempool_destroy(info->response_mempool);
kmem_cache_destroy(info->response_cache);
info->transport_status = SMBD_DESTROYED;
destroy_workqueue(info->workqueue);
kfree(info);
......@@ -1513,17 +1507,9 @@ int smbd_reconnect(struct TCP_Server_Info *server)
*/
if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
log_rdma_event(INFO, "disconnecting transport\n");
smbd_disconnect_rdma_connection(server->smbd_conn);
smbd_destroy(server);
}
/* wait until the transport is destroyed */
if (!wait_event_timeout(server->smbd_conn->wait_destroy,
server->smbd_conn->transport_status == SMBD_DESTROYED, 5*HZ))
return -EAGAIN;
destroy_workqueue(server->smbd_conn->workqueue);
kfree(server->smbd_conn);
create_conn:
log_rdma_event(INFO, "creating rdma session\n");
server->smbd_conn = smbd_get_connection(
......@@ -1739,12 +1725,13 @@ static struct smbd_connection *_smbd_get_connection(
conn_param.retry_count = SMBD_CM_RETRY;
conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
conn_param.flow_control = 0;
init_waitqueue_head(&info->wait_destroy);
log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
&addr_in->sin_addr, port);
init_waitqueue_head(&info->conn_wait);
init_waitqueue_head(&info->disconn_wait);
init_waitqueue_head(&info->wait_reassembly_queue);
rc = rdma_connect(info->id, &conn_param);
if (rc) {
log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
......@@ -1768,19 +1755,11 @@ static struct smbd_connection *_smbd_get_connection(
}
init_waitqueue_head(&info->wait_send_queue);
init_waitqueue_head(&info->wait_reassembly_queue);
INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
queue_delayed_work(info->workqueue, &info->idle_timer_work,
info->keep_alive_interval*HZ);
init_waitqueue_head(&info->wait_smbd_send_pending);
info->smbd_send_pending = 0;
init_waitqueue_head(&info->wait_smbd_recv_pending);
info->smbd_recv_pending = 0;
init_waitqueue_head(&info->wait_send_pending);
atomic_set(&info->send_pending, 0);
......@@ -1788,7 +1767,6 @@ static struct smbd_connection *_smbd_get_connection(
atomic_set(&info->send_payload_pending, 0);
INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
INIT_WORK(&info->destroy_work, smbd_destroy_rdma_work);
INIT_WORK(&info->recv_done_work, smbd_recv_done_work);
INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
info->new_credits_offered = 0;
......@@ -1810,7 +1788,7 @@ static struct smbd_connection *_smbd_get_connection(
allocate_mr_failed:
/* At this point, need a full transport shutdown */
smbd_destroy(info);
smbd_destroy(server);
return NULL;
negotiation_failed:
......@@ -1882,11 +1860,6 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
int rc;
again:
if (info->transport_status != SMBD_CONNECTED) {
log_read(ERR, "disconnected\n");
return -ENODEV;
}
/*
* No need to hold the reassembly queue lock all the time as we are
* the only one reading from the front of the queue. The transport
......@@ -2000,7 +1973,12 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
info->transport_status != SMBD_CONNECTED);
/* Don't return any data if interrupted */
if (rc)
return -ENODEV;
return rc;
if (info->transport_status != SMBD_CONNECTED) {
log_read(ERR, "disconnected\n");
return 0;
}
goto again;
}
......@@ -2052,8 +2030,6 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
unsigned int to_read, page_offset;
int rc;
info->smbd_recv_pending++;
if (iov_iter_rw(&msg->msg_iter) == WRITE) {
/* It's a bug in upper layer to get there */
cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
......@@ -2084,9 +2060,6 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
}
out:
info->smbd_recv_pending--;
wake_up(&info->wait_smbd_recv_pending);
/* SMBDirect will read it all or nothing */
if (rc > 0)
msg->msg_iter.count = 0;
......@@ -2099,7 +2072,8 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
* rqst: the data to write
* return value: 0 if successfully written, otherwise error code
*/
int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
int smbd_send(struct TCP_Server_Info *server,
int num_rqst, struct smb_rqst *rqst_array)
{
struct smbd_connection *info = server->smbd_conn;
struct kvec vec;
......@@ -2111,54 +2085,49 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
info->max_send_size - sizeof(struct smbd_data_transfer);
struct kvec *iov;
int rc;
struct smb_rqst *rqst;
int rqst_idx;
info->smbd_send_pending++;
if (info->transport_status != SMBD_CONNECTED) {
rc = -ENODEV;
rc = -EAGAIN;
goto done;
}
/*
* Skip the RFC1002 length defined in MS-SMB2 section 2.1
* It is used only for the TCP transport in iov[0].
* In the future we may want to add a transport layer under the
* protocol layer so that this is only issued to the TCP transport.
*/
if (rqst->rq_iov[0].iov_len != 4) {
log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
return -EINVAL;
}
/*
* Add in the page array if there is one. The caller needs to set
* rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
* ends at page boundary
*/
buflen = smb_rqst_len(server, rqst);
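/*
 * remaining_data_length carries the total payload left across all
 * requests in the compound; each packet header advertises it so the
 * peer knows how much fragmented data is still to come.
 */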
remaining_data_length = 0;
for (i = 0; i < num_rqst; i++)
remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
if (buflen + sizeof(struct smbd_data_transfer) >
if (remaining_data_length + sizeof(struct smbd_data_transfer) >
info->max_fragmented_send_size) {
log_write(ERR, "payload size %d > max size %d\n",
buflen, info->max_fragmented_send_size);
remaining_data_length, info->max_fragmented_send_size);
rc = -EINVAL;
goto done;
}
iov = &rqst->rq_iov[1];
rqst_idx = 0;
next_rqst:
rqst = &rqst_array[rqst_idx];
iov = rqst->rq_iov;
cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
for (i = 0; i < rqst->rq_nvec-1; i++)
cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
rqst_idx, smb_rqst_len(server, rqst));
for (i = 0; i < rqst->rq_nvec; i++)
dump_smb(iov[i].iov_base, iov[i].iov_len);
remaining_data_length = buflen;
log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
"rq_tailsz=%d buflen=%d\n",
rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
rqst->rq_tailsz, buflen);
log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
"rq_tailsz=%d buflen=%lu\n",
rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
rqst->rq_tailsz, smb_rqst_len(server, rqst));
start = i = iov[0].iov_len ? 0 : 1;
start = i = 0;
buflen = 0;
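/*
 * Batch consecutive kvecs up to the maximum payload of a single
 * SMBD packet; once the next vec would overflow it, post the batch
 * gathered so far.
 */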
while (true) {
buflen += iov[i].iov_len;
......@@ -2206,14 +2175,14 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
goto done;
}
i++;
if (i == rqst->rq_nvec-1)
if (i == rqst->rq_nvec)
break;
}
start = i;
buflen = 0;
} else {
i++;
if (i == rqst->rq_nvec-1) {
if (i == rqst->rq_nvec) {
/* send out all remaining vecs */
remaining_data_length -= buflen;
log_write(INFO,
......@@ -2257,6 +2226,10 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
}
}
rqst_idx++;
if (rqst_idx < num_rqst)
goto next_rqst;
done:
/*
* As an optimization, we don't wait for individual I/O to finish
......@@ -2268,9 +2241,6 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
wait_event(info->wait_send_payload_pending,
atomic_read(&info->send_payload_pending) == 0);
info->smbd_send_pending--;
wake_up(&info->wait_smbd_send_pending);
return rc;
}
......
......@@ -70,12 +70,11 @@ struct smbd_connection {
int ri_rc;
struct completion ri_done;
wait_queue_head_t conn_wait;
wait_queue_head_t wait_destroy;
wait_queue_head_t disconn_wait;
struct completion negotiate_completion;
bool negotiate_done;
struct work_struct destroy_work;
struct work_struct disconnect_work;
struct work_struct recv_done_work;
struct work_struct post_send_credits_work;
......@@ -123,13 +122,6 @@ struct smbd_connection {
wait_queue_head_t wait_for_mr_cleanup;
/* Activity accounting */
/* Pending requests issued by the upper layer */
int smbd_send_pending;
wait_queue_head_t wait_smbd_send_pending;
int smbd_recv_pending;
wait_queue_head_t wait_smbd_recv_pending;
atomic_t send_pending;
wait_queue_head_t wait_send_pending;
atomic_t send_payload_pending;
......@@ -288,11 +280,12 @@ struct smbd_connection *smbd_get_connection(
/* Reconnect SMBDirect session */
int smbd_reconnect(struct TCP_Server_Info *server);
/* Destroy SMBDirect session */
void smbd_destroy(struct smbd_connection *info);
void smbd_destroy(struct TCP_Server_Info *server);
/* Interface for carrying upper layer I/O through send/recv */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
int smbd_send(struct TCP_Server_Info *server,
int num_rqst, struct smb_rqst *rqst);
enum mr_state {
MR_READY,
......@@ -330,9 +323,9 @@ struct smbd_connection {};
static inline void *smbd_get_connection(
struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
static inline void smbd_destroy(struct smbd_connection *info) {}
static inline void smbd_destroy(struct TCP_Server_Info *server) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
#endif
#endif
......@@ -35,6 +35,33 @@
* below). Additional detail on less common ones can be found in MS-FSCC
* section 2.3.
*/
/*
* FSCTL values are 32 bits and are constructed as
* <device 16bits> <access 2bits> <function 12bits> <method 2bits>
*/
/* Device */
#define FSCTL_DEVICE_DFS (0x0006 << 16)
#define FSCTL_DEVICE_FILE_SYSTEM (0x0009 << 16)
#define FSCTL_DEVICE_NAMED_PIPE (0x0011 << 16)
#define FSCTL_DEVICE_NETWORK_FILE_SYSTEM (0x0014 << 16)
#define FSCTL_DEVICE_MASK 0xffff0000
/* Access */
#define FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS (0x00 << 14)
#define FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS (0x01 << 14)
#define FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS (0x02 << 14)
#define FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS (0x03 << 14)
#define FSCTL_DEVICE_ACCESS_MASK 0x0000c000
/* Function */
#define FSCTL_DEVICE_FUNCTION_MASK 0x00003ffc
/* Method */
#define FSCTL_DEVICE_METHOD_BUFFERED 0x00
#define FSCTL_DEVICE_METHOD_IN_DIRECT 0x01
#define FSCTL_DEVICE_METHOD_OUT_DIRECT 0x02
#define FSCTL_DEVICE_METHOD_NEITHER 0x03
#define FSCTL_DEVICE_METHOD_MASK 0x00000003
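As a quick sketch of the layout above (the helper names below are
invented for illustration and are not part of this patch), the masks
pull a code apart like so:

/* e.g. FSCTL_QUERY_ALLOCATED_RANGES (0x000940CF) decodes to
 * FSCTL_DEVICE_FILE_SYSTEM | FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS,
 * function 0x33, FSCTL_DEVICE_METHOD_NEITHER.
 */
static inline __u32 fsctl_device(__u32 fsctl)
{
	return fsctl & FSCTL_DEVICE_MASK;
}
static inline __u32 fsctl_access(__u32 fsctl)
{
	return fsctl & FSCTL_DEVICE_ACCESS_MASK;
}
static inline __u32 fsctl_function(__u32 fsctl)
{
	return (fsctl & FSCTL_DEVICE_FUNCTION_MASK) >> 2;
}
static inline __u32 fsctl_method(__u32 fsctl)
{
	return fsctl & FSCTL_DEVICE_METHOD_MASK;
}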
#define FSCTL_DFS_GET_REFERRALS 0x00060194
#define FSCTL_DFS_GET_REFERRALS_EX 0x000601B0
#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
......@@ -76,7 +103,7 @@
#define FSCTL_SET_ZERO_ON_DEALLOC 0x00090194 /* BB add struct */
#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
#define FSCTL_GET_INTEGRITY_INFORMATION 0x0009027C
#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF /* BB add struct */
#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF
#define FSCTL_SET_DEFECT_MANAGEMENT 0x00098134 /* BB add struct */
#define FSCTL_FILE_LEVEL_TRIM 0x00098208 /* BB add struct */
#define FSCTL_DUPLICATE_EXTENTS_TO_FILE 0x00098344
......
......@@ -104,7 +104,10 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
__le16 command = midEntry->server->vals->lock_cmd;
__u16 smb_cmd = le16_to_cpu(midEntry->command);
unsigned long now;
unsigned long roundtrip_time;
struct TCP_Server_Info *server = midEntry->server;
#endif
midEntry->mid_state = MID_FREE;
atomic_dec(&midCount);
......@@ -114,6 +117,23 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
now = jiffies;
if (now < midEntry->when_alloc)
cifs_dbg(VFS, "invalid mid allocation time\n");
roundtrip_time = now - midEntry->when_alloc;
if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
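/* first completion of this command type seeds both bounds */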
if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
server->slowest_cmd[smb_cmd] = roundtrip_time;
server->fastest_cmd[smb_cmd] = roundtrip_time;
} else {
if (server->slowest_cmd[smb_cmd] < roundtrip_time)
server->slowest_cmd[smb_cmd] = roundtrip_time;
else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
server->fastest_cmd[smb_cmd] = roundtrip_time;
}
cifs_stats_inc(&server->num_cmds[smb_cmd]);
server->time_per_cmd[smb_cmd] += roundtrip_time;
}
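/*
 * Note: from these counters the mean latency for a command is just
 * time_per_cmd[smb_cmd] / num_cmds[smb_cmd] (in jiffies), which can
 * be reported alongside the fastest/slowest samples kept above.
 */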
/*
* commands taking longer than one second (default) can be indications
* that something is wrong, unless it is quite a slow link or a very
......@@ -131,11 +151,10 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
* NB: le16_to_cpu returns unsigned so cannot be negative below
*/
if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);
if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
midEntry->mid, midEntry->pid,
trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
midEntry->when_sent, midEntry->when_received);
if (cifsFYI & CIFS_TIMER) {
pr_debug(" CIFS slow rsp: cmd %d mid %llu",
......@@ -300,7 +319,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
__be32 rfc1002_marker;
if (cifs_rdma_enabled(server) && server->smbd_conn) {
rc = smbd_send(server, rqst);
rc = smbd_send(server, num_rqst, rqst);
goto smbd_done;
}
......@@ -510,7 +529,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
return -EAGAIN;
spin_lock(&server->req_lock);
if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP) {
if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
/* oplock breaks must not be held up */
server->in_flight++;
*credits -= 1;
......@@ -819,7 +838,7 @@ SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
iov[0].iov_base = in_buf;
iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
flags |= CIFS_NO_RESP;
flags |= CIFS_NO_RSP_BUF;
rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
......@@ -1054,8 +1073,11 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
mutex_unlock(&ses->server->srv_mutex);
if (rc < 0) {
/* Sending failed for some reason - return credits back */
/*
* If sending failed for some reason or it is an oplock break that we
* will not receive a response to - return credits back
*/
if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
for (i = 0; i < num_rqst; i++)
add_credits(ses->server, &credits[i], optype);
goto out;
......@@ -1076,9 +1098,6 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
smb311_update_preauth_hash(ses, rqst[0].rq_iov,
rqst[0].rq_nvec);
if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
goto out;
for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(ses->server, midQ[i]);
if (rc != 0)
......@@ -1132,7 +1151,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
flags & CIFS_LOG_ERROR);
/* mark it so buf will not be freed by cifs_delete_mid */
if ((flags & CIFS_NO_RESP) == 0)
if ((flags & CIFS_NO_RSP_BUF) == 0)
midQ[i]->resp_buf = NULL;
}
......@@ -1283,9 +1302,6 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc < 0)
goto out;
if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
goto out;
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
send_cancel(ses->server, &rqst, midQ);
......