Commit 63342b1d authored by Linus Torvalds

Merge tag '6.4-rc-smb3-client-fixes-part2' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs fixes from Steve French:
 "smb3 client fixes, mostly DFS or reconnect related:

   - Two DFS connection sharing fixes

   - DFS refresh fix

   - Reconnect fix

   - Two potential use after free fixes

   - Also print prefix path in mount debug msg

   - Two small cleanup fixes"

* tag '6.4-rc-smb3-client-fixes-part2' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: Remove unneeded semicolon
  cifs: fix sharing of DFS connections
  cifs: avoid potential races when handling multiple dfs tcons
  cifs: protect access of TCP_Server_Info::{origin,leaf}_fullpath
  cifs: fix potential race when tree connecting ipc
  cifs: fix potential use-after-free bugs in TCP_Server_Info::hostname
  cifs: print smb3_fs_context::source when mounting
  cifs: protect session status check in smb2_reconnect()
  SMB3.1.1: correct definition for app_instance_id create contexts
parents d6b8a8c4 9ee04875
@@ -280,8 +280,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 		seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
 			c, server->conn_id);
+		spin_lock(&server->srv_lock);
 		if (server->hostname)
 			seq_printf(m, "Hostname: %s ", server->hostname);
+		spin_unlock(&server->srv_lock);
 #ifdef CONFIG_CIFS_SMB_DIRECT
 		if (!server->rdma)
 			goto skip_rdma;

@@ -623,10 +625,13 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
 				server->fastest_cmd[j],
 				server->slowest_cmd[j]);
 		for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
-			if (atomic_read(&server->smb2slowcmd[j]))
+			if (atomic_read(&server->smb2slowcmd[j])) {
+				spin_lock(&server->srv_lock);
 				seq_printf(m, " %d slow responses from %s for command %d\n",
 					atomic_read(&server->smb2slowcmd[j]),
 					server->hostname, j);
+				spin_unlock(&server->srv_lock);
+			}
 #endif /* STATS2 */
 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
...
@@ -81,19 +81,19 @@ do { \
 #define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
 do { \
-	const char *sn = ""; \
-	if (server && server->hostname) \
-		sn = server->hostname; \
+	spin_lock(&server->srv_lock); \
 	if ((type) & FYI && cifsFYI & CIFS_INFO) { \
 		pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
-				      __FILE__, sn, ##__VA_ARGS__); \
+				      __FILE__, server->hostname, \
+				      ##__VA_ARGS__); \
 	} else if ((type) & VFS) { \
 		pr_err_ ## ratefunc("VFS: \\\\%s " fmt, \
-				    sn, ##__VA_ARGS__); \
+				    server->hostname, ##__VA_ARGS__); \
 	} else if ((type) & NOISY && (NOISY != 0)) { \
 		pr_debug_ ## ratefunc("\\\\%s " fmt, \
-				      sn, ##__VA_ARGS__); \
+				      server->hostname, ##__VA_ARGS__); \
 	} \
+	spin_unlock(&server->srv_lock); \
 } while (0)

 #define cifs_server_dbg(type, fmt, ...) \
...
@@ -874,14 +874,12 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
 	struct cifs_mnt_data mnt_data;
 	struct dentry *root;

-	/*
-	 * Prints in Kernel / CIFS log the attempted mount operation
-	 *	If CIFS_DEBUG && cifs_FYI
-	 */
-	if (cifsFYI)
-		cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
-	else
-		cifs_info("Attempting to mount %s\n", old_ctx->UNC);
+	if (cifsFYI) {
+		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
+			 old_ctx->source, flags);
+	} else {
+		cifs_info("Attempting to mount %s\n", old_ctx->source);
+	}

 	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
 	if (cifs_sb == NULL) {
...
@@ -736,17 +736,23 @@ struct TCP_Server_Info {
 #endif
 	struct mutex refpath_lock; /* protects leaf_fullpath */
 	/*
-	 * Canonical DFS full paths that were used to chase referrals in mount and reconnect.
+	 * origin_fullpath: Canonical copy of smb3_fs_context::source.
+	 *                  It is used for matching existing DFS tcons.
 	 *
-	 * origin_fullpath: first or original referral path
-	 * leaf_fullpath: last referral path (might be changed due to nested links in reconnect)
+	 * leaf_fullpath: Canonical DFS referral path related to this
+	 *                connection.
+	 *                It is used in DFS cache refresher, reconnect and may
+	 *                change due to nested DFS links.
 	 *
-	 * current_fullpath: pointer to either origin_fullpath or leaf_fullpath
-	 * NOTE: cannot be accessed outside cifs_reconnect() and smb2_reconnect()
+	 * Both protected by @refpath_lock and @srv_lock. The @refpath_lock is
+	 * mostly used for not requiring a copy of @leaf_fullpath when getting
+	 * cached or new DFS referrals (which might also sleep during I/O).
+	 * While @srv_lock is held for making string and NULL comparisons against
+	 * both fields as in mount(2) and cache refresh.
 	 *
-	 * format: \\HOST\SHARE\[OPTIONAL PATH]
+	 * format: \\HOST\SHARE[\OPTIONAL PATH]
 	 */
-	char *origin_fullpath, *leaf_fullpath, *current_fullpath;
+	char *origin_fullpath, *leaf_fullpath;
 };

 static inline bool is_smb1(struct TCP_Server_Info *server)

@@ -1232,8 +1238,8 @@ struct cifs_tcon {
 	struct cached_fids *cfids;
 	/* BB add field for back pointer to sb struct(s)? */
 #ifdef CONFIG_CIFS_DFS_UPCALL
-	struct list_head ulist; /* cache update list */
 	struct list_head dfs_ses_list;
+	struct delayed_work dfs_cache_work;
 #endif
 	struct delayed_work query_interfaces; /* query interfaces workqueue job */
 };

@@ -1750,7 +1756,6 @@ struct cifs_mount_ctx {
 	struct TCP_Server_Info *server;
 	struct cifs_ses *ses;
 	struct cifs_tcon *tcon;
-	char *origin_fullpath, *leaf_fullpath;
 	struct list_head dfs_ses_list;
 };
...
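The rewritten comment above spells out which locks guard origin_fullpath and leaf_fullpath. The following is a minimal sketch of the access pattern it describes, assuming only the fields and locks shown in this hunk; the helper name example_dup_leaf_fullpath() is illustrative and not part of the patch.

/* Illustrative only: short string/NULL checks take @srv_lock, and the
 * path is duplicated before being used across anything that may sleep
 * (longer-lived users take @refpath_lock instead, as in the dfs.c
 * hunks further down).
 */
static char *example_dup_leaf_fullpath(struct TCP_Server_Info *server)
{
	char *path = NULL;

	spin_lock(&server->srv_lock);
	if (server->leaf_fullpath)
		path = kstrdup(server->leaf_fullpath, GFP_ATOMIC);
	spin_unlock(&server->srv_lock);
	return path;
}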
@@ -8,6 +8,7 @@
 #ifndef _CIFSPROTO_H
 #define _CIFSPROTO_H
 #include <linux/nls.h>
+#include <linux/ctype.h>
 #include "trace.h"
 #ifdef CONFIG_CIFS_DFS_UPCALL
 #include "dfs_cache.h"

@@ -572,7 +573,7 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
 extern struct TCP_Server_Info *
 cifs_find_tcp_session(struct smb3_fs_context *ctx);
-extern void cifs_put_smb_ses(struct cifs_ses *ses);
+void __cifs_put_smb_ses(struct cifs_ses *ses);
 extern struct cifs_ses *
 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx);

@@ -696,4 +697,45 @@ struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
 void cifs_put_tcon_super(struct super_block *sb);
 int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);

+/* Put references of @ses and @ses->dfs_root_ses */
+static inline void cifs_put_smb_ses(struct cifs_ses *ses)
+{
+	struct cifs_ses *rses = ses->dfs_root_ses;
+
+	__cifs_put_smb_ses(ses);
+	if (rses)
+		__cifs_put_smb_ses(rses);
+}
+
+/* Get an active reference of @ses and @ses->dfs_root_ses.
+ *
+ * NOTE: make sure to call this function when incrementing reference count of
+ * @ses to ensure that any DFS root session attached to it (@ses->dfs_root_ses)
+ * will also get its reference count incremented.
+ *
+ * cifs_put_smb_ses() will put both references, so call it when you're done.
+ */
+static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
+{
+	lockdep_assert_held(&cifs_tcp_ses_lock);
+	ses->ses_count++;
+	if (ses->dfs_root_ses)
+		ses->dfs_root_ses->ses_count++;
+}
+
+static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
+{
+	if (strlen(s1) != strlen(s2))
+		return false;
+	for (; *s1; s1++, s2++) {
+		if (*s1 == '/' || *s1 == '\\') {
+			if (*s2 != '/' && *s2 != '\\')
+				return false;
+		} else if (tolower(*s1) != tolower(*s2))
+			return false;
+	}
+	return true;
+}
+
 #endif /* _CIFSPROTO_H */
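The NOTE on cifs_smb_ses_inc_refcount() above describes the pairing rule with the new cifs_put_smb_ses(). A minimal usage sketch follows, assuming only what this hunk declares; the function name example_use_ses() is illustrative and not part of the patch.

/* Illustrative only: pairing the new refcount helpers. The bump must
 * happen under cifs_tcp_ses_lock (enforced by the lockdep assertion),
 * and a single cifs_put_smb_ses() later drops both @ses and the DFS
 * root session reference it implicitly took.
 */
static void example_use_ses(struct cifs_ses *ses)
{
	spin_lock(&cifs_tcp_ses_lock);
	cifs_smb_ses_inc_refcount(ses);
	spin_unlock(&cifs_tcp_ses_lock);

	/* ... use @ses without holding the list lock ... */

	cifs_put_smb_ses(ses);
}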
This diff is collapsed.
@@ -99,7 +99,7 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
 	return rc;
 }

-static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
 {
 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
 	struct dfs_root_ses *root_ses;

@@ -127,7 +127,7 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
 {
 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
 	struct dfs_info3_param ref = {};
-	bool is_refsrv = false;
+	bool is_refsrv;
 	int rc, rc2;

 	rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref);

@@ -157,8 +157,10 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
 		rc = cifs_is_path_remote(mnt_ctx);
 	}

+	dfs_cache_noreq_update_tgthint(ref_path + 1, tit);
+
 	if (rc == -EREMOTE && is_refsrv) {
-		rc2 = get_root_smb_session(mnt_ctx);
+		rc2 = add_root_smb_session(mnt_ctx);
 		if (rc2)
 			rc = rc2;
 	}

@@ -248,16 +250,19 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
 	tcon = mnt_ctx->tcon;

 	mutex_lock(&server->refpath_lock);
+	spin_lock(&server->srv_lock);
 	if (!server->origin_fullpath) {
 		server->origin_fullpath = origin_fullpath;
-		server->current_fullpath = server->leaf_fullpath;
 		origin_fullpath = NULL;
 	}
+	spin_unlock(&server->srv_lock);
 	mutex_unlock(&server->refpath_lock);

 	if (list_empty(&tcon->dfs_ses_list)) {
 		list_replace_init(&mnt_ctx->dfs_ses_list,
 				  &tcon->dfs_ses_list);
+		queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+				   dfs_cache_get_ttl() * HZ);
 	} else {
 		dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
 	}

@@ -272,15 +277,21 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
 int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
 {
-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+	struct cifs_ses *ses;
+	char *source = ctx->source;
+	bool nodfs = ctx->nodfs;
 	int rc;

 	*isdfs = false;
+	/* Temporarily set @ctx->source to NULL as we're not matching DFS
+	 * superblocks yet. See cifs_match_super() and match_server().
+	 */
+	ctx->source = NULL;
 	rc = get_session(mnt_ctx, NULL);
 	if (rc)
-		return rc;
+		goto out;
+
 	ctx->dfs_root_ses = mnt_ctx->ses;
 	/*
 	 * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally

@@ -289,23 +300,41 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
 	 * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
 	 * to respond with PATH_NOT_COVERED to requests that include the prefix.
 	 */
-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
-	    dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL)) {
+	if (!nodfs) {
+		rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL);
+		if (rc) {
+			if (rc != -ENOENT && rc != -EOPNOTSUPP)
+				goto out;
+			nodfs = true;
+		}
+	}
+	if (nodfs) {
 		rc = cifs_mount_get_tcon(mnt_ctx);
-		if (rc)
-			return rc;
-
-		rc = cifs_is_path_remote(mnt_ctx);
-		if (!rc || rc != -EREMOTE)
-			return rc;
+		if (!rc)
+			rc = cifs_is_path_remote(mnt_ctx);
+		goto out;
 	}

 	*isdfs = true;
-	rc = get_root_smb_session(mnt_ctx);
-	if (rc)
-		return rc;
-
-	return __dfs_mount_share(mnt_ctx);
+	/*
+	 * Prevent DFS root session of being put in the first call to
+	 * cifs_mount_put_conns(). If another DFS root server was not found
+	 * while chasing the referrals (@ctx->dfs_root_ses == @ses), then we
+	 * can safely put extra refcount of @ses.
+	 */
+	ses = mnt_ctx->ses;
+	mnt_ctx->ses = NULL;
+	mnt_ctx->server = NULL;
+	rc = __dfs_mount_share(mnt_ctx);
+	if (ses == ctx->dfs_root_ses)
+		cifs_put_smb_ses(ses);
+out:
+	/*
+	 * Restore previous value of @ctx->source so DFS superblock can be
+	 * matched in cifs_match_super().
+	 */
+	ctx->source = source;
+	return rc;
 }

 /* Update dfs referral path of superblock */

@@ -342,10 +371,11 @@ static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb
 		rc = PTR_ERR(npath);
 	} else {
 		mutex_lock(&server->refpath_lock);
+		spin_lock(&server->srv_lock);
 		kfree(server->leaf_fullpath);
 		server->leaf_fullpath = npath;
+		spin_unlock(&server->srv_lock);
 		mutex_unlock(&server->refpath_lock);
-		server->current_fullpath = server->leaf_fullpath;
 	}
 	return rc;
 }

@@ -374,6 +404,54 @@ static int target_share_matches_server(struct TCP_Server_Info *server, char *sha
 	return rc;
 }

+static void __tree_connect_ipc(const unsigned int xid, char *tree,
+			       struct cifs_sb_info *cifs_sb,
+			       struct cifs_ses *ses)
+{
+	struct TCP_Server_Info *server = ses->server;
+	struct cifs_tcon *tcon = ses->tcon_ipc;
+	int rc;
+
+	spin_lock(&ses->ses_lock);
+	spin_lock(&ses->chan_lock);
+	if (cifs_chan_needs_reconnect(ses, server) ||
+	    ses->ses_status != SES_GOOD) {
+		spin_unlock(&ses->chan_lock);
+		spin_unlock(&ses->ses_lock);
+		cifs_server_dbg(FYI, "%s: skipping ipc reconnect due to disconnected ses\n",
+				__func__);
+		return;
+	}
+	spin_unlock(&ses->chan_lock);
+	spin_unlock(&ses->ses_lock);
+
+	cifs_server_lock(server);
+	scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+	cifs_server_unlock(server);
+
+	rc = server->ops->tree_connect(xid, ses, tree, tcon,
+				       cifs_sb->local_nls);
+	cifs_server_dbg(FYI, "%s: tree_reconnect %s: %d\n", __func__, tree, rc);
+	spin_lock(&tcon->tc_lock);
+	if (rc) {
+		tcon->status = TID_NEED_TCON;
+	} else {
+		tcon->status = TID_GOOD;
+		tcon->need_reconnect = false;
+	}
+	spin_unlock(&tcon->tc_lock);
+}
+
+static void tree_connect_ipc(const unsigned int xid, char *tree,
+			     struct cifs_sb_info *cifs_sb,
+			     struct cifs_tcon *tcon)
+{
+	struct cifs_ses *ses = tcon->ses;
+
+	__tree_connect_ipc(xid, tree, cifs_sb, ses);
+	__tree_connect_ipc(xid, tree, cifs_sb, CIFS_DFS_ROOT_SES(ses));
+}
+
 static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
 				     struct cifs_sb_info *cifs_sb, char *tree, bool islink,
 				     struct dfs_cache_tgt_list *tl)

@@ -382,7 +460,6 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
 	struct TCP_Server_Info *server = tcon->ses->server;
 	const struct smb_version_operations *ops = server->ops;
 	struct cifs_ses *root_ses = CIFS_DFS_ROOT_SES(tcon->ses);
-	struct cifs_tcon *ipc = root_ses->tcon_ipc;
 	char *share = NULL, *prefix = NULL;
 	struct dfs_cache_tgt_iterator *tit;
 	bool target_match;

@@ -403,7 +480,7 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
 		share = prefix = NULL;

 		/* Check if share matches with tcp ses */
-		rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
+		rc = dfs_cache_get_tgt_share(server->leaf_fullpath + 1, tit, &share, &prefix);
 		if (rc) {
 			cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
 			break;

@@ -417,19 +494,15 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
 			continue;
 		}

-		dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
-
-		if (ipc->need_reconnect) {
-			scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
-			rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
-			cifs_dbg(FYI, "%s: reconnect ipc: %d\n", __func__, rc);
-		}
+		dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, tit);
+		tree_connect_ipc(xid, tree, cifs_sb, tcon);

 		scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
 		if (!islink) {
 			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
 			break;
 		}

 		/*
 		 * If no dfs referrals were returned from link target, then just do a TREE_CONNECT
 		 * to it. Otherwise, cache the dfs referral and then mark current tcp ses for

@@ -539,8 +612,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
 	cifs_sb = CIFS_SB(sb);

 	/* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
-	if (!server->current_fullpath ||
-	    dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
+	if (!server->leaf_fullpath ||
+	    dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {
 		rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
 		goto out;
 	}
...
@@ -43,8 +43,12 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
 	size_t len;
 	char *s;

-	if (unlikely(!server->origin_fullpath))
+	spin_lock(&server->srv_lock);
+	if (unlikely(!server->origin_fullpath)) {
+		spin_unlock(&server->srv_lock);
 		return ERR_PTR(-EREMOTE);
+	}
+	spin_unlock(&server->srv_lock);

 	s = dentry_path_raw(dentry, page, PATH_MAX);
 	if (IS_ERR(s))

@@ -53,13 +57,18 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
 	if (!s[1])
 		s++;

+	spin_lock(&server->srv_lock);
 	len = strlen(server->origin_fullpath);
-	if (s < (char *)page + len)
+	if (s < (char *)page + len) {
+		spin_unlock(&server->srv_lock);
 		return ERR_PTR(-ENAMETOOLONG);
+	}

 	s -= len;
 	memcpy(s, server->origin_fullpath, len);
+	spin_unlock(&server->srv_lock);
+
 	convert_delimiter(s, '/');
 	return s;
 }
...
@@ -20,12 +20,14 @@
 #include "cifs_unicode.h"
 #include "smb2glob.h"
 #include "dns_resolve.h"
+#include "dfs.h"
 #include "dfs_cache.h"

 #define CACHE_HTABLE_SIZE 32
 #define CACHE_MAX_ENTRIES 64
 #define CACHE_MIN_TTL 120 /* 2 minutes */
+#define CACHE_DEFAULT_TTL 300 /* 5 minutes */

 #define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))

@@ -50,10 +52,9 @@ struct cache_entry {
 };

 static struct kmem_cache *cache_slab __read_mostly;
-static struct workqueue_struct *dfscache_wq __read_mostly;
+struct workqueue_struct *dfscache_wq;

-static int cache_ttl;
-static DEFINE_SPINLOCK(cache_ttl_lock);
+atomic_t dfs_cache_ttl;

 static struct nls_table *cache_cp;

@@ -65,10 +66,6 @@ static atomic_t cache_count;
 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
 static DECLARE_RWSEM(htable_rw_lock);

-static void refresh_cache_worker(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
-
 /**
  * dfs_cache_canonical_path - get a canonical DFS path
 *

@@ -290,7 +287,9 @@ int dfs_cache_init(void)
 	int rc;
 	int i;

-	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
+	dfscache_wq = alloc_workqueue("cifs-dfscache",
+				      WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
+				      0);
 	if (!dfscache_wq)
 		return -ENOMEM;

@@ -306,6 +305,7 @@ int dfs_cache_init(void)
 		INIT_HLIST_HEAD(&cache_htable[i]);

 	atomic_set(&cache_count, 0);
+	atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
 	cache_cp = load_nls("utf8");
 	if (!cache_cp)
 		cache_cp = load_nls_default();

@@ -480,6 +480,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 	int rc;
 	struct cache_entry *ce;
 	unsigned int hash;
+	int ttl;

 	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

@@ -496,15 +497,8 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 	if (IS_ERR(ce))
 		return ce;

-	spin_lock(&cache_ttl_lock);
-	if (!cache_ttl) {
-		cache_ttl = ce->ttl;
-		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	} else {
-		cache_ttl = min_t(int, cache_ttl, ce->ttl);
-		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	}
-	spin_unlock(&cache_ttl_lock);
+	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
+	atomic_set(&dfs_cache_ttl, ttl);

 	hlist_add_head(&ce->hlist, &cache_htable[hash]);
 	dump_ce(ce);

@@ -616,7 +610,6 @@ static struct cache_entry *lookup_cache_entry(const char *path)
  */
 void dfs_cache_destroy(void)
 {
-	cancel_delayed_work_sync(&refresh_task);
 	unload_nls(cache_cp);
 	flush_cache_ents();
 	kmem_cache_destroy(cache_slab);

@@ -1142,6 +1135,7 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
  * target shares in @refs.
  */
 static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+					 const char *path,
 					 struct dfs_cache_tgt_list *old_tl,
 					 struct dfs_cache_tgt_list *new_tl)
 {

@@ -1153,8 +1147,10 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
 		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
 			if (target_share_equal(server,
 					       dfs_cache_get_tgt_name(oit),
-					       dfs_cache_get_tgt_name(nit)))
+					       dfs_cache_get_tgt_name(nit))) {
+				dfs_cache_noreq_update_tgthint(path, nit);
 				return;
+			}
 		}
 	}

@@ -1162,13 +1158,28 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
 	cifs_signal_cifsd_for_reconnect(server, true);
 }

+static bool is_ses_good(struct cifs_ses *ses)
+{
+	struct TCP_Server_Info *server = ses->server;
+	struct cifs_tcon *tcon = ses->tcon_ipc;
+	bool ret;
+
+	spin_lock(&ses->ses_lock);
+	spin_lock(&ses->chan_lock);
+	ret = !cifs_chan_needs_reconnect(ses, server) &&
+		ses->ses_status == SES_GOOD &&
+		!tcon->need_reconnect;
+	spin_unlock(&ses->chan_lock);
+	spin_unlock(&ses->ses_lock);
+	return ret;
+}
+
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
+static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
 {
 	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
 	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
-	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
-	struct cifs_tcon *ipc = ses->tcon_ipc;
+	struct TCP_Server_Info *server = ses->server;
 	bool needs_refresh = false;
 	struct cache_entry *ce;
 	unsigned int xid;

@@ -1190,20 +1201,19 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
 		goto out;
 	}

-	spin_lock(&ipc->tc_lock);
-	if (ipc->status != TID_GOOD) {
-		spin_unlock(&ipc->tc_lock);
-		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
+	ses = CIFS_DFS_ROOT_SES(ses);
+	if (!is_ses_good(ses)) {
+		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
+			 __func__);
 		goto out;
 	}
-	spin_unlock(&ipc->tc_lock);

 	ce = cache_refresh_path(xid, ses, path, true);
 	if (!IS_ERR(ce)) {
 		rc = get_targets(ce, &new_tl);
 		up_read(&htable_rw_lock);
 		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
-		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
+		mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
 	}

 out:

@@ -1216,10 +1226,11 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
 static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
 {
 	struct TCP_Server_Info *server = tcon->ses->server;
+	struct cifs_ses *ses = tcon->ses;

 	mutex_lock(&server->refpath_lock);
 	if (server->leaf_fullpath)
-		__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
+		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
 	mutex_unlock(&server->refpath_lock);
 	return 0;
 }

@@ -1263,56 +1274,32 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
 	return refresh_tcon(tcon, true);
 }

-/*
- * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
- * from a DFS referral.
- */
-static void refresh_cache_worker(struct work_struct *work)
+/* Refresh all DFS referrals related to DFS tcon */
+void dfs_cache_refresh(struct work_struct *work)
 {
 	struct TCP_Server_Info *server;
-	struct cifs_tcon *tcon, *ntcon;
-	struct list_head tcons;
+	struct dfs_root_ses *rses;
+	struct cifs_tcon *tcon;
 	struct cifs_ses *ses;

-	INIT_LIST_HEAD(&tcons);
-
-	spin_lock(&cifs_tcp_ses_lock);
-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
-		if (!server->leaf_fullpath)
-			continue;
-
-		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-			if (ses->tcon_ipc) {
-				ses->ses_count++;
-				list_add_tail(&ses->tcon_ipc->ulist, &tcons);
-			}
-			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
-				if (!tcon->ipc) {
-					tcon->tc_count++;
-					list_add_tail(&tcon->ulist, &tcons);
-				}
-			}
-		}
-	}
-	spin_unlock(&cifs_tcp_ses_lock);
-
-	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
-		struct TCP_Server_Info *server = tcon->ses->server;
-
-		list_del_init(&tcon->ulist);
+	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+	ses = tcon->ses;
+	server = ses->server;

+	mutex_lock(&server->refpath_lock);
+	if (server->leaf_fullpath)
+		__refresh_tcon(server->leaf_fullpath + 1, ses, false);
+	mutex_unlock(&server->refpath_lock);
+
+	list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
+		ses = rses->ses;
+		server = ses->server;
 		mutex_lock(&server->refpath_lock);
 		if (server->leaf_fullpath)
-			__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
+			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
 		mutex_unlock(&server->refpath_lock);
-
-		if (tcon->ipc)
-			cifs_put_smb_ses(tcon->ses);
-		else
-			cifs_put_tcon(tcon);
 	}

-	spin_lock(&cache_ttl_lock);
-	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	spin_unlock(&cache_ttl_lock);
+	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+			   atomic_read(&dfs_cache_ttl) * HZ);
 }
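dfs_cache_refresh() above re-arms itself per DFS tcon instead of relying on the removed global refresh_task. The hunks that initialize and cancel tcon->dfs_cache_work sit in a part of this diff that is not expanded here, so the following is only a sketch of how that wiring presumably looks; the example_* helper names are illustrative and not part of the patch.

/* Sketch only (the actual arming/cancelling hunks are collapsed above):
 * the per-tcon refresher would be initialized once for a DFS tcon and
 * torn down before the tcon is freed.
 */
static void example_arm_dfs_refresher(struct cifs_tcon *tcon)
{
	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
			   dfs_cache_get_ttl() * HZ);
}

static void example_disarm_dfs_refresher(struct cifs_tcon *tcon)
{
	cancel_delayed_work_sync(&tcon->dfs_cache_work);
}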
@@ -13,6 +13,9 @@
 #include <linux/uuid.h>
 #include "cifsglob.h"

+extern struct workqueue_struct *dfscache_wq;
+extern atomic_t dfs_cache_ttl;
+
 #define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }

 struct dfs_cache_tgt_list {

@@ -42,6 +45,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
 			    char **prefix);
 char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
 int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
+void dfs_cache_refresh(struct work_struct *work);

 static inline struct dfs_cache_tgt_iterator *
 dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,

@@ -89,4 +93,9 @@ dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl)
 	return tl ? tl->tl_numtgts : 0;
 }

+static inline int dfs_cache_get_ttl(void)
+{
+	return atomic_read(&dfs_cache_ttl);
+}
+
 #endif /* _CIFS_DFS_CACHE_H */
@@ -239,7 +239,7 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug
 			 * section, we need to make sure it won't be released
 			 * so increment its refcount
 			 */
-			ses->ses_count++;
+			cifs_smb_ses_inc_refcount(ses);
 			found = true;
 			goto search_end;
 		}
...
@@ -159,6 +159,7 @@ cifs_chan_is_iface_active(struct cifs_ses *ses,
 /* returns number of channels added */
 int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
 {
+	struct TCP_Server_Info *server = ses->server;
 	int old_chan_count, new_chan_count;
 	int left;
 	int rc = 0;

@@ -178,16 +179,16 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
 		return 0;
 	}

-	if (ses->server->dialect < SMB30_PROT_ID) {
+	if (server->dialect < SMB30_PROT_ID) {
 		spin_unlock(&ses->chan_lock);
 		cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
 		return 0;
 	}

-	if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+	if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
 		ses->chan_max = 1;
 		spin_unlock(&ses->chan_lock);
-		cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
+		cifs_server_dbg(VFS, "no multichannel support\n");
 		return 0;
 	}
 	spin_unlock(&ses->chan_lock);
...
@@ -175,8 +175,17 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
 		}
 	}
 	spin_unlock(&tcon->tc_lock);
-	if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) ||
-	    (!tcon->ses->server) || !server)
+
+	ses = tcon->ses;
+	if (!ses)
+		return -EIO;
+	spin_lock(&ses->ses_lock);
+	if (ses->ses_status == SES_EXITING) {
+		spin_unlock(&ses->ses_lock);
+		return -EIO;
+	}
+	spin_unlock(&ses->ses_lock);
+	if (!ses->server || !server)
 		return -EIO;

 	spin_lock(&server->srv_lock);

@@ -204,8 +213,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
 	if (rc)
 		return rc;

-	ses = tcon->ses;
-
 	spin_lock(&ses->chan_lock);
 	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
 		spin_unlock(&ses->chan_lock);

@@ -3794,7 +3801,7 @@ void smb2_reconnect_server(struct work_struct *work)
 		if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
 			list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
 			tcon_selected = tcon_exist = true;
-			ses->ses_count++;
+			cifs_smb_ses_inc_refcount(ses);
 		}
 		/*
 		 * handle the case where channel needs to reconnect

@@ -3805,7 +3812,7 @@ void smb2_reconnect_server(struct work_struct *work)
 		if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
 			list_add_tail(&ses->rlist, &tmp_ses_list);
 			ses_exist = true;
-			ses->ses_count++;
+			cifs_smb_ses_inc_refcount(ses);
 		}
 		spin_unlock(&ses->chan_lock);
 	}

@@ -4130,7 +4137,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
 	if (rdata->got_bytes) {
 		rqst.rq_iter = rdata->iter;
 		rqst.rq_iter_size = iov_iter_count(&rdata->iter);
-	};
+	}

 	WARN_ONCE(rdata->server != mid->server,
 		  "rdata server %p != mid server %p",
...
@@ -83,22 +83,6 @@ struct create_durable_reconn_v2_req {
 	__le32 Flags;
 } __packed;

-struct create_app_inst_id {
-	struct create_context ccontext;
-	__u8 Name[8];
-	__u8 Reserved[8];
-	__u8 AppInstanceId[16];
-} __packed;
-
-struct create_app_inst_id_vers {
-	struct create_context ccontext;
-	__u8 Name[8];
-	__u8 Reserved[2];
-	__u8 Padding[4];
-	__le64 AppInstanceVersionHigh;
-	__le64 AppInstanceVersionLow;
-} __packed;
-
 struct create_alloc_size_req {
 	struct create_context ccontext;
 	__u8 Name[8];
...

@@ -1250,6 +1250,26 @@ struct create_disk_id_rsp {
 	__u8 Reserved[16];
 } __packed;

+/* See MS-SMB2 2.2.13.2.13 */
+struct create_app_inst_id {
+	struct create_context ccontext;
+	__u8 Name[16];
+	__le32 StructureSize; /* Must be 20 */
+	__u16 Reserved;
+	__u8 AppInstanceId[16];
+} __packed;
+
+/* See MS-SMB2 2.2.13.2.15 */
+struct create_app_inst_id_vers {
+	struct create_context ccontext;
+	__u8 Name[16];
+	__le32 StructureSize; /* Must be 24 */
+	__u16 Reserved;
+	__u32 Padding;
+	__le64 AppInstanceVersionHigh;
+	__le64 AppInstanceVersionLow;
+} __packed;
+
 /* See MS-SMB2 2.2.31 and 2.2.32 */
 struct smb2_ioctl_req {
 	struct smb2_hdr hdr;
...
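The corrected create contexts above follow the wire layouts in MS-SMB2 2.2.13.2.13 and 2.2.13.2.15 (16-byte Name, 32-bit StructureSize). As a rough sanity check, and assuming struct create_context is the usual 16-byte SMB2 create context header, the fixed packed structs add up to 54 and 58 bytes; the snippet below is illustrative only and not part of the patch.

/* Illustrative only: assuming a 16-byte struct create_context header,
 * create_app_inst_id is 16 + 16 + 4 + 2 + 16 = 54 bytes and
 * create_app_inst_id_vers is 16 + 16 + 4 + 2 + 4 + 8 + 8 = 58 bytes.
 */
static inline void example_check_app_inst_id_sizes(void)
{
	BUILD_BUG_ON(sizeof(struct create_app_inst_id) != 54);
	BUILD_BUG_ON(sizeof(struct create_app_inst_id_vers) != 58);
}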