Commit 63342b1d authored by Linus Torvalds

Merge tag '6.4-rc-smb3-client-fixes-part2' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs fixes from Steve French:
 "smb3 client fixes, mostly DFS or reconnect related:

   - Two DFS connection sharing fixes

   - DFS refresh fix

   - Reconnect fix

   - Two potential use after free fixes

   - Also print prefix path in mount debug msg

   - Two small cleanup fixes"

* tag '6.4-rc-smb3-client-fixes-part2' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: Remove unneeded semicolon
  cifs: fix sharing of DFS connections
  cifs: avoid potential races when handling multiple dfs tcons
  cifs: protect access of TCP_Server_Info::{origin,leaf}_fullpath
  cifs: fix potential race when tree connecting ipc
  cifs: fix potential use-after-free bugs in TCP_Server_Info::hostname
  cifs: print smb3_fs_context::source when mounting
  cifs: protect session status check in smb2_reconnect()
  SMB3.1.1: correct definition for app_instance_id create contexts
parents d6b8a8c4 9ee04875
......@@ -280,8 +280,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
c, server->conn_id);
spin_lock(&server->srv_lock);
if (server->hostname)
seq_printf(m, "Hostname: %s ", server->hostname);
spin_unlock(&server->srv_lock);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (!server->rdma)
goto skip_rdma;
......@@ -623,10 +625,13 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
server->fastest_cmd[j],
server->slowest_cmd[j]);
for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
if (atomic_read(&server->smb2slowcmd[j]))
if (atomic_read(&server->smb2slowcmd[j])) {
spin_lock(&server->srv_lock);
seq_printf(m, " %d slow responses from %s for command %d\n",
atomic_read(&server->smb2slowcmd[j]),
server->hostname, j);
spin_unlock(&server->srv_lock);
}
#endif /* STATS2 */
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
......
......@@ -81,19 +81,19 @@ do { \
#define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
do { \
const char *sn = ""; \
if (server && server->hostname) \
sn = server->hostname; \
spin_lock(&server->srv_lock); \
if ((type) & FYI && cifsFYI & CIFS_INFO) { \
pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
__FILE__, sn, ##__VA_ARGS__); \
__FILE__, server->hostname, \
##__VA_ARGS__); \
} else if ((type) & VFS) { \
pr_err_ ## ratefunc("VFS: \\\\%s " fmt, \
sn, ##__VA_ARGS__); \
server->hostname, ##__VA_ARGS__); \
} else if ((type) & NOISY && (NOISY != 0)) { \
pr_debug_ ## ratefunc("\\\\%s " fmt, \
sn, ##__VA_ARGS__); \
server->hostname, ##__VA_ARGS__); \
} \
spin_unlock(&server->srv_lock); \
} while (0)
#define cifs_server_dbg(type, fmt, ...) \
......
......@@ -874,14 +874,12 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
struct cifs_mnt_data mnt_data;
struct dentry *root;
/*
* Prints in Kernel / CIFS log the attempted mount operation
* If CIFS_DEBUG && cifs_FYI
*/
if (cifsFYI)
cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
else
cifs_info("Attempting to mount %s\n", old_ctx->UNC);
if (cifsFYI) {
cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
old_ctx->source, flags);
} else {
cifs_info("Attempting to mount %s\n", old_ctx->source);
}
cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
if (cifs_sb == NULL) {
......
......@@ -736,17 +736,23 @@ struct TCP_Server_Info {
#endif
struct mutex refpath_lock; /* protects leaf_fullpath */
/*
* Canonical DFS full paths that were used to chase referrals in mount and reconnect.
* origin_fullpath: Canonical copy of smb3_fs_context::source.
* It is used for matching existing DFS tcons.
*
* origin_fullpath: first or original referral path
* leaf_fullpath: last referral path (might be changed due to nested links in reconnect)
* leaf_fullpath: Canonical DFS referral path related to this
* connection.
* It is used in DFS cache refresher, reconnect and may
* change due to nested DFS links.
*
* current_fullpath: pointer to either origin_fullpath or leaf_fullpath
* NOTE: cannot be accessed outside cifs_reconnect() and smb2_reconnect()
* Both protected by @refpath_lock and @srv_lock. The @refpath_lock is
* mostly used for not requiring a copy of @leaf_fullpath when getting
* cached or new DFS referrals (which might also sleep during I/O).
* @srv_lock, meanwhile, is held for making string and NULL comparisons
* against both fields, as in mount(2) and cache refresh.
*
* format: \\HOST\SHARE\[OPTIONAL PATH]
* format: \\HOST\SHARE[\OPTIONAL PATH]
*/
char *origin_fullpath, *leaf_fullpath, *current_fullpath;
char *origin_fullpath, *leaf_fullpath;
};
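
The comment above documents the new locking rules for the two DFS paths. A minimal sketch of that discipline, mirroring update_server_fullpath() later in this series (the helper name here is hypothetical and not part of the patch):

	/* Replace @leaf_fullpath under both locks, as documented above. */
	static void swap_leaf_fullpath(struct TCP_Server_Info *server, char *npath)
	{
		mutex_lock(&server->refpath_lock);	/* referral chasing may sleep */
		spin_lock(&server->srv_lock);		/* string/NULL comparisons */
		kfree(server->leaf_fullpath);
		server->leaf_fullpath = npath;		/* \\HOST\SHARE[\OPTIONAL PATH] */
		spin_unlock(&server->srv_lock);
		mutex_unlock(&server->refpath_lock);
	}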
static inline bool is_smb1(struct TCP_Server_Info *server)
......@@ -1232,8 +1238,8 @@ struct cifs_tcon {
struct cached_fids *cfids;
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
struct list_head ulist; /* cache update list */
struct list_head dfs_ses_list;
struct delayed_work dfs_cache_work;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
};
......@@ -1750,7 +1756,6 @@ struct cifs_mount_ctx {
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
char *origin_fullpath, *leaf_fullpath;
struct list_head dfs_ses_list;
};
......
......@@ -8,6 +8,7 @@
#ifndef _CIFSPROTO_H
#define _CIFSPROTO_H
#include <linux/nls.h>
#include <linux/ctype.h>
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
......@@ -572,7 +573,7 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
extern struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx);
extern void cifs_put_smb_ses(struct cifs_ses *ses);
void __cifs_put_smb_ses(struct cifs_ses *ses);
extern struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx);
......@@ -696,4 +697,45 @@ struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
void cifs_put_tcon_super(struct super_block *sb);
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
/* Put references of @ses and @ses->dfs_root_ses */
static inline void cifs_put_smb_ses(struct cifs_ses *ses)
{
struct cifs_ses *rses = ses->dfs_root_ses;
__cifs_put_smb_ses(ses);
if (rses)
__cifs_put_smb_ses(rses);
}
/* Get an active reference of @ses and @ses->dfs_root_ses.
*
* NOTE: make sure to call this function when incrementing reference count of
* @ses to ensure that any DFS root session attached to it (@ses->dfs_root_ses)
* will also get its reference count incremented.
*
* cifs_put_smb_ses() will put both references, so call it when you're done.
*/
static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
{
lockdep_assert_held(&cifs_tcp_ses_lock);
ses->ses_count++;
if (ses->dfs_root_ses)
ses->dfs_root_ses->ses_count++;
}
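
A hedged usage sketch of the paired helpers above, following the pattern used by smb2_reconnect_server() and cifs_dump_full_key() elsewhere in this series (the surrounding code is illustrative only, not taken verbatim from the patch):

	spin_lock(&cifs_tcp_ses_lock);
	cifs_smb_ses_inc_refcount(ses);	/* also pins ses->dfs_root_ses, if set */
	spin_unlock(&cifs_tcp_ses_lock);

	/* ... use @ses ... */

	cifs_put_smb_ses(ses);		/* drops both references */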
static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
{
if (strlen(s1) != strlen(s2))
return false;
for (; *s1; s1++, s2++) {
if (*s1 == '/' || *s1 == '\\') {
if (*s2 != '/' && *s2 != '\\')
return false;
} else if (tolower(*s1) != tolower(*s2))
return false;
}
return true;
}
#endif /* _CIFSPROTO_H */
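
The dfs_src_pathname_equal() helper added above treats '/' and '\' as interchangeable and compares everything else case-insensitively, so differently spelled device names can still resolve to the same DFS source. A standalone illustration of those semantics (hypothetical userspace test program; the comparison body is copied from the header above):

	#include <ctype.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool dfs_src_pathname_equal(const char *s1, const char *s2)
	{
		if (strlen(s1) != strlen(s2))
			return false;
		for (; *s1; s1++, s2++) {
			if (*s1 == '/' || *s1 == '\\') {
				if (*s2 != '/' && *s2 != '\\')
					return false;
			} else if (tolower(*s1) != tolower(*s2))
				return false;
		}
		return true;
	}

	int main(void)
	{
		/* prints 1: separators and case are ignored */
		printf("%d\n", dfs_src_pathname_equal("//SRV/dfs/link",
						      "\\\\srv\\DFS\\link"));
		/* prints 0: lengths differ, so no match */
		printf("%d\n", dfs_src_pathname_equal("//srv/dfs", "//srv/dfs2"));
		return 0;
	}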
......@@ -403,8 +403,10 @@ static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const cha
if (server->hostname != target) {
hostname = extract_hostname(target);
if (!IS_ERR(hostname)) {
spin_lock(&server->srv_lock);
kfree(server->hostname);
server->hostname = hostname;
spin_unlock(&server->srv_lock);
} else {
cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
__func__, PTR_ERR(hostname));
......@@ -452,7 +454,6 @@ static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_
static int reconnect_dfs_server(struct TCP_Server_Info *server)
{
int rc = 0;
const char *refpath = server->current_fullpath + 1;
struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
struct dfs_cache_tgt_iterator *target_hint = NULL;
int num_targets = 0;
......@@ -465,8 +466,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
* through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
* refreshing the referral, so, in this case, default it to 1.
*/
if (!dfs_cache_noreq_find(refpath, NULL, &tl))
mutex_lock(&server->refpath_lock);
if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
num_targets = dfs_cache_get_nr_tgts(&tl);
mutex_unlock(&server->refpath_lock);
if (!num_targets)
num_targets = 1;
......@@ -510,7 +513,9 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
} while (server->tcpStatus == CifsNeedReconnect);
dfs_cache_noreq_update_tgthint(refpath, target_hint);
mutex_lock(&server->refpath_lock);
dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
mutex_unlock(&server->refpath_lock);
dfs_cache_free_tgts(&tl);
/* Need to set up echo worker again once connection has been established */
......@@ -561,9 +566,7 @@ cifs_echo_request(struct work_struct *work)
goto requeue_echo;
rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
if (rc)
cifs_dbg(FYI, "Unable to send echo request to server: %s\n",
server->hostname);
cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
/* Check witness registrations */
cifs_swn_check();
......@@ -993,10 +996,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
*/
}
#ifdef CONFIG_CIFS_DFS_UPCALL
kfree(server->origin_fullpath);
kfree(server->leaf_fullpath);
#endif
kfree(server);
length = atomic_dec_return(&tcpSesAllocCount);
......@@ -1384,26 +1385,13 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
return true;
}
static bool dfs_src_pathname_equal(const char *s1, const char *s2)
{
if (strlen(s1) != strlen(s2))
return false;
for (; *s1; s1++, s2++) {
if (*s1 == '/' || *s1 == '\\') {
if (*s2 != '/' && *s2 != '\\')
return false;
} else if (tolower(*s1) != tolower(*s2))
return false;
}
return true;
}
/* this function must be called with srv_lock held */
static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx,
bool dfs_super_cmp)
static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
lockdep_assert_held(&server->srv_lock);
if (ctx->nosharesock)
return 0;
......@@ -1429,27 +1417,41 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
(struct sockaddr *)&server->srcaddr))
return 0;
/*
* When matching DFS superblocks, we only check for original source pathname as the
* currently connected target might be different than the one parsed earlier in i.e.
* mount.cifs(8).
* - Match for a DFS tcon (@server->origin_fullpath).
* - Match for a DFS root server connection (@server->leaf_fullpath).
* - If none of the above and @ctx->leaf_fullpath is set, then
* it is a new DFS connection.
* - If 'nodfs' mount option was passed, then match only connections
* that have no DFS referrals set
* (e.g. can't failover to other targets).
*/
if (dfs_super_cmp) {
if (!ctx->source || !server->origin_fullpath ||
!dfs_src_pathname_equal(server->origin_fullpath, ctx->source))
if (!ctx->nodfs) {
if (ctx->source && server->origin_fullpath) {
if (!dfs_src_pathname_equal(ctx->source,
server->origin_fullpath))
return 0;
} else {
/* Skip addr, hostname and port matching for DFS connections */
if (server->leaf_fullpath) {
} else if (server->leaf_fullpath) {
if (!ctx->leaf_fullpath ||
strcasecmp(server->leaf_fullpath, ctx->leaf_fullpath))
strcasecmp(server->leaf_fullpath,
ctx->leaf_fullpath))
return 0;
} else if (strcasecmp(server->hostname, ctx->server_hostname) ||
!match_server_address(server, addr) ||
!match_port(server, addr)) {
} else if (ctx->leaf_fullpath) {
return 0;
}
} else if (server->origin_fullpath || server->leaf_fullpath) {
return 0;
}
/*
* Match for a regular connection (address/hostname/port) which has no
* DFS referrals set.
*/
if (!server->origin_fullpath && !server->leaf_fullpath &&
(strcasecmp(server->hostname, ctx->server_hostname) ||
!match_server_address(server, addr) ||
!match_port(server, addr)))
return 0;
if (!match_security(server, ctx))
return 0;
......@@ -1480,7 +1482,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
*/
if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx, false)) {
if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
spin_unlock(&server->srv_lock);
continue;
}
......@@ -1580,7 +1582,6 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
rc = -ENOMEM;
goto out_err;
}
tcp_ses->current_fullpath = tcp_ses->leaf_fullpath;
}
if (ctx->nosharesock)
......@@ -1810,7 +1811,9 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
if (tcon == NULL)
return -ENOMEM;
spin_lock(&server->srv_lock);
scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
spin_unlock(&server->srv_lock);
xid = get_xid();
tcon->ses = ses;
......@@ -1863,7 +1866,7 @@ cifs_free_ipc(struct cifs_ses *ses)
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
struct cifs_ses *ses;
struct cifs_ses *ses, *ret = NULL;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
......@@ -1873,23 +1876,22 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
continue;
}
spin_lock(&ses->chan_lock);
if (!match_session(ses, ctx)) {
if (match_session(ses, ctx)) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
continue;
ret = ses;
break;
}
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
++ses->ses_count;
spin_unlock(&cifs_tcp_ses_lock);
return ses;
}
if (ret)
cifs_smb_ses_inc_refcount(ret);
spin_unlock(&cifs_tcp_ses_lock);
return NULL;
return ret;
}
void cifs_put_smb_ses(struct cifs_ses *ses)
void __cifs_put_smb_ses(struct cifs_ses *ses)
{
unsigned int rc, xid;
unsigned int chan_count;
......@@ -2244,6 +2246,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
*/
spin_lock(&cifs_tcp_ses_lock);
ses->dfs_root_ses = ctx->dfs_root_ses;
if (ses->dfs_root_ses)
ses->dfs_root_ses->ses_count++;
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
......@@ -2260,12 +2264,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
}
/* this function must be called with tc_lock held */
static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx, bool dfs_super_cmp)
static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
struct TCP_Server_Info *server = tcon->ses->server;
if (tcon->status == TID_EXITING)
return 0;
/* Skip UNC validation when matching DFS superblocks */
if (!dfs_super_cmp && strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
/* Skip UNC validation when matching DFS connections or superblocks */
if (!server->origin_fullpath && !server->leaf_fullpath &&
strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
return 0;
if (tcon->seal != ctx->seal)
return 0;
......@@ -2288,7 +2295,7 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->tc_lock);
if (!match_tcon(tcon, ctx, false)) {
if (!match_tcon(tcon, ctx)) {
spin_unlock(&tcon->tc_lock);
continue;
}
......@@ -2334,6 +2341,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
/* cancel polling of interfaces */
cancel_delayed_work_sync(&tcon->query_interfaces);
#ifdef CONFIG_CIFS_DFS_UPCALL
cancel_delayed_work_sync(&tcon->dfs_cache_work);
#endif
if (tcon->use_witness) {
int rc;
......@@ -2581,7 +2591,9 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
(SMB_INTERFACE_POLL_INTERVAL * HZ));
}
#ifdef CONFIG_CIFS_DFS_UPCALL
INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
#endif
spin_lock(&cifs_tcp_ses_lock);
list_add(&tcon->tcon_list, &ses->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
......@@ -2659,9 +2671,11 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
return 1;
}
static int
match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
static int match_prepath(struct super_block *sb,
struct TCP_Server_Info *server,
struct cifs_mnt_data *mnt_data)
{
struct smb3_fs_context *ctx = mnt_data->ctx;
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
......@@ -2669,6 +2683,10 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
new->prepath;
if (server->origin_fullpath &&
dfs_src_pathname_equal(server->origin_fullpath, ctx->source))
return 1;
if (old_set && new_set && !strcmp(new->prepath, old->prepath))
return 1;
else if (!old_set && !new_set)
......@@ -2687,7 +2705,6 @@ cifs_match_super(struct super_block *sb, void *data)
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct tcon_link *tlink;
bool dfs_super_cmp;
int rc = 0;
spin_lock(&cifs_tcp_ses_lock);
......@@ -2702,18 +2719,16 @@ cifs_match_super(struct super_block *sb, void *data)
ses = tcon->ses;
tcp_srv = ses->server;
dfs_super_cmp = IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && tcp_srv->origin_fullpath;
ctx = mnt_data->ctx;
spin_lock(&tcp_srv->srv_lock);
spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
spin_lock(&tcon->tc_lock);
if (!match_server(tcp_srv, ctx, dfs_super_cmp) ||
if (!match_server(tcp_srv, ctx) ||
!match_session(ses, ctx) ||
!match_tcon(tcon, ctx, dfs_super_cmp) ||
!match_prepath(sb, mnt_data)) {
!match_tcon(tcon, ctx) ||
!match_prepath(sb, tcp_srv, mnt_data)) {
rc = 0;
goto out;
}
......@@ -3458,8 +3473,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
error:
dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
kfree(mnt_ctx.origin_fullpath);
kfree(mnt_ctx.leaf_fullpath);
cifs_mount_put_conns(&mnt_ctx);
return rc;
}
......
......@@ -99,7 +99,7 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
return rc;
}
static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct dfs_root_ses *root_ses;
......@@ -127,7 +127,7 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct dfs_info3_param ref = {};
bool is_refsrv = false;
bool is_refsrv;
int rc, rc2;
rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref);
......@@ -157,8 +157,10 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
rc = cifs_is_path_remote(mnt_ctx);
}
dfs_cache_noreq_update_tgthint(ref_path + 1, tit);
if (rc == -EREMOTE && is_refsrv) {
rc2 = get_root_smb_session(mnt_ctx);
rc2 = add_root_smb_session(mnt_ctx);
if (rc2)
rc = rc2;
}
......@@ -248,16 +250,19 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
tcon = mnt_ctx->tcon;
mutex_lock(&server->refpath_lock);
spin_lock(&server->srv_lock);
if (!server->origin_fullpath) {
server->origin_fullpath = origin_fullpath;
server->current_fullpath = server->leaf_fullpath;
origin_fullpath = NULL;
}
spin_unlock(&server->srv_lock);
mutex_unlock(&server->refpath_lock);
if (list_empty(&tcon->dfs_ses_list)) {
list_replace_init(&mnt_ctx->dfs_ses_list,
&tcon->dfs_ses_list);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
dfs_cache_get_ttl() * HZ);
} else {
dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
}
......@@ -272,15 +277,21 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
{
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_ses *ses;
char *source = ctx->source;
bool nodfs = ctx->nodfs;
int rc;
*isdfs = false;
/* Temporarily set @ctx->source to NULL as we're not matching DFS
* superblocks yet. See cifs_match_super() and match_server().
*/
ctx->source = NULL;
rc = get_session(mnt_ctx, NULL);
if (rc)
return rc;
goto out;
ctx->dfs_root_ses = mnt_ctx->ses;
/*
* If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
......@@ -289,23 +300,41 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
* Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
* to respond with PATH_NOT_COVERED to requests that include the prefix.
*/
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL)) {
if (!nodfs) {
rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL);
if (rc) {
if (rc != -ENOENT && rc != -EOPNOTSUPP)
goto out;
nodfs = true;
}
}
if (nodfs) {
rc = cifs_mount_get_tcon(mnt_ctx);
if (rc)
return rc;
if (!rc)
rc = cifs_is_path_remote(mnt_ctx);
if (!rc || rc != -EREMOTE)
return rc;
goto out;
}
*isdfs = true;
rc = get_root_smb_session(mnt_ctx);
if (rc)
/*
* Prevent the DFS root session from being put in the first call to
* cifs_mount_put_conns(). If another DFS root server was not found
* while chasing the referrals (@ctx->dfs_root_ses == @ses), then we
* can safely put extra refcount of @ses.
*/
ses = mnt_ctx->ses;
mnt_ctx->ses = NULL;
mnt_ctx->server = NULL;
rc = __dfs_mount_share(mnt_ctx);
if (ses == ctx->dfs_root_ses)
cifs_put_smb_ses(ses);
out:
/*
* Restore previous value of @ctx->source so DFS superblock can be
* matched in cifs_match_super().
*/
ctx->source = source;
return rc;
return __dfs_mount_share(mnt_ctx);
}
/* Update dfs referral path of superblock */
......@@ -342,10 +371,11 @@ static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb
rc = PTR_ERR(npath);
} else {
mutex_lock(&server->refpath_lock);
spin_lock(&server->srv_lock);
kfree(server->leaf_fullpath);
server->leaf_fullpath = npath;
spin_unlock(&server->srv_lock);
mutex_unlock(&server->refpath_lock);
server->current_fullpath = server->leaf_fullpath;
}
return rc;
}
......@@ -374,6 +404,54 @@ static int target_share_matches_server(struct TCP_Server_Info *server, char *sha
return rc;
}
static void __tree_connect_ipc(const unsigned int xid, char *tree,
struct cifs_sb_info *cifs_sb,
struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
struct cifs_tcon *tcon = ses->tcon_ipc;
int rc;
spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
if (cifs_chan_needs_reconnect(ses, server) ||
ses->ses_status != SES_GOOD) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
cifs_server_dbg(FYI, "%s: skipping ipc reconnect due to disconnected ses\n",
__func__);
return;
}
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
cifs_server_lock(server);
scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
cifs_server_unlock(server);
rc = server->ops->tree_connect(xid, ses, tree, tcon,
cifs_sb->local_nls);
cifs_server_dbg(FYI, "%s: tree_reconnect %s: %d\n", __func__, tree, rc);
spin_lock(&tcon->tc_lock);
if (rc) {
tcon->status = TID_NEED_TCON;
} else {
tcon->status = TID_GOOD;
tcon->need_reconnect = false;
}
spin_unlock(&tcon->tc_lock);
}
static void tree_connect_ipc(const unsigned int xid, char *tree,
struct cifs_sb_info *cifs_sb,
struct cifs_tcon *tcon)
{
struct cifs_ses *ses = tcon->ses;
__tree_connect_ipc(xid, tree, cifs_sb, ses);
__tree_connect_ipc(xid, tree, cifs_sb, CIFS_DFS_ROOT_SES(ses));
}
static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, char *tree, bool islink,
struct dfs_cache_tgt_list *tl)
......@@ -382,7 +460,6 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
struct TCP_Server_Info *server = tcon->ses->server;
const struct smb_version_operations *ops = server->ops;
struct cifs_ses *root_ses = CIFS_DFS_ROOT_SES(tcon->ses);
struct cifs_tcon *ipc = root_ses->tcon_ipc;
char *share = NULL, *prefix = NULL;
struct dfs_cache_tgt_iterator *tit;
bool target_match;
......@@ -403,7 +480,7 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
share = prefix = NULL;
/* Check if share matches with tcp ses */
rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
rc = dfs_cache_get_tgt_share(server->leaf_fullpath + 1, tit, &share, &prefix);
if (rc) {
cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
break;
......@@ -417,19 +494,15 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
continue;
}
dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
if (ipc->need_reconnect) {
scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
cifs_dbg(FYI, "%s: reconnect ipc: %d\n", __func__, rc);
}
dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, tit);
tree_connect_ipc(xid, tree, cifs_sb, tcon);
scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
if (!islink) {
rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
break;
}
/*
* If no dfs referrals were returned from link target, then just do a TREE_CONNECT
* to it. Otherwise, cache the dfs referral and then mark current tcp ses for
......@@ -539,8 +612,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
cifs_sb = CIFS_SB(sb);
/* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
if (!server->current_fullpath ||
dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
if (!server->leaf_fullpath ||
dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {
rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
goto out;
}
......
......@@ -43,8 +43,12 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
size_t len;
char *s;
if (unlikely(!server->origin_fullpath))
spin_lock(&server->srv_lock);
if (unlikely(!server->origin_fullpath)) {
spin_unlock(&server->srv_lock);
return ERR_PTR(-EREMOTE);
}
spin_unlock(&server->srv_lock);
s = dentry_path_raw(dentry, page, PATH_MAX);
if (IS_ERR(s))
......@@ -53,13 +57,18 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
if (!s[1])
s++;
spin_lock(&server->srv_lock);
len = strlen(server->origin_fullpath);
if (s < (char *)page + len)
if (s < (char *)page + len) {
spin_unlock(&server->srv_lock);
return ERR_PTR(-ENAMETOOLONG);
}
s -= len;
memcpy(s, server->origin_fullpath, len);
spin_unlock(&server->srv_lock);
convert_delimiter(s, '/');
return s;
}
......
......@@ -20,12 +20,14 @@
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "dns_resolve.h"
#include "dfs.h"
#include "dfs_cache.h"
#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64
#define CACHE_MIN_TTL 120 /* 2 minutes */
#define CACHE_DEFAULT_TTL 300 /* 5 minutes */
#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
......@@ -50,10 +52,9 @@ struct cache_entry {
};
static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;
struct workqueue_struct *dfscache_wq;
static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);
atomic_t dfs_cache_ttl;
static struct nls_table *cache_cp;
......@@ -65,10 +66,6 @@ static atomic_t cache_count;
static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);
static void refresh_cache_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
/**
* dfs_cache_canonical_path - get a canonical DFS path
*
......@@ -290,7 +287,9 @@ int dfs_cache_init(void)
int rc;
int i;
dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
dfscache_wq = alloc_workqueue("cifs-dfscache",
WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
0);
if (!dfscache_wq)
return -ENOMEM;
......@@ -306,6 +305,7 @@ int dfs_cache_init(void)
INIT_HLIST_HEAD(&cache_htable[i]);
atomic_set(&cache_count, 0);
atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
cache_cp = load_nls("utf8");
if (!cache_cp)
cache_cp = load_nls_default();
......@@ -480,6 +480,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
int rc;
struct cache_entry *ce;
unsigned int hash;
int ttl;
WARN_ON(!rwsem_is_locked(&htable_rw_lock));
......@@ -496,15 +497,8 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
if (IS_ERR(ce))
return ce;
spin_lock(&cache_ttl_lock);
if (!cache_ttl) {
cache_ttl = ce->ttl;
queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
} else {
cache_ttl = min_t(int, cache_ttl, ce->ttl);
mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
}
spin_unlock(&cache_ttl_lock);
ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
atomic_set(&dfs_cache_ttl, ttl);
hlist_add_head(&ce->hlist, &cache_htable[hash]);
dump_ce(ce);
......@@ -616,7 +610,6 @@ static struct cache_entry *lookup_cache_entry(const char *path)
*/
void dfs_cache_destroy(void)
{
cancel_delayed_work_sync(&refresh_task);
unload_nls(cache_cp);
flush_cache_ents();
kmem_cache_destroy(cache_slab);
......@@ -1142,6 +1135,7 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
* target shares in @refs.
*/
static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
const char *path,
struct dfs_cache_tgt_list *old_tl,
struct dfs_cache_tgt_list *new_tl)
{
......@@ -1153,22 +1147,39 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
nit = dfs_cache_get_next_tgt(new_tl, nit)) {
if (target_share_equal(server,
dfs_cache_get_tgt_name(oit),
dfs_cache_get_tgt_name(nit)))
dfs_cache_get_tgt_name(nit))) {
dfs_cache_noreq_update_tgthint(path, nit);
return;
}
}
}
cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
cifs_signal_cifsd_for_reconnect(server, true);
}
static bool is_ses_good(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
struct cifs_tcon *tcon = ses->tcon_ipc;
bool ret;
spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
ret = !cifs_chan_needs_reconnect(ses, server) &&
ses->ses_status == SES_GOOD &&
!tcon->need_reconnect;
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
return ret;
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
{
struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
struct cifs_tcon *ipc = ses->tcon_ipc;
struct TCP_Server_Info *server = ses->server;
bool needs_refresh = false;
struct cache_entry *ce;
unsigned int xid;
......@@ -1190,20 +1201,19 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
goto out;
}
spin_lock(&ipc->tc_lock);
if (ipc->status != TID_GOOD) {
spin_unlock(&ipc->tc_lock);
cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
ses = CIFS_DFS_ROOT_SES(ses);
if (!is_ses_good(ses)) {
cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
__func__);
goto out;
}
spin_unlock(&ipc->tc_lock);
ce = cache_refresh_path(xid, ses, path, true);
if (!IS_ERR(ce)) {
rc = get_targets(ce, &new_tl);
up_read(&htable_rw_lock);
cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
}
out:
......@@ -1216,10 +1226,11 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
{
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_ses *ses = tcon->ses;
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath)
__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
mutex_unlock(&server->refpath_lock);
return 0;
}
......@@ -1263,56 +1274,32 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
return refresh_tcon(tcon, true);
}
/*
* Worker that will refresh DFS cache from all active mounts based on lowest TTL value
* from a DFS referral.
*/
static void refresh_cache_worker(struct work_struct *work)
/* Refresh all DFS referrals related to DFS tcon */
void dfs_cache_refresh(struct work_struct *work)
{
struct TCP_Server_Info *server;
struct cifs_tcon *tcon, *ntcon;
struct list_head tcons;
struct dfs_root_ses *rses;
struct cifs_tcon *tcon;
struct cifs_ses *ses;
INIT_LIST_HEAD(&tcons);
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
if (!server->leaf_fullpath)
continue;
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (ses->tcon_ipc) {
ses->ses_count++;
list_add_tail(&ses->tcon_ipc->ulist, &tcons);
}
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (!tcon->ipc) {
tcon->tc_count++;
list_add_tail(&tcon->ulist, &tcons);
}
}
}
}
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
struct TCP_Server_Info *server = tcon->ses->server;
list_del_init(&tcon->ulist);
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
ses = tcon->ses;
server = ses->server;
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath)
__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
__refresh_tcon(server->leaf_fullpath + 1, ses, false);
mutex_unlock(&server->refpath_lock);
if (tcon->ipc)
cifs_put_smb_ses(tcon->ses);
else
cifs_put_tcon(tcon);
list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
ses = rses->ses;
server = ses->server;
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath)
__refresh_tcon(server->leaf_fullpath + 1, ses, false);
mutex_unlock(&server->refpath_lock);
}
spin_lock(&cache_ttl_lock);
queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
spin_unlock(&cache_ttl_lock);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
atomic_read(&dfs_cache_ttl) * HZ);
}
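
The refresher above replaces the old global refresh_task: each DFS tcon now owns a delayed work item that re-queues itself using the cached TTL. A condensed lifecycle sketch assembled from calls that appear elsewhere in this series (locations noted in the comments; the grouping here is illustrative, not a literal function in the patch):

	/* tcon creation (cifs_get_tcon) */
	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);

	/* first kick after a successful DFS mount (__dfs_mount_share) */
	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
			   dfs_cache_get_ttl() * HZ);

	/* dfs_cache_refresh() re-queues itself with the cached TTL */
	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
			   atomic_read(&dfs_cache_ttl) * HZ);

	/* teardown when the tcon goes away (cifs_put_tcon) */
	cancel_delayed_work_sync(&tcon->dfs_cache_work);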
......@@ -13,6 +13,9 @@
#include <linux/uuid.h>
#include "cifsglob.h"
extern struct workqueue_struct *dfscache_wq;
extern atomic_t dfs_cache_ttl;
#define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
struct dfs_cache_tgt_list {
......@@ -42,6 +45,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
char **prefix);
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
void dfs_cache_refresh(struct work_struct *work);
static inline struct dfs_cache_tgt_iterator *
dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
......@@ -89,4 +93,9 @@ dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl)
return tl ? tl->tl_numtgts : 0;
}
static inline int dfs_cache_get_ttl(void)
{
return atomic_read(&dfs_cache_ttl);
}
#endif /* _CIFS_DFS_CACHE_H */
......@@ -239,7 +239,7 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug
* section, we need to make sure it won't be released
* so increment its refcount
*/
ses->ses_count++;
cifs_smb_ses_inc_refcount(ses);
found = true;
goto search_end;
}
......
......@@ -159,6 +159,7 @@ cifs_chan_is_iface_active(struct cifs_ses *ses,
/* returns number of channels added */
int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
int old_chan_count, new_chan_count;
int left;
int rc = 0;
......@@ -178,16 +179,16 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
return 0;
}
if (ses->server->dialect < SMB30_PROT_ID) {
if (server->dialect < SMB30_PROT_ID) {
spin_unlock(&ses->chan_lock);
cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
return 0;
}
if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
ses->chan_max = 1;
spin_unlock(&ses->chan_lock);
cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
cifs_server_dbg(VFS, "no multichannel support\n");
return 0;
}
spin_unlock(&ses->chan_lock);
......
......@@ -175,8 +175,17 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
}
}
spin_unlock(&tcon->tc_lock);
if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) ||
(!tcon->ses->server) || !server)
ses = tcon->ses;
if (!ses)
return -EIO;
spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_EXITING) {
spin_unlock(&ses->ses_lock);
return -EIO;
}
spin_unlock(&ses->ses_lock);
if (!ses->server || !server)
return -EIO;
spin_lock(&server->srv_lock);
......@@ -204,8 +213,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if (rc)
return rc;
ses = tcon->ses;
spin_lock(&ses->chan_lock);
if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
spin_unlock(&ses->chan_lock);
......@@ -3794,7 +3801,7 @@ void smb2_reconnect_server(struct work_struct *work)
if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
tcon_selected = tcon_exist = true;
ses->ses_count++;
cifs_smb_ses_inc_refcount(ses);
}
/*
* handle the case where channel needs to reconnect
......@@ -3805,7 +3812,7 @@ void smb2_reconnect_server(struct work_struct *work)
if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
list_add_tail(&ses->rlist, &tmp_ses_list);
ses_exist = true;
ses->ses_count++;
cifs_smb_ses_inc_refcount(ses);
}
spin_unlock(&ses->chan_lock);
}
......@@ -4130,7 +4137,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
if (rdata->got_bytes) {
rqst.rq_iter = rdata->iter;
rqst.rq_iter_size = iov_iter_count(&rdata->iter);
};
}
WARN_ONCE(rdata->server != mid->server,
"rdata server %p != mid server %p",
......
......@@ -83,22 +83,6 @@ struct create_durable_reconn_v2_req {
__le32 Flags;
} __packed;
struct create_app_inst_id {
struct create_context ccontext;
__u8 Name[8];
__u8 Reserved[8];
__u8 AppInstanceId[16];
} __packed;
struct create_app_inst_id_vers {
struct create_context ccontext;
__u8 Name[8];
__u8 Reserved[2];
__u8 Padding[4];
__le64 AppInstanceVersionHigh;
__le64 AppInstanceVersionLow;
} __packed;
struct create_alloc_size_req {
struct create_context ccontext;
__u8 Name[8];
......
......@@ -1250,6 +1250,26 @@ struct create_disk_id_rsp {
__u8 Reserved[16];
} __packed;
/* See MS-SMB2 2.2.13.2.13 */
struct create_app_inst_id {
struct create_context ccontext;
__u8 Name[16];
__le32 StructureSize; /* Must be 20 */
__u16 Reserved;
__u8 AppInstanceId[16];
} __packed;
/* See MS-SMB2 2.2.13.2.15 */
struct create_app_inst_id_vers {
struct create_context ccontext;
__u8 Name[16];
__le32 StructureSize; /* Must be 24 */
__u16 Reserved;
__u32 Padding;
__le64 AppInstanceVersionHigh;
__le64 AppInstanceVersionLow;
} __packed;
/* See MS-SMB2 2.2.31 and 2.2.32 */
struct smb2_ioctl_req {
struct smb2_hdr hdr;
......