Commit 88280037 authored by Linus Torvalds

Merge tag '6.4-rc4-smb3-server-fixes' of git://git.samba.org/ksmbd

Pull smb server fixes from Steve French:
 "Eight server fixes (most also for stable):

   - Two fixes for uninitialized pointer reads (rename and link)

   - Fix potential UAF in oplock break

   - Two fixes for potential out of bound reads in negotiate

   - Fix crediting bug

   - Two fixes for xfstests (allocation size fix for test 694 and lookup
     issue shown by test 464)"

* tag '6.4-rc4-smb3-server-fixes' of git://git.samba.org/ksmbd:
  ksmbd: call putname after using the last component
  ksmbd: fix incorrect AllocationSize set in smb2_get_info
  ksmbd: fix UAF issue from opinfo->conn
  ksmbd: fix multiple out-of-bounds read during context decoding
  ksmbd: fix slab-out-of-bounds read in smb2_handle_negotiate
  ksmbd: fix credit count leakage
  ksmbd: fix uninitialized pointer read in smb2_create_link()
  ksmbd: fix uninitialized pointer read in ksmbd_vfs_rename()
parents 929ed21d 6fe55c27
@@ -157,13 +157,42 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
rcu_read_lock();
opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
op_entry);
if (opinfo && !atomic_inc_not_zero(&opinfo->refcount))
if (opinfo) {
if (!atomic_inc_not_zero(&opinfo->refcount))
opinfo = NULL;
else {
atomic_inc(&opinfo->conn->r_count);
if (ksmbd_conn_releasing(opinfo->conn)) {
atomic_dec(&opinfo->conn->r_count);
atomic_dec(&opinfo->refcount);
opinfo = NULL;
}
}
}
rcu_read_unlock();
return opinfo;
}
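With this hunk opinfo_get_list() returns an oplock that holds two references: opinfo->refcount and, new in this fix, opinfo->conn->r_count, taken only when the connection is not already being torn down (ksmbd_conn_releasing()). Callers must therefore release through the new opinfo_conn_put() defined just below rather than plain opinfo_put(). A minimal usage sketch; break_to_level_ii() is illustrative and not part of the patch:

    /* Illustrative caller: an oplock obtained from opinfo_get_list() now also
     * pins its connection, so it is released with opinfo_conn_put(). */
    static int break_to_level_ii(struct ksmbd_inode *ci)
    {
        struct oplock_info *opinfo = opinfo_get_list(ci);
        int err;

        if (!opinfo)
            return 0;    /* no oplock holder, or its connection is going away */

        err = oplock_break(opinfo, SMB2_OPLOCK_LEVEL_II);
        opinfo_conn_put(opinfo);    /* drops refcount and conn->r_count */
        return err;
    }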
static void opinfo_conn_put(struct oplock_info *opinfo)
{
struct ksmbd_conn *conn;
if (!opinfo)
return;
conn = opinfo->conn;
/*
* Checking waitqueue to dropping pending requests on
* disconnection. waitqueue_active is safe because it
* uses atomic operation for condition.
*/
if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
wake_up(&conn->r_count_q);
opinfo_put(opinfo);
}
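The conn->r_count scheme works because the release side waits for it to drain: connection teardown sleeps on conn->r_count_q until r_count reaches zero, and opinfo_conn_put() wakes that waiter when it drops the last pending reference (the waitqueue_active() check merely avoids a needless wake-up). A sketch of the waiting side, which lives in ksmbd's connection code rather than in this hunk:

    /* Illustrative drain on teardown: block until every in-flight user of the
     * connection has dropped r_count. */
    static void drain_pending_requests(struct ksmbd_conn *conn)
    {
        wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
    }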
void opinfo_put(struct oplock_info *opinfo)
{
if (!atomic_dec_and_test(&opinfo->refcount))
@@ -666,13 +695,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
out:
ksmbd_free_work_struct(work);
/*
* Checking waitqueue to dropping pending requests on
* disconnection. waitqueue_active is safe because it
* uses atomic operation for condition.
*/
if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
wake_up(&conn->r_count_q);
}
/**
@@ -706,7 +728,6 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
work->conn = conn;
work->sess = opinfo->sess;
atomic_inc(&conn->r_count);
if (opinfo->op_state == OPLOCK_ACK_WAIT) {
INIT_WORK(&work->work, __smb2_oplock_break_noti);
ksmbd_queue_work(work);
@@ -776,13 +797,6 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
out:
ksmbd_free_work_struct(work);
/*
* Checking waitqueue to dropping pending requests on
* disconnection. waitqueue_active is safe because it
* uses atomic operation for condition.
*/
if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
wake_up(&conn->r_count_q);
}
/**
@@ -822,7 +836,6 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
work->conn = conn;
work->sess = opinfo->sess;
atomic_inc(&conn->r_count);
if (opinfo->op_state == OPLOCK_ACK_WAIT) {
list_for_each_safe(tmp, t, &opinfo->interim_list) {
struct ksmbd_work *in_work;
@@ -1144,8 +1157,10 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
}
prev_opinfo = opinfo_get_list(ci);
if (!prev_opinfo ||
(prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx))
(prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
opinfo_conn_put(prev_opinfo);
goto set_lev;
}
prev_op_has_lease = prev_opinfo->is_lease;
if (prev_op_has_lease)
prev_op_state = prev_opinfo->o_lease->state;
@@ -1153,19 +1168,19 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
if (share_ret < 0 &&
prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
err = share_ret;
opinfo_put(prev_opinfo);
opinfo_conn_put(prev_opinfo);
goto err_out;
}
if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
opinfo_put(prev_opinfo);
opinfo_conn_put(prev_opinfo);
goto op_break_not_needed;
}
list_add(&work->interim_entry, &prev_opinfo->interim_list);
err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
opinfo_put(prev_opinfo);
opinfo_conn_put(prev_opinfo);
if (err == -ENOENT)
goto set_lev;
/* Check all oplock was freed by close */
@@ -1228,14 +1243,14 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work,
return;
if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
opinfo_put(brk_opinfo);
opinfo_conn_put(brk_opinfo);
return;
}
brk_opinfo->open_trunc = is_trunc;
list_add(&work->interim_entry, &brk_opinfo->interim_list);
oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
opinfo_put(brk_opinfo);
opinfo_conn_put(brk_opinfo);
}
/**
@@ -1263,6 +1278,13 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
if (!atomic_inc_not_zero(&brk_op->refcount))
continue;
atomic_inc(&brk_op->conn->r_count);
if (ksmbd_conn_releasing(brk_op->conn)) {
atomic_dec(&brk_op->conn->r_count);
continue;
}
rcu_read_unlock();
if (brk_op->is_lease && (brk_op->o_lease->state &
(~(SMB2_LEASE_READ_CACHING_LE |
@@ -1292,7 +1314,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
brk_op->open_trunc = is_trunc;
oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
next:
opinfo_put(brk_op);
opinfo_conn_put(brk_op);
rcu_read_lock();
}
rcu_read_unlock();
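The same pinning now happens inside the RCU walk of smb_break_all_levII_oplock(): code may not sleep under rcu_read_lock(), yet oplock_break() can, so each iteration takes its references, leaves the read-side critical section for the break, releases with opinfo_conn_put(), and re-enters RCU before advancing. The loop shape, reassembled from the two hunks above as a sketch (lease-state filtering elided):

    rcu_read_lock();
    list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
        if (!atomic_inc_not_zero(&brk_op->refcount))
            continue;                       /* entry already on its way out */
        atomic_inc(&brk_op->conn->r_count);
        if (ksmbd_conn_releasing(brk_op->conn)) {
            atomic_dec(&brk_op->conn->r_count);
            continue;
        }
        rcu_read_unlock();                  /* our references keep brk_op and its conn alive */

        /* ... lease-state checks elided ... */
        oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);   /* may sleep */

        opinfo_conn_put(brk_op);            /* drops refcount and conn->r_count */
        rcu_read_lock();
    }
    rcu_read_unlock();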
@@ -326,13 +326,9 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
if (hdr->Command == SMB2_NEGOTIATE)
aux_max = 1;
else
aux_max = conn->vals->max_credits - credit_charge;
aux_max = conn->vals->max_credits - conn->total_credits;
credits_granted = min_t(unsigned short, credits_requested, aux_max);
if (conn->vals->max_credits - conn->total_credits < credits_granted)
credits_granted = conn->vals->max_credits -
conn->total_credits;
conn->total_credits += credits_granted;
work->credits_granted += credits_granted;
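The crediting change removes the indirection: previously the interim cap was computed from the request's credit charge and only clamped against the connection total afterwards; now the grant is bounded directly by the credits still available on the connection, which is what the merge message calls the credit count leakage fix. A hedged sketch of the resulting arithmetic; grant_credits() is an illustrative wrapper, not a ksmbd function:

    /* Post-fix grant calculation; SMB2_NEGOTIATE is special-cased to a single
     * credit as in the hunk above.  The caller accounts the result into
     * conn->total_credits and work->credits_granted. */
    static unsigned short grant_credits(struct ksmbd_conn *conn,
                                        unsigned short credits_requested,
                                        bool is_negotiate)
    {
        unsigned short aux_max;

        if (is_negotiate)
            aux_max = 1;
        else
            aux_max = conn->vals->max_credits - conn->total_credits;

        return min_t(unsigned short, credits_requested, aux_max);
    }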
@@ -849,13 +845,14 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
struct smb2_preauth_neg_context *pneg_ctxt,
int len_of_ctxts)
int ctxt_len)
{
/*
* sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt,
* which may not be present. Only check for used HashAlgorithms[1].
*/
if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN)
if (ctxt_len <
sizeof(struct smb2_neg_context) + MIN_PREAUTH_CTXT_DATA_LEN)
return STATUS_INVALID_PARAMETER;
if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
@@ -867,15 +864,23 @@ static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
struct smb2_encryption_neg_context *pneg_ctxt,
int len_of_ctxts)
int ctxt_len)
{
int cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount);
int i, cphs_size = cph_cnt * sizeof(__le16);
int cph_cnt;
int i, cphs_size;
if (sizeof(struct smb2_encryption_neg_context) > ctxt_len) {
pr_err("Invalid SMB2_ENCRYPTION_CAPABILITIES context size\n");
return;
}
conn->cipher_type = 0;
cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount);
cphs_size = cph_cnt * sizeof(__le16);
if (sizeof(struct smb2_encryption_neg_context) + cphs_size >
len_of_ctxts) {
ctxt_len) {
pr_err("Invalid cipher count(%d)\n", cph_cnt);
return;
}
@@ -923,15 +928,22 @@ static void decode_compress_ctxt(struct ksmbd_conn *conn,
static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
struct smb2_signing_capabilities *pneg_ctxt,
int len_of_ctxts)
int ctxt_len)
{
int sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount);
int i, sign_alos_size = sign_algo_cnt * sizeof(__le16);
int sign_algo_cnt;
int i, sign_alos_size;
if (sizeof(struct smb2_signing_capabilities) > ctxt_len) {
pr_err("Invalid SMB2_SIGNING_CAPABILITIES context length\n");
return;
}
conn->signing_negotiated = false;
sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount);
sign_alos_size = sign_algo_cnt * sizeof(__le16);
if (sizeof(struct smb2_signing_capabilities) + sign_alos_size >
len_of_ctxts) {
ctxt_len) {
pr_err("Invalid signing algorithm count(%d)\n", sign_algo_cnt);
return;
}
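All three decoders now receive ctxt_len, the size of the single context being parsed (header plus DataLength), instead of len_of_ctxts, everything remaining in the packet. Each validates in two steps: the fixed part of the structure must fit before any field is read, and only then is the count-derived array checked against the same bound. A sketch of the pattern modelled on the encryption context; the helper name is illustrative:

    /* Two-step validation: never read a field the buffer cannot hold, and
     * never trust a count whose array would run past ctxt_len. */
    static bool encrypt_ctxt_fits(const struct smb2_encryption_neg_context *ctx,
                                  int ctxt_len)
    {
        int cph_cnt, cphs_size;

        if (ctxt_len < (int)sizeof(*ctx))
            return false;               /* fixed header truncated */

        cph_cnt = le16_to_cpu(ctx->CipherCount);
        cphs_size = cph_cnt * (int)sizeof(__le16);
        if ((int)sizeof(*ctx) + cphs_size > ctxt_len)
            return false;               /* cipher array overruns the context */

        return true;
    }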
@@ -969,18 +981,16 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
len_of_ctxts = len_of_smb - offset;
while (i++ < neg_ctxt_cnt) {
int clen;
/* check that offset is not beyond end of SMB */
if (len_of_ctxts == 0)
break;
int clen, ctxt_len;
if (len_of_ctxts < sizeof(struct smb2_neg_context))
break;
pctx = (struct smb2_neg_context *)((char *)pctx + offset);
clen = le16_to_cpu(pctx->DataLength);
if (clen + sizeof(struct smb2_neg_context) > len_of_ctxts)
ctxt_len = clen + sizeof(struct smb2_neg_context);
if (ctxt_len > len_of_ctxts)
break;
if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) {
@@ -991,7 +1001,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
status = decode_preauth_ctxt(conn,
(struct smb2_preauth_neg_context *)pctx,
len_of_ctxts);
ctxt_len);
if (status != STATUS_SUCCESS)
break;
} else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
@@ -1002,7 +1012,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
decode_encrypt_ctxt(conn,
(struct smb2_encryption_neg_context *)pctx,
len_of_ctxts);
ctxt_len);
} else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) {
ksmbd_debug(SMB,
"deassemble SMB2_COMPRESSION_CAPABILITIES context\n");
@@ -1021,9 +1031,10 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
} else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) {
ksmbd_debug(SMB,
"deassemble SMB2_SIGNING_CAPABILITIES context\n");
decode_sign_cap_ctxt(conn,
(struct smb2_signing_capabilities *)pctx,
len_of_ctxts);
ctxt_len);
}
/* offsets must be 8 byte aligned */
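deassemble_neg_contexts() drives those decoders; after the fix each iteration derives ctxt_len for the current context, refuses to parse one that would run past the remaining len_of_ctxts, and, per the comment above, advances by an 8-byte-aligned amount. A skeleton of the loop; the advance step after the alignment comment is cut off in the hunk, so it is sketched here as an assumption using the kernel's round_up() helper:

    /* Skeleton only; dispatch on pctx->ContextType is elided and the final
     * advance is an assumption, not quoted from the patch. */
    while (i++ < neg_ctxt_cnt) {
        int clen, ctxt_len;

        if (len_of_ctxts < (int)sizeof(struct smb2_neg_context))
            break;                      /* no room left for another header */

        pctx = (struct smb2_neg_context *)((char *)pctx + offset);
        clen = le16_to_cpu(pctx->DataLength);
        ctxt_len = clen + sizeof(struct smb2_neg_context);
        if (ctxt_len > len_of_ctxts)
            break;                      /* this context overruns the packet */

        /* ... decode pctx, passing ctxt_len ... */

        offset = round_up(ctxt_len, 8); /* contexts are 8-byte aligned */
        len_of_ctxts -= offset;
    }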
@@ -1057,16 +1068,16 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
return rc;
}
if (req->DialectCount == 0) {
pr_err("malformed packet\n");
smb2_buf_len = get_rfc1002_len(work->request_buf);
smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
if (smb2_neg_size > smb2_buf_len) {
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
rc = -EINVAL;
goto err_out;
}
smb2_buf_len = get_rfc1002_len(work->request_buf);
smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
if (smb2_neg_size > smb2_buf_len) {
if (req->DialectCount == 0) {
pr_err("malformed packet\n");
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
rc = -EINVAL;
goto err_out;
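The slab-out-of-bounds fix is purely an ordering change, which the interleaved old and new lines above make hard to read: the request buffer's length is now validated against the fixed negotiate header before DialectCount, a field of that header, is dereferenced. The post-fix order as a sketch, with the status/rc bookkeeping collapsed:

    /* Size-check the buffer before touching any field of the request. */
    smb2_buf_len = get_rfc1002_len(work->request_buf);
    smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
    if (smb2_neg_size > smb2_buf_len)
        goto err_out;                   /* header does not even fit in the buffer */

    if (req->DialectCount == 0)         /* only now is this field safe to read */
        goto err_out;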
@@ -4358,21 +4369,6 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
return 0;
}
static unsigned long long get_allocation_size(struct inode *inode,
struct kstat *stat)
{
unsigned long long alloc_size = 0;
if (!S_ISDIR(stat->mode)) {
if ((inode->i_blocks << 9) <= stat->size)
alloc_size = stat->size;
else
alloc_size = inode->i_blocks << 9;
}
return alloc_size;
}
static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
struct ksmbd_file *fp, void *rsp_org)
{
@@ -4387,7 +4383,7 @@ static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
delete_pending = ksmbd_inode_pending_delete(fp);
sinfo->AllocationSize = cpu_to_le64(get_allocation_size(inode, &stat));
sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
sinfo->DeletePending = delete_pending;
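The AllocationSize change (the xfstests 694 item in the merge message) drops get_allocation_size(), which effectively reported max(EndOfFile, allocated bytes) and so could never show a file as sparse. inode->i_blocks counts 512-byte units regardless of the filesystem block size, so shifting left by 9 yields the bytes actually allocated on disk, which for a sparse file may legitimately be smaller than EndOfFile. A one-line helper as a sketch; allocation_bytes() is illustrative, not a kernel or ksmbd function:

    /* Example: a 1 GiB sparse file with a single 4 KiB block written reports
     * EndOfFile = 1 GiB but AllocationSize = 4096. */
    static loff_t allocation_bytes(const struct inode *inode)
    {
        return (loff_t)inode->i_blocks << 9;
    }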
@@ -4452,7 +4448,7 @@ static int get_file_all_info(struct ksmbd_work *work,
file_info->Attributes = fp->f_ci->m_fattr;
file_info->Pad1 = 0;
file_info->AllocationSize =
cpu_to_le64(get_allocation_size(inode, &stat));
cpu_to_le64(inode->i_blocks << 9);
file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
file_info->NumberOfLinks =
cpu_to_le32(get_nlink(&stat) - delete_pending);
@@ -4641,7 +4637,7 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
file_info->ChangeTime = cpu_to_le64(time);
file_info->Attributes = fp->f_ci->m_fattr;
file_info->AllocationSize =
cpu_to_le64(get_allocation_size(inode, &stat));
cpu_to_le64(inode->i_blocks << 9);
file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
file_info->Reserved = cpu_to_le32(0);
rsp->OutputBufferLength =
@@ -5506,7 +5502,7 @@ static int smb2_create_link(struct ksmbd_work *work,
{
char *link_name = NULL, *target_name = NULL, *pathname = NULL;
struct path path;
bool file_present = true;
bool file_present = false;
int rc;
if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
@@ -5539,8 +5535,8 @@ static int smb2_create_link(struct ksmbd_work *work,
if (rc) {
if (rc != -ENOENT)
goto out;
file_present = false;
}
} else
file_present = true;
if (file_info->ReplaceIfExists) {
if (file_present) {
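This is the 'link' half of the two uninitialized-read fixes in the merge message: file_present now starts out false and is flipped to true only when the lookup actually succeeds, so the ReplaceIfExists handling below can never act on a path that was never initialized. The safe-default shape as a sketch; lookup_target() stands in for ksmbd's real lookup helper:

    /* Assume the link target is absent until the lookup proves otherwise. */
    bool file_present = false;
    struct path path;
    int rc;

    rc = lookup_target(link_name, &path);   /* illustrative helper */
    if (rc) {
        if (rc != -ENOENT)
            goto out;                       /* hard failure, path never valid */
        /* -ENOENT: file_present simply stays false */
    } else {
        file_present = true;                /* path is valid from here on */
    }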
@@ -86,12 +86,14 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
err = vfs_path_parent_lookup(filename, flags,
&parent_path, &last, &type,
root_share_path);
if (err) {
putname(filename);
if (err)
return err;
}
if (unlikely(type != LAST_NORM)) {
path_put(&parent_path);
putname(filename);
return -ENOENT;
}
@@ -108,12 +110,14 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
path->dentry = d;
path->mnt = share_conf->vfs_path.mnt;
path_put(&parent_path);
putname(filename);
return 0;
err_out:
inode_unlock(parent_path.dentry->d_inode);
path_put(&parent_path);
putname(filename);
return -ENOENT;
}
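The putname change is a lifetime rule: vfs_path_parent_lookup() fills 'last' with a qstr whose name points into the buffer owned by 'filename', so the old code freed that buffer immediately after the lookup while the last component was still needed. The rule the fix enforces, sketched with the error handling trimmed (the call matches the signature visible in the hunk):

    /* 'last.name' points into the buffer owned by 'filename', so putname()
     * must be the final step on every path out of the function. */
    err = vfs_path_parent_lookup(filename, flags, &parent_path, &last,
                                 &type, root_share_path);
    if (err) {
        putname(filename);          /* nothing has used 'last' yet */
        return err;
    }

    /* ... look up and lock the child named by 'last' ... */

    putname(filename);              /* only after the last use of 'last' */
    return 0;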
@@ -743,6 +747,7 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
rd.new_dir = new_path.dentry->d_inode,
rd.new_dentry = new_dentry,
rd.flags = flags,
rd.delegated_inode = NULL,
err = vfs_rename(&rd);
if (err)
ksmbd_debug(VFS, "vfs_rename failed err %d\n", err);
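ksmbd_vfs_rename() fills struct renamedata one member at a time, and before this one-liner rd.delegated_inode was whatever happened to be on the stack; vfs_rename() reads, and can write through, that pointer when breaking file delegations. Assigning NULL is the minimal fix; an equivalent alternative, sketched here with the other assignments abbreviated, is a designated initializer, which zeroes every member that is not named:

    /* Members not listed, including .delegated_inode, start out as 0/NULL,
     * so nothing is left as stack garbage. */
    struct renamedata rd = {
        /* ... the remaining members filled as in the function above ... */
        .new_dir        = new_path.dentry->d_inode,
        .new_dentry     = new_dentry,
        .flags          = flags,
    };
    err = vfs_rename(&rd);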