Commit 36322523 authored by Namjae Jeon, committed by Steve French

ksmbd: fix UAF issue from opinfo->conn

If opinfo->conn is a connection other than the current one, that
connection can be disconnected, and its conn freed, while ksmbd is
sending an oplock break request to the client on the current
connection. The freed ksmbd_conn is then used to send the oplock
break request, causing a use-after-free. When getting an opinfo from
the list, ksmbd now checks whether the opinfo's connection is being
released. If it is not, ->r_count is increased so that the connection
cannot be freed until the request is done.

Cc: stable@vger.kernel.org
Reported-by: Per Forlin <per.forlin@axis.com>
Tested-by: Per Forlin <per.forlin@axis.com>
Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
parent 0512a5f8
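
The shape of the fix: consumers pin opinfo->conn by bumping
conn->r_count before using it, then re-check ksmbd_conn_releasing()
and back off if teardown has already begun; the teardown side waits
until r_count drains before freeing the connection. As a rough,
compilable userspace model of that pattern (this is not ksmbd code;
conn_get, conn_put, conn_release, and drop_pin are invented names for
illustration):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	/* Userspace stand-in for ksmbd_conn: r_count pins the object,
	 * releasing marks that teardown has begun, and the mutex plus
	 * condvar play the role of the kernel's r_count_q waitqueue. */
	struct conn {
		atomic_int r_count;
		atomic_bool releasing;
		pthread_mutex_t lock;
		pthread_cond_t r_count_q;
	};

	/* Drop one pin and wake a teardown thread waiting for
	 * r_count == 0. Decrementing under the lock means the waiter
	 * cannot miss the final wakeup. */
	static void drop_pin(struct conn *c)
	{
		pthread_mutex_lock(&c->lock);
		if (atomic_fetch_sub(&c->r_count, 1) == 1)
			pthread_cond_signal(&c->r_count_q);
		pthread_mutex_unlock(&c->lock);
	}

	/* Consumer side, mirroring the new opinfo_get_list() logic:
	 * pin first, then re-check releasing and back off if teardown
	 * already started. In ksmbd this runs under rcu_read_lock(),
	 * which is what keeps the conn object itself alive here. */
	static bool conn_get(struct conn *c)
	{
		atomic_fetch_add(&c->r_count, 1);
		if (atomic_load(&c->releasing)) {
			drop_pin(c);
			return false;
		}
		return true;
	}

	/* Mirrors opinfo_conn_put(), minus the opinfo refcount. */
	static void conn_put(struct conn *c)
	{
		drop_pin(c);
	}

	/* Teardown side: advertise releasing, then wait for every pin
	 * to drain before the connection may be freed. */
	static void conn_release(struct conn *c)
	{
		atomic_store(&c->releasing, true);
		pthread_mutex_lock(&c->lock);
		while (atomic_load(&c->r_count) > 0)
			pthread_cond_wait(&c->r_count_q, &c->lock);
		pthread_mutex_unlock(&c->lock);
		/* now safe to free *c */
	}

The same pin-then-recheck appears inline in smb_break_all_levII_oplock()
below, since that path walks the whole m_op_list instead of taking a
single opinfo.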
@@ -157,13 +157,42 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
 	rcu_read_lock();
 	opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
 					op_entry);
-	if (opinfo && !atomic_inc_not_zero(&opinfo->refcount))
-		opinfo = NULL;
+	if (opinfo) {
+		if (!atomic_inc_not_zero(&opinfo->refcount))
+			opinfo = NULL;
+		else {
+			atomic_inc(&opinfo->conn->r_count);
+			if (ksmbd_conn_releasing(opinfo->conn)) {
+				atomic_dec(&opinfo->conn->r_count);
+				atomic_dec(&opinfo->refcount);
+				opinfo = NULL;
+			}
+		}
+	}
+
 	rcu_read_unlock();
 
 	return opinfo;
 }
 
+static void opinfo_conn_put(struct oplock_info *opinfo)
+{
+	struct ksmbd_conn *conn;
+
+	if (!opinfo)
+		return;
+
+	conn = opinfo->conn;
+	/*
+	 * Checking waitqueue to dropping pending requests on
+	 * disconnection. waitqueue_active is safe because it
+	 * uses atomic operation for condition.
+	 */
+	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+		wake_up(&conn->r_count_q);
+	opinfo_put(opinfo);
+}
+
 void opinfo_put(struct oplock_info *opinfo)
 {
 	if (!atomic_dec_and_test(&opinfo->refcount))
@@ -666,13 +695,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
 
 out:
 	ksmbd_free_work_struct(work);
-	/*
-	 * Checking waitqueue to dropping pending requests on
-	 * disconnection. waitqueue_active is safe because it
-	 * uses atomic operation for condition.
-	 */
-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
-		wake_up(&conn->r_count_q);
 }
 
 /**
@@ -706,7 +728,6 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
 	work->conn = conn;
 	work->sess = opinfo->sess;
 
-	atomic_inc(&conn->r_count);
 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
 		INIT_WORK(&work->work, __smb2_oplock_break_noti);
 		ksmbd_queue_work(work);
@@ -776,13 +797,6 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
 
 out:
 	ksmbd_free_work_struct(work);
-	/*
-	 * Checking waitqueue to dropping pending requests on
-	 * disconnection. waitqueue_active is safe because it
-	 * uses atomic operation for condition.
-	 */
-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
-		wake_up(&conn->r_count_q);
 }
 
 /**
@@ -822,7 +836,6 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
 	work->conn = conn;
 	work->sess = opinfo->sess;
 
-	atomic_inc(&conn->r_count);
 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
 		list_for_each_safe(tmp, t, &opinfo->interim_list) {
 			struct ksmbd_work *in_work;
@@ -1144,8 +1157,10 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
 	}
 	prev_opinfo = opinfo_get_list(ci);
 	if (!prev_opinfo ||
-	    (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx))
+	    (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
+		opinfo_conn_put(prev_opinfo);
 		goto set_lev;
+	}
 	prev_op_has_lease = prev_opinfo->is_lease;
 	if (prev_op_has_lease)
 		prev_op_state = prev_opinfo->o_lease->state;
@@ -1153,19 +1168,19 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
 	if (share_ret < 0 &&
 	    prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
 		err = share_ret;
-		opinfo_put(prev_opinfo);
+		opinfo_conn_put(prev_opinfo);
 		goto err_out;
 	}
 
 	if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
 	    prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
-		opinfo_put(prev_opinfo);
+		opinfo_conn_put(prev_opinfo);
 		goto op_break_not_needed;
 	}
 
 	list_add(&work->interim_entry, &prev_opinfo->interim_list);
 	err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
-	opinfo_put(prev_opinfo);
+	opinfo_conn_put(prev_opinfo);
 	if (err == -ENOENT)
 		goto set_lev;
 	/* Check all oplock was freed by close */
@@ -1228,14 +1243,14 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work,
 		return;
 	if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
 	    brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
-		opinfo_put(brk_opinfo);
+		opinfo_conn_put(brk_opinfo);
 		return;
 	}
 
 	brk_opinfo->open_trunc = is_trunc;
 	list_add(&work->interim_entry, &brk_opinfo->interim_list);
 	oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
-	opinfo_put(brk_opinfo);
+	opinfo_conn_put(brk_opinfo);
 }
 
 /**
@@ -1263,6 +1278,13 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
 	list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
 		if (!atomic_inc_not_zero(&brk_op->refcount))
 			continue;
+
+		atomic_inc(&brk_op->conn->r_count);
+		if (ksmbd_conn_releasing(brk_op->conn)) {
+			atomic_dec(&brk_op->conn->r_count);
+			continue;
+		}
+
 		rcu_read_unlock();
 		if (brk_op->is_lease && (brk_op->o_lease->state &
 		    (~(SMB2_LEASE_READ_CACHING_LE |
@@ -1292,7 +1314,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
 		brk_op->open_trunc = is_trunc;
 		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
 next:
-		opinfo_put(brk_op);
+		opinfo_conn_put(brk_op);
 		rcu_read_lock();
 	}
 	rcu_read_unlock();
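
Note that the wake_up() calls removed from __smb2_oplock_break_noti()
and __smb2_lease_break_noti() are not lost: they move into
opinfo_conn_put(), so every path that pinned the conn through
opinfo_get_list() drops r_count and wakes r_count_q exactly once,
whether or not a break notification was actually queued. The wait
this pairs with is not part of the diff; on the teardown side it
presumably has this shape (ksmbd's actual code may differ):

	/* teardown side, sketched: block until all pins are dropped */
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);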