Commit 8addebc1 authored by Thomas Gleixner, committed by Martin K. Petersen

scsi: bnx2fc: Plug CPU hotplug race

bnx2fc_process_new_cqes() has protection against CPU hotplug which relies
on the per-CPU thread pointer. This protection is racy because only part
of it runs with the per-CPU fp_work_lock held.

If the CPU is unplugged after the lock is dropped, the wakeup code can
dereference a NULL pointer or access freed and potentially reused memory.

Restructure the code so that the thread check and the wakeup both happen
with the fp_work_lock held.
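
Condensed from the hunk below, the fixed flow keeps the thread check, the list
insertion and the wakeup inside a single fp_work_lock critical section. This is
only a sketch of the new bnx2fc_pending_work() helper, not standalone code; it
relies on the driver's existing per-CPU bnx2fc_percpu structure and work list:

    fps = &per_cpu(bnx2fc_percpu, cpu);
    spin_lock_bh(&fps->fp_work_lock);
    if (fps->iothread) {
        work = bnx2fc_alloc_work(tgt, wqe);
        if (work) {
            list_add_tail(&work->list, &fps->work_list);
            /* Wake up the iothread while still holding the lock, so a
             * concurrent CPU unplug cannot free it under us. */
            wake_up_process(fps->iothread);
            spin_unlock_bh(&fps->fp_work_lock);
            return;
        }
    }
    spin_unlock_bh(&fps->fp_work_lock);
    /* No iothread (CPU offline) or allocation failure: process inline. */
    bnx2fc_process_cq_compl(tgt, wqe);
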
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Chad Dupuis <chad.dupuis@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 2c675218
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 	return work;
 }
 
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+	unsigned int cpu = wqe % num_possible_cpus();
+	struct bnx2fc_percpu_s *fps;
+	struct bnx2fc_work *work;
+
+	fps = &per_cpu(bnx2fc_percpu, cpu);
+	spin_lock_bh(&fps->fp_work_lock);
+	if (fps->iothread) {
+		work = bnx2fc_alloc_work(tgt, wqe);
+		if (work) {
+			list_add_tail(&work->list, &fps->work_list);
+			wake_up_process(fps->iothread);
+			spin_unlock_bh(&fps->fp_work_lock);
+			return;
+		}
+	}
+	spin_unlock_bh(&fps->fp_work_lock);
+	bnx2fc_process_cq_compl(tgt, wqe);
+}
+
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 {
 	struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 			/* Unsolicited event notification */
 			bnx2fc_process_unsol_compl(tgt, wqe);
 		} else {
-			/* Pending work request completion */
-			struct bnx2fc_work *work = NULL;
-			struct bnx2fc_percpu_s *fps = NULL;
-			unsigned int cpu = wqe % num_possible_cpus();
-
-			fps = &per_cpu(bnx2fc_percpu, cpu);
-			spin_lock_bh(&fps->fp_work_lock);
-			if (unlikely(!fps->iothread))
-				goto unlock;
-
-			work = bnx2fc_alloc_work(tgt, wqe);
-			if (work)
-				list_add_tail(&work->list,
-					      &fps->work_list);
-unlock:
-			spin_unlock_bh(&fps->fp_work_lock);
-
-			/* Pending work request completion */
-			if (fps->iothread && work)
-				wake_up_process(fps->iothread);
-			else
-				bnx2fc_process_cq_compl(tgt, wqe);
+			bnx2fc_pending_work(tgt, wqe);
 			num_free_sqes++;
 		}
 		cqe++;
...