Commit 463429f8 authored by Kashyap Desai's avatar Kashyap Desai Committed by Martin K. Petersen

scsi: mpi3mr: Add support for threaded ISR

Register driver for threaded interrupts.

By default the driver will attempt I/O completion from interrupt context
(primary handler). Since the driver tracks per reply queue outstanding
I/Os, it will schedule threaded ISR if there are any outstanding I/Os
expected on that particular reply queue.

Threaded ISR (secondary handler) will loop for I/O completion as long as
there are outstanding I/Os (a speculative method using the same per-reply-queue
outstanding counter) or until it has completed a fixed number of commands
(something like a budget).

Link: https://lore.kernel.org/r/20210520152545.2710479-18-kashyap.desai@broadcom.com
Cc: sathya.prakash@broadcom.com
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 392bbeb8
...@@ -149,6 +149,10 @@ extern struct list_head mrioc_list; ...@@ -149,6 +149,10 @@ extern struct list_head mrioc_list;
/* Default target device queue depth */ /* Default target device queue depth */
#define MPI3MR_DEFAULT_SDEV_QD 32 #define MPI3MR_DEFAULT_SDEV_QD 32
/* Definitions for Threaded IRQ poll*/
#define MPI3MR_IRQ_POLL_SLEEP 2
#define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8
/* SGE Flag definition */ /* SGE Flag definition */
#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \ #define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
(MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \ (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \
...@@ -300,6 +304,9 @@ struct op_req_qinfo { ...@@ -300,6 +304,9 @@ struct op_req_qinfo {
* @q_segment_list: Segment list base virtual address * @q_segment_list: Segment list base virtual address
* @q_segment_list_dma: Segment list base DMA address * @q_segment_list_dma: Segment list base DMA address
* @ephase: Expected phased identifier for the reply queue * @ephase: Expected phased identifier for the reply queue
* @pend_ios: Number of IOs pending in HW for this queue
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
*/ */
struct op_reply_qinfo { struct op_reply_qinfo {
u16 ci; u16 ci;
...@@ -311,6 +318,9 @@ struct op_reply_qinfo { ...@@ -311,6 +318,9 @@ struct op_reply_qinfo {
void *q_segment_list; void *q_segment_list;
dma_addr_t q_segment_list_dma; dma_addr_t q_segment_list_dma;
u8 ephase; u8 ephase;
atomic_t pend_ios;
bool enable_irq_poll;
atomic_t in_use;
}; };
/** /**
...@@ -562,6 +572,7 @@ struct scmd_priv { ...@@ -562,6 +572,7 @@ struct scmd_priv {
* @shost: Scsi_Host pointer * @shost: Scsi_Host pointer
* @id: Controller ID * @id: Controller ID
* @cpu_count: Number of online CPUs * @cpu_count: Number of online CPUs
* @irqpoll_sleep: usleep unit used in threaded isr irqpoll
* @name: Controller ASCII name * @name: Controller ASCII name
* @driver_name: Driver ASCII name * @driver_name: Driver ASCII name
* @sysif_regs: System interface registers virtual address * @sysif_regs: System interface registers virtual address
...@@ -663,6 +674,7 @@ struct mpi3mr_ioc { ...@@ -663,6 +674,7 @@ struct mpi3mr_ioc {
u8 id; u8 id;
int cpu_count; int cpu_count;
bool enable_segqueue; bool enable_segqueue;
u32 irqpoll_sleep;
char name[MPI3MR_NAME_LENGTH]; char name[MPI3MR_NAME_LENGTH];
char driver_name[MPI3MR_NAME_LENGTH]; char driver_name[MPI3MR_NAME_LENGTH];
......
...@@ -345,12 +345,16 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, ...@@ -345,12 +345,16 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
reply_qidx = op_reply_q->qid - 1; reply_qidx = op_reply_q->qid - 1;
if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
return 0;
exp_phase = op_reply_q->ephase; exp_phase = op_reply_q->ephase;
reply_ci = op_reply_q->ci; reply_ci = op_reply_q->ci;
reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci); reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
if ((le16_to_cpu(reply_desc->reply_flags) & if ((le16_to_cpu(reply_desc->reply_flags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) { MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
atomic_dec(&op_reply_q->in_use);
return 0; return 0;
} }
...@@ -361,6 +365,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, ...@@ -361,6 +365,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci)); WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma, mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
reply_qidx); reply_qidx);
atomic_dec(&op_reply_q->pend_ios);
if (reply_dma) if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma); mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++; num_op_reply++;
...@@ -375,6 +380,14 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, ...@@ -375,6 +380,14 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
if ((le16_to_cpu(reply_desc->reply_flags) & if ((le16_to_cpu(reply_desc->reply_flags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break; break;
/*
* Exit completion loop to avoid CPU lockup
* Ensure remaining completion happens from threaded ISR.
*/
if (num_op_reply > mrioc->max_host_ios) {
intr_info->op_reply_q->enable_irq_poll = true;
break;
}
} while (1); } while (1);
...@@ -383,6 +396,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, ...@@ -383,6 +396,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
op_reply_q->ci = reply_ci; op_reply_q->ci = reply_ci;
op_reply_q->ephase = exp_phase; op_reply_q->ephase = exp_phase;
atomic_dec(&op_reply_q->in_use);
return num_op_reply; return num_op_reply;
} }
...@@ -391,7 +405,7 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata) ...@@ -391,7 +405,7 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
struct mpi3mr_intr_info *intr_info = privdata; struct mpi3mr_intr_info *intr_info = privdata;
struct mpi3mr_ioc *mrioc; struct mpi3mr_ioc *mrioc;
u16 midx; u16 midx;
u32 num_admin_replies = 0; u32 num_admin_replies = 0, num_op_reply = 0;
if (!intr_info) if (!intr_info)
return IRQ_NONE; return IRQ_NONE;
...@@ -405,8 +419,10 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata) ...@@ -405,8 +419,10 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
if (!midx) if (!midx)
num_admin_replies = mpi3mr_process_admin_reply_q(mrioc); num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
if (intr_info->op_reply_q)
num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
if (num_admin_replies) if (num_admin_replies || num_op_reply)
return IRQ_HANDLED; return IRQ_HANDLED;
else else
return IRQ_NONE; return IRQ_NONE;
...@@ -415,15 +431,32 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata) ...@@ -415,15 +431,32 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
static irqreturn_t mpi3mr_isr(int irq, void *privdata) static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{ {
struct mpi3mr_intr_info *intr_info = privdata; struct mpi3mr_intr_info *intr_info = privdata;
struct mpi3mr_ioc *mrioc;
u16 midx;
int ret; int ret;
if (!intr_info) if (!intr_info)
return IRQ_NONE; return IRQ_NONE;
mrioc = intr_info->mrioc;
midx = intr_info->msix_index;
/* Call primary ISR routine */ /* Call primary ISR routine */
ret = mpi3mr_isr_primary(irq, privdata); ret = mpi3mr_isr_primary(irq, privdata);
return ret; /*
* If more IOs are expected, schedule IRQ polling thread.
* Otherwise exit from ISR.
*/
if (!intr_info->op_reply_q)
return ret;
if (!intr_info->op_reply_q->enable_irq_poll ||
!atomic_read(&intr_info->op_reply_q->pend_ios))
return ret;
disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
return IRQ_WAKE_THREAD;
} }
/** /**
...@@ -438,6 +471,36 @@ static irqreturn_t mpi3mr_isr(int irq, void *privdata) ...@@ -438,6 +471,36 @@ static irqreturn_t mpi3mr_isr(int irq, void *privdata)
*/ */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{ {
struct mpi3mr_intr_info *intr_info = privdata;
struct mpi3mr_ioc *mrioc;
u16 midx;
u32 num_op_reply = 0;
if (!intr_info || !intr_info->op_reply_q)
return IRQ_NONE;
mrioc = intr_info->mrioc;
midx = intr_info->msix_index;
/* Poll for pending IOs completions */
do {
if (!mrioc->intr_enabled)
break;
if (!midx)
mpi3mr_process_admin_reply_q(mrioc);
if (intr_info->op_reply_q)
num_op_reply +=
mpi3mr_process_op_reply_q(mrioc, intr_info);
usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);
} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
(num_op_reply < mrioc->max_host_ios));
intr_info->op_reply_q->enable_irq_poll = false;
enable_irq(pci_irq_vector(mrioc->pdev, midx));
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -1147,6 +1210,9 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) ...@@ -1147,6 +1210,9 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
op_reply_q->ci = 0; op_reply_q->ci = 0;
op_reply_q->ephase = 1; op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
atomic_set(&op_reply_q->in_use, 0);
op_reply_q->enable_irq_poll = false;
if (!op_reply_q->q_segments) { if (!op_reply_q->q_segments) {
retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
...@@ -1465,6 +1531,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, ...@@ -1465,6 +1531,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
pi = 0; pi = 0;
op_req_q->pi = pi; op_req_q->pi = pi;
if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
> MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
writel(op_req_q->pi, writel(op_req_q->pi,
&mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
...@@ -2795,6 +2865,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init) ...@@ -2795,6 +2865,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
u32 ioc_status, ioc_config, i; u32 ioc_status, ioc_config, i;
struct mpi3_ioc_facts_data facts_data; struct mpi3_ioc_facts_data facts_data;
mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
mrioc->change_count = 0; mrioc->change_count = 0;
if (!re_init) { if (!re_init) {
mrioc->cpu_count = num_online_cpus(); mrioc->cpu_count = num_online_cpus();
...@@ -3081,6 +3152,8 @@ static void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) ...@@ -3081,6 +3152,8 @@ static void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
mrioc->op_reply_qinfo[i].ci = 0; mrioc->op_reply_qinfo[i].ci = 0;
mrioc->op_reply_qinfo[i].num_replies = 0; mrioc->op_reply_qinfo[i].num_replies = 0;
mrioc->op_reply_qinfo[i].ephase = 0; mrioc->op_reply_qinfo[i].ephase = 0;
atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
mpi3mr_memset_op_reply_q_buffers(mrioc, i); mpi3mr_memset_op_reply_q_buffers(mrioc, i);
mrioc->req_qinfo[i].ci = 0; mrioc->req_qinfo[i].ci = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment