Commit a7b94c15 authored by Justin Tee, committed by Martin K. Petersen

scsi: lpfc: Replace blk_irq_poll intr handler with threaded IRQ

It has been determined that the threaded IRQ API achieves effectively the
same performance as blk_irq_poll.  Because blk_irq_poll work is mostly
scheduled by ksoftirqd and handled in softirq context, it is not entirely
desirable in a Fibre Channel driver context.  A threaded IRQ model is a
cleaner fit.  This patch replaces the blk_irq_poll logic with threaded
IRQs.
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20230417191558.83100-7-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 5fc849d8
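For context, a minimal sketch of the threaded IRQ pattern this patch adopts
is shown below. It is illustrative only: the my_hw_* names and setup helper
are hypothetical, not lpfc code. The hard handler does the minimum work in
interrupt context and returns IRQ_WAKE_THREAD; the bulk of the completion
processing then runs in a dedicated, schedulable kernel thread rather than
in softirq context, and IRQF_ONESHOT keeps the line masked until the thread
handler finishes.

#include <linux/interrupt.h>

/* Hard handler: runs in hard-IRQ context, defers the heavy work */
static irqreturn_t my_hw_intr(int irq, void *dev_id)
{
	/* Ack/quiesce the hardware here if needed, then wake the thread */
	return IRQ_WAKE_THREAD;
}

/* Thread handler: runs in a dedicated kernel thread, preemptible */
static irqreturn_t my_hw_intr_thread(int irq, void *dev_id)
{
	/* Process completions here without softirq-context restrictions */
	return IRQ_HANDLED;
}

static int my_hw_setup_irq(unsigned int irq, void *dev_id)
{
	/* IRQF_ONESHOT: line stays masked until the thread handler returns */
	return request_threaded_irq(irq, my_hw_intr, my_hw_intr_thread,
				    IRQF_ONESHOT, "my_hw", dev_id);
}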
@@ -247,6 +247,7 @@ irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
 irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id);
 int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
		     uint32_t len);
@@ -1279,7 +1279,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 /*
  * lpfc_idle_stat_delay_work - idle_stat tracking
  *
- * This routine tracks per-cq idle_stat and determines polling decisions.
+ * This routine tracks per-eq idle_stat and determines polling decisions.
  *
  * Return codes:
  *   None
@@ -1290,7 +1290,7 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
-	struct lpfc_queue *cq;
+	struct lpfc_queue *eq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
@@ -1306,10 +1306,10 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
-		cq = hdwq->io_cq;
+		eq = hdwq->hba_eq;

-		/* Skip if we've already handled this cq's primary CPU */
-		if (cq->chann != i)
+		/* Skip if we've already handled this eq's primary CPU */
+		if (eq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -1333,9 +1333,9 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
		idle_percent = 100 - idle_percent;

		if (idle_percent < 15)
-			cq->poll_mode = LPFC_QUEUE_WORK;
+			eq->poll_mode = LPFC_QUEUE_WORK;
		else
-			cq->poll_mode = LPFC_IRQ_POLL;
+			eq->poll_mode = LPFC_THREADED_IRQ;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
@@ -4357,6 +4357,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	int idx, cnt;
+	unsigned long iflags;

	qp = phba->sli4_hba.hdwq;
	cnt = 0;
@@ -4371,12 +4372,13 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
			lpfc_cmd->hdwq_no = idx;
			lpfc_cmd->hdwq = qp;
			lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
-			spin_lock(&qp->io_buf_list_put_lock);
+			spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
			list_add_tail(&lpfc_cmd->list,
				      &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			qp->total_io_bufs++;
-			spin_unlock(&qp->io_buf_list_put_lock);
+			spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
					       iflags);
		}
	}
	return cnt;
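One note on the locking change above: the put-list lock is switched to the
irqsave variants so it can be taken safely regardless of the caller's
interrupt state, for instance when the same lock is also contended from a
context with interrupts disabled. A minimal, self-contained sketch of the
pattern follows; the my_dev/my_buf names are hypothetical, not lpfc
structures.

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t	 list_lock;	/* protects free_list */
	struct list_head free_list;
};

struct my_buf {
	struct list_head list;
};

/* Queue a buffer; safe even if list_lock is also taken in IRQ context */
static void my_dev_put_buf(struct my_dev *dev, struct my_buf *buf)
{
	unsigned long flags;

	/* Disable local interrupts and save the previous state */
	spin_lock_irqsave(&dev->list_lock, flags);
	list_add_tail(&buf->list, &dev->free_list);
	spin_unlock_irqrestore(&dev->list_lock, flags);
}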
@@ -13117,8 +13119,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
		}
		eqhdl->irq = rc;

-		rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
-				 name, eqhdl);
+		rc = request_threaded_irq(eqhdl->irq,
+					  &lpfc_sli4_hba_intr_handler,
+					  &lpfc_sli4_hba_intr_handler_th,
+					  IRQF_ONESHOT, name, eqhdl);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
@@ -140,7 +140,7 @@ struct lpfc_rqb {
 enum lpfc_poll_mode {
	LPFC_QUEUE_WORK,
-	LPFC_IRQ_POLL
+	LPFC_THREADED_IRQ,
 };

 struct lpfc_idle_stat {
@@ -279,8 +279,6 @@ struct lpfc_queue {
	struct list_head _poll_list;
	void **q_pgs;	/* array to index entries per page */

-#define LPFC_IRQ_POLL_WEIGHT 256
-	struct irq_poll iop;
	enum lpfc_poll_mode poll_mode;
 };