Commit 6a828b0f authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Support non-uniform allocation of MSIX vectors to hardware queues

So far, MSIX vector allocation has assumed a 1:1 relationship with hardware
queues. However, fewer MSIX vectors may be allocated than there are hardware
queues for several reasons, such as the platform running out of vectors or the
adapter's limit being lower than the CPU count.

This patch reworks the MSIX/EQ relationship with the per-CPU hardware
queues so they can function independently. MSIX vectors are split equitably
between CPU sockets/cores, and the per-CPU hardware queues are then mapped
to the vectors that serve them most efficiently (illustrated in the sketch
below the commit header).
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent b3295c2a
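To make the mapping described above concrete, here is a minimal, stand-alone
sketch (not the driver's actual algorithm, which weighs sockets and cores) of
handling fewer IRQ vectors/EQs than hardware queues: vectors are dealt out
round-robin across CPUs, and each per-CPU hardware queue records which EQ
services it, mirroring the eq/hdwq fields added to lpfc_vector_map_info later
in this patch.

#include <stdio.h>

/* Illustrative only: one hardware queue per CPU and a smaller pool of
 * IRQ vectors (EQs); the real driver computes a topology-aware layout. */
struct cpu_map_entry {
	unsigned short hdwq;	/* hardware queue used by this CPU */
	unsigned short eq;	/* EQ / MSI-X vector servicing that queue */
};

int main(void)
{
	enum { NUM_CPUS = 8, NUM_IRQ_VECTORS = 3 };	/* hypothetical counts */
	struct cpu_map_entry map[NUM_CPUS];
	int cpu;

	for (cpu = 0; cpu < NUM_CPUS; cpu++) {
		map[cpu].hdwq = (unsigned short)cpu;	/* 1 hdwq per CPU */
		map[cpu].eq = (unsigned short)(cpu % NUM_IRQ_VECTORS);
	}

	for (cpu = 0; cpu < NUM_CPUS; cpu++)
		printf("CPU %02d hdwq %04d EQ %04d\n",
		       cpu, map[cpu].hdwq, map[cpu].eq);
	return 0;
}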
......@@ -84,8 +84,6 @@ struct lpfc_sli2_slim;
#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
#define LPFC_LOOK_AHEAD_OFF 0 /* Look ahead logic is turned off */
/* Error Attention event polling interval */
#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
......@@ -821,6 +819,7 @@ struct lpfc_hba {
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
......@@ -1042,6 +1041,9 @@ struct lpfc_hba {
struct dentry *debug_nvmeio_trc;
struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
struct dentry *debug_hdwqinfo;
#ifdef LPFC_HDWQ_LOCK_STAT
struct dentry *debug_lockstat;
#endif
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
......@@ -1161,6 +1163,7 @@ struct lpfc_hba {
#define LPFC_CHECK_NVME_IO 1
#define LPFC_CHECK_NVMET_RCV 2
#define LPFC_CHECK_NVMET_IO 4
#define LPFC_CHECK_SCSI_IO 8
uint16_t ktime_on;
uint64_t ktime_data_samples;
uint64_t ktime_status_samples;
......
......@@ -4958,7 +4958,7 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
phba->cfg_fcp_imax = (uint32_t)val;
phba->initial_imax = phba->cfg_fcp_imax;
for (i = 0; i < phba->cfg_hdw_queue; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
val);
......@@ -5059,13 +5059,6 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
phba->cfg_fcp_cpu_map,
phba->sli4_hba.num_online_cpu);
break;
case 2:
len += snprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: Driver centric mapping (%d): "
"%d online CPUs\n",
phba->cfg_fcp_cpu_map,
phba->sli4_hba.num_online_cpu);
break;
}
while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
......@@ -5076,35 +5069,35 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
len += snprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d hdwq None "
"physid %d coreid %d\n",
"physid %d coreid %d ht %d\n",
phba->sli4_hba.curr_disp_cpu,
cpup->phys_id,
cpup->core_id);
cpup->core_id, cpup->hyper);
else
len += snprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d hdwq %04d "
"physid %d coreid %d\n",
"CPU %02d EQ %04d hdwq %04d "
"physid %d coreid %d ht %d\n",
phba->sli4_hba.curr_disp_cpu,
cpup->hdwq, cpup->phys_id,
cpup->core_id);
cpup->eq, cpup->hdwq, cpup->phys_id,
cpup->core_id, cpup->hyper);
} else {
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
len += snprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d hdwq None "
"physid %d coreid %d IRQ %d\n",
"physid %d coreid %d ht %d IRQ %d\n",
phba->sli4_hba.curr_disp_cpu,
cpup->phys_id,
cpup->core_id, cpup->irq);
cpup->core_id, cpup->hyper, cpup->irq);
else
len += snprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d hdwq %04d "
"physid %d coreid %d IRQ %d\n",
"CPU %02d EQ %04d hdwq %04d "
"physid %d coreid %d ht %d IRQ %d\n",
phba->sli4_hba.curr_disp_cpu,
cpup->hdwq, cpup->phys_id,
cpup->core_id, cpup->irq);
cpup->eq, cpup->hdwq, cpup->phys_id,
cpup->core_id, cpup->hyper, cpup->irq);
}
phba->sli4_hba.curr_disp_cpu++;
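For reference, a tiny stand-alone program using the same format string as the
IRQ-bearing branch above; the numeric values are made up purely to show the
new EQ and hyperthread ("ht") columns.

#include <stdio.h>

int main(void)
{
	/* Hypothetical values only; not taken from real hardware */
	int cpu = 4, eq = 2, hdwq = 4, physid = 0, coreid = 4, hyper = 0, irq = 70;

	printf("CPU %02d EQ %04d hdwq %04d "
	       "physid %d coreid %d ht %d IRQ %d\n",
	       cpu, eq, hdwq, physid, coreid, hyper, irq);
	/* Prints: CPU 04 EQ 0002 hdwq 0004 physid 0 coreid 4 ht 0 IRQ 70 */
	return 0;
}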
......@@ -5146,14 +5139,13 @@ lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
# for the HBA.
#
# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
# Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
# 0 - Do not affinitze IRQ vectors
# 1 - Affintize HBA vectors with respect to each HBA
# (start with CPU0 for each HBA)
# 2 - Affintize HBA vectors with respect to the entire driver
# (round robin thru all CPUs across all HBAs)
# This also defines how Hardware Queues are mapped to specific CPUs.
*/
static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_fcp_cpu_map,
"Defines how to map CPUs to IRQ vectors per HBA");
......@@ -5187,7 +5179,7 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3326 lpfc_fcp_cpu_map: %d out of range, using "
"default\n", val);
phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
return 0;
}
......@@ -5308,7 +5300,7 @@ LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
* CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
* through WQs will be used.
*/
LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_HDWQ,
LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
LPFC_FCP_SCHED_BY_HDWQ,
LPFC_FCP_SCHED_BY_CPU,
"Determine scheduling algorithm for "
......@@ -5474,24 +5466,40 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
"Embed NVME Command in WQE");
/*
* lpfc_hdw_queue: Set the number of IO channels the driver
* lpfc_hdw_queue: Set the number of Hardware Queues the driver
* will advertise it supports to the NVME and SCSI layers. This also
* will map to the number of EQ/CQ/WQs the driver will create.
* will map to the number of CQ/WQ pairs the driver will create.
*
* The NVME Layer will try to create this many, plus 1 administrative
* hardware queue. The administrative queue will always map to WQ 0
* A hardware IO queue maps (qidx) to a specific driver WQ.
* A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
*
* 0 = Configure the number of hdw queues to the number of active CPUs.
* 1,64 = Manually specify how many hdw queues to use.
* 1,128 = Manually specify how many hdw queues to use.
*
* Value range is [0,64]. Default value is 0.
* Value range is [0,128]. Default value is 0.
*/
LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_DEF,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
/*
* lpfc_irq_chann: Set the number of IRQ vectors that are available
* for Hardware Queues to utilize. This also will map to the number
* of EQ / MSI-X vectors the driver will create. This should never be
* more than the number of Hardware Queues
*
* 0 = Configure number of IRQ Channels to the number of active CPUs.
* 1,128 = Manually specify how many IRQ Channels to use.
*
* Value range is [0,128]. Default value is 0.
*/
LPFC_ATTR_R(irq_chann,
LPFC_HBA_HDWQ_DEF,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O IRQ Channels");
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled
......@@ -5532,16 +5540,6 @@ LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
*/
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
/*
# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
# 0 = disabled (default)
# 1 = enabled
# Value range is [0,1]. Default value is 0.
#
# This feature in under investigation and may be supported in the future.
*/
unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
/*
# lpfc_prot_mask: i
# - Bit mask of host protection capabilities used to register with the
......@@ -5788,6 +5786,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_hdw_queue,
&dev_attr_lpfc_irq_chann,
&dev_attr_lpfc_suppress_rsp,
&dev_attr_lpfc_nvmet_mrq,
&dev_attr_lpfc_nvmet_mrq_post,
......@@ -6867,6 +6866,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
......@@ -6891,6 +6891,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
/* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_hdw_queue == 0)
phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
if (phba->cfg_irq_chann == 0)
phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
phba->cfg_irq_chann = phba->cfg_hdw_queue;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
......@@ -6933,6 +6937,10 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
{
if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu)
phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
phba->cfg_irq_chann = phba->cfg_hdw_queue;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
phba->nvmet_support) {
......@@ -6953,11 +6961,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
}
if (!phba->cfg_nvmet_mrq)
phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
......
......@@ -440,7 +440,6 @@ extern spinlock_t _dump_buf_lock;
extern int _dump_buf_done;
extern spinlock_t pgcnt_lock;
extern unsigned int pgcnt;
extern unsigned int lpfc_fcp_look_ahead;
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
......
......@@ -290,9 +290,6 @@ struct lpfc_idiag {
/* multixripool output buffer size */
#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
/* hdwqinfo output buffer size */
#define LPFC_HDWQINFO_SIZE 8192
enum {
DUMP_FCP,
DUMP_NVME,
......
......@@ -211,9 +211,8 @@ struct lpfc_sli_intf {
#define LPFC_DEF_IMAX 150000
#define LPFC_MIN_CPU_MAP 0
#define LPFC_MAX_CPU_MAP 2
#define LPFC_MAX_CPU_MAP 1
#define LPFC_HBA_CPU_MAP 1
#define LPFC_DRIVER_CPU_MAP 2 /* Default */
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
......
......@@ -239,7 +239,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
if (qidx) {
str = "IO "; /* IO queue */
qhandle->index = ((qidx - 1) %
vport->phba->cfg_hdw_queue);
lpfc_nvme_template.max_hw_queues);
} else {
str = "ADM"; /* Admin queue */
qhandle->index = qidx;
......@@ -1546,14 +1546,12 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
}
}
/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
idx = lpfc_queue_info->index;
} else {
cpu = smp_processor_id();
if (cpu < phba->cfg_hdw_queue)
idx = cpu;
else
idx = cpu % phba->cfg_hdw_queue;
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
}
lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
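A stand-alone restatement of the two selection modes above (struct, enum names
and values are hypothetical). The BY_CPU branch now consults a per-CPU table
instead of the old "cpu % cfg_hdw_queue" computation, so queue choice can
follow whatever topology-aware layout was built at init time.

#include <stdio.h>

enum fcp_io_sched { SCHED_BY_HDWQ, SCHED_BY_CPU };	/* illustrative names */

struct cpu_map_entry { unsigned short hdwq; };

static unsigned int pick_hdwq(enum fcp_io_sched mode,
			      unsigned int upper_layer_idx,
			      unsigned int cpu,
			      const struct cpu_map_entry *map)
{
	if (mode == SCHED_BY_HDWQ)
		return upper_layer_idx;	/* trust the queue the upper layer chose */
	return map[cpu].hdwq;		/* topology-aware per-CPU mapping */
}

int main(void)
{
	struct cpu_map_entry map[8] = {
		{0}, {0}, {1}, {1}, {2}, {2}, {3}, {3}	/* hypothetical layout */
	};

	printf("hdwq for CPU 5 (BY_CPU): %u\n",
	       pick_hdwq(SCHED_BY_CPU, 0, 5, map));
	printf("hdwq via upper layer (BY_HDWQ): %u\n",
	       pick_hdwq(SCHED_BY_HDWQ, 3, 5, map));
	return 0;
}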
......@@ -2060,7 +2058,13 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
* allocate + 3, one for cmd, one for rsp and one for this alignment
*/
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
/* Advertise how many hw queues we support based on fcp_io_sched */
if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
else
lpfc_nvme_template.max_hw_queues =
phba->sli4_hba.num_present_cpu;
/* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area.
......@@ -2554,6 +2558,8 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
* WQEs have been removed from the txcmplqs.
*/
for (i = 0; i < phba->cfg_hdw_queue; i++) {
if (!phba->sli4_hba.hdwq[i].nvme_wq)
continue;
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
if (!pring)
......
......@@ -692,10 +692,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
tag = blk_mq_unique_tag(cmnd->request);
idx = blk_mq_unique_tag_to_hwq(tag);
} else {
if (cpu < phba->cfg_hdw_queue)
idx = cpu;
else
idx = cpu % phba->cfg_hdw_queue;
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
}
lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
......@@ -3650,6 +3647,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
int idx;
uint32_t logit = LOG_FCP;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int cpu;
#endif
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
......@@ -3660,6 +3660,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
cpu = smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT)
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
}
#endif
shost = cmd->device->host;
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
......@@ -4336,6 +4343,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct lpfc_io_buf *lpfc_cmd;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int cpu;
#endif
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
......@@ -4450,6 +4460,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
cpu = smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
struct lpfc_sli4_hdw_queue *hdwq =
&phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
hdwq->cpucheck_xmt_io[cpu]++;
}
}
#endif
err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err) {
......
......@@ -41,7 +41,7 @@
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_HDWQ_MIN 0
#define LPFC_HBA_HDWQ_MAX 64
#define LPFC_HBA_HDWQ_MAX 128
#define LPFC_HBA_HDWQ_DEF 0
/* Common buffer size to accomidate SCSI and NVME IO buffers */
......@@ -166,16 +166,19 @@ struct lpfc_queue {
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
uint32_t q_mode;
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
uint32_t q_mode;
uint16_t page_count; /* Number of pages allocated for this queue */
uint16_t page_size; /* size of page allocated for this queue */
#define LPFC_EXPANDED_PAGE_SIZE 16384
#define LPFC_DEFAULT_PAGE_SIZE 4096
uint16_t chann; /* IO channel this queue is associated with */
uint16_t chann; /* Hardware Queue association WQ/CQ */
/* CPU affinity for EQ */
#define LPFC_FIND_BY_EQ 0
#define LPFC_FIND_BY_HDWQ 1
uint8_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
......@@ -431,11 +434,6 @@ struct lpfc_hba_eq_hdl {
uint32_t idx;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
atomic_t hba_eq_in_use;
struct cpumask *cpumask;
/* CPU affinitsed to or 0xffffffff if multiple */
uint32_t cpu;
#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
};
/*BB Credit recovery value*/
......@@ -529,7 +527,9 @@ struct lpfc_vector_map_info {
uint16_t phys_id;
uint16_t core_id;
uint16_t irq;
uint16_t eq;
uint16_t hdwq;
uint16_t hyper;
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
......@@ -593,6 +593,21 @@ struct lpfc_fc4_ctrl_stat {
u32 io_cmpls;
};
#ifdef LPFC_HDWQ_LOCK_STAT
struct lpfc_lock_stat {
uint32_t alloc_xri_get;
uint32_t alloc_xri_put;
uint32_t free_xri;
uint32_t wq_access;
uint32_t alloc_pvt_pool;
uint32_t mv_from_pvt_pool;
uint32_t mv_to_pub_pool;
uint32_t mv_to_pvt_pool;
uint32_t free_pub_pool;
uint32_t free_pvt_pool;
};
#endif
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hdw_queue {
/* Pointers to the constructed SLI4 queues */
......@@ -626,6 +641,9 @@ struct lpfc_sli4_hdw_queue {
/* FC-4 Stats counters */
struct lpfc_fc4_ctrl_stat nvme_cstat;
struct lpfc_fc4_ctrl_stat scsi_cstat;
#ifdef LPFC_HDWQ_LOCK_STAT
struct lpfc_lock_stat lock_conflict;
#endif
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
#define LPFC_CHECK_CPU_CNT 128
......@@ -635,6 +653,34 @@ struct lpfc_sli4_hdw_queue {
#endif
};
#ifdef LPFC_HDWQ_LOCK_STAT
/* compile time trylock stats */
#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
{ \
int only_once = 1; \
while (spin_trylock_irqsave(lock, flag) == 0) { \
if (only_once) { \
only_once = 0; \
qp->lock_conflict.lstat++; \
} \
} \
}
#define lpfc_qp_spin_lock(lock, qp, lstat) \
{ \
int only_once = 1; \
while (spin_trylock(lock) == 0) { \
if (only_once) { \
only_once = 0; \
qp->lock_conflict.lstat++; \
} \
} \
}
#else
#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
spin_lock_irqsave(lock, flag)
#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
#endif
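A hypothetical call site for the macros above (the lock field name is
illustrative, not taken from this hunk): with LPFC_HDWQ_LOCK_STAT defined, a
contended acquisition bumps the matching lock_conflict counter exactly once
and then keeps retrying until the lock is taken; without it, the macro is just
spin_lock_irqsave().

	/* inside some per-hdwq allocation path; qp points at the hdw queue */
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp = &phba->sli4_hba.hdwq[idx];

	/* lock name below is illustrative */
	lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
				  qp, alloc_xri_get);
	/* ... pull an IO buffer off the get list ... */
	spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);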
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
* config space registers
......@@ -764,6 +810,8 @@ struct lpfc_sli4_hba {
uint16_t nvmet_xri_cnt;
uint16_t nvmet_io_wait_cnt;
uint16_t nvmet_io_wait_total;
uint16_t cq_max;
struct lpfc_queue **cq_lookup;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
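The purpose of the two new fields is not visible in this hunk; presumably
cq_lookup is an array indexed by completion-queue ID (bounded by cq_max) so a
CQ ID pulled from an event can be resolved directly to its lpfc_queue. A
guarded lookup might look like the following sketch (helper name hypothetical):

static struct lpfc_queue *
lpfc_cq_from_id(struct lpfc_sli4_hba *sli4_hba, uint16_t cqid)
{
	/* cq_lookup may not be built yet, and IDs above cq_max are invalid */
	if (!sli4_hba->cq_lookup || cqid > sli4_hba->cq_max)
		return NULL;
	return sli4_hba->cq_lookup[cqid];
}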
......