Commit a6478b26 authored by Martin K. Petersen

Merge patch series "Introduce support for multiqueue (MQ) in fnic"

Karan Tilak Kumar <kartilak@cisco.com> says:

Hi Martin, reviewers,

This cover letter describes the feature: adding support for
multiqueue (MQ) to the fnic driver.

Background: The Virtual Interface Card (VIC) firmware exposes several
queues that can be configured for sending IOs and receiving IO
responses. Unified Computing System Manager (UCSM) and Intersight
Manager (IMM) allow users to configure the number of queues to be
used for IOs.

The number of IO queues to be used is stored in a configuration file
by the VIC firmware. The fNIC driver reads the configuration file and
sets the number of queues to be used. Previously, the driver was
hard-coded to use only one queue. With this set of changes, the fNIC
driver will configure itself to use multiple queues. This feature
takes advantage of the block multiqueue layer to parallelize IOs being
sent out of the VIC card.
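
For readers less familiar with the block multiqueue plumbing, the sketch
below shows the general pattern a SCSI low-level driver follows to expose
multiple hardware queues: advertise the firmware-provided queue count via
nr_hw_queues and spread the hardware queues across the PCI MSI-X vectors.
This is a simplified illustration of the generic scsi/blk-mq API as it
existed at the time of this series, not a copy of the fnic code; the
helper names and the vector offset of 2 are assumptions for the example.

/*
 * Simplified sketch (not the fnic code): exposing multiple hardware
 * queues to the block-MQ layer. The offset of 2 assumes the first two
 * MSI-X vectors are reserved for non-IO interrupts (RQ and raw WQ).
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>

/* Called from the host template's map_queues hook (illustrative). */
static void example_mq_map_queues(struct Scsi_Host *shost, struct pci_dev *pdev)
{
	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

	/* Spread the hardware queues across the device's MSI-X vectors. */
	blk_mq_pci_map_queues(qmap, pdev, 2 /* skip non-IO vectors */);
}

/* Advertise one block-MQ hardware context per firmware copy WQ,
 * before scsi_add_host() is called (illustrative).
 */
static void example_mq_setup(struct Scsi_Host *shost, unsigned int wq_copy_count)
{
	shost->nr_hw_queues = wq_copy_count;
}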

Here's a brief description of some of the salient patches:

- vnic_scsi.h needs to be in sync with VIC firmware to be able to read
the number of queues from the firmware config file. A patch has been
created for this.
- In an environment with many fnics (like we see in our customer
environments), it is hard to distinguish which fnic is printing logs.
Therefore, an fnic number has been included in the logs.
- Read the number of queues from the firmware config file.
- Include definitions in fnic.h to support multiqueue.
- Modify the interrupt service routines (ISRs) to read from the
correct registers. The register numbers used here come from discussions
with the VIC firmware team.
- Track IO statistics for the different queues.
- Remove the usage of host_lock, and use only fnic_lock in the fnic driver.
- Use a hardware-queue-based spinlock to protect io_req.
- Replace the hard-coded zeroth queue with a hardware queue number.
This represents the bulk of the changes.
- Modify the definition of fnic_queuecommand to accept multiqueue tags
(see the sketch after this list).
- Improve log messages, and indicate the fnic number and multiqueue tags
for effective debugging.
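
As a concrete illustration of the per-hardware-queue locking and the
multiqueue tag handling mentioned above, the sketch below shows the common
pattern for recovering the hardware queue index and per-queue tag from a
SCSI command inside queuecommand, and for serializing per-queue state with
that queue's own lock rather than a host-wide lock. It is a simplified
example only; the example_hba structure and its fields are hypothetical and
do not reproduce the fnic data structures.

/*
 * Simplified illustration (not fnic_queuecommand): derive the block-MQ
 * hardware queue and tag for a command, then take only that queue's
 * lock. The structure and field names here are hypothetical.
 */
#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#define EXAMPLE_MAX_QUEUES	64
#define EXAMPLE_TAGS_PER_QUEUE	256

struct example_hba {
	spinlock_t hwq_lock[EXAMPLE_MAX_QUEUES];	/* one lock per hardware queue */
	void *io_req_table[EXAMPLE_MAX_QUEUES][EXAMPLE_TAGS_PER_QUEUE];
};

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct example_hba *hba = shost_priv(shost);
	u32 mqtag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
	u16 hwq = blk_mq_unique_tag_to_hwq(mqtag);	/* hardware queue index */
	u16 tag = blk_mq_unique_tag_to_tag(mqtag);	/* tag within that queue */
	unsigned long flags;

	/* Serialize only against the completion path of the same queue. */
	spin_lock_irqsave(&hba->hwq_lock[hwq], flags);
	hba->io_req_table[hwq][tag] = sc;		/* hypothetical bookkeeping */
	spin_unlock_irqrestore(&hba->hwq_lock[hwq], flags);

	/* ... build and post the firmware request on copy WQ 'hwq' ... */
	return 0;
}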

Even though the patches have been grouped into a series, some patches are
heavier than others. However, every effort has been made to keep each patch
single-purpose and to have it compile cleanly.

This patchset has been tested as a whole; therefore, Tested-by tags have
been added to only two patches in the set. All the individual patches
compile cleanly. However, I've refrained from adding Tested-by to most of
the patches, so as not to mislead the reviewer/reader.

A brief note on the unit tests:

1. Increase number of queues to 64. Load driver. Run IOs via Medusa.
12+ hour run successful.
2. Configure multipathing, and run link flaps on single link.
IOs drop briefly, but pick up as expected.
3. Configure multipathing, and run link flaps on two links, with a
30 second delay in between. IOs drop briefly, but pick up as expected.

The above tests were repeated with 1 queue and 32 queues. All tests were
successful.

Please consider this patch series for the next merge window.

Link: https://lore.kernel.org/r/20231211173617.932990-1-kartilak@cisco.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents fc1fbd13 53021c19
......@@ -27,7 +27,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
#define DRV_VERSION "1.6.0.57"
#define DRV_VERSION "1.7.0.0"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
......@@ -36,7 +36,6 @@
#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */
#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define FNIC_DFLT_QUEUE_DEPTH 256
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
......@@ -109,7 +108,7 @@ static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */
#define FNIC_MAX_FCP_TARGET 256
#define FNIC_PCI_OFFSET 2
/**
* state_flags to identify host state along along with fnic's state
**/
......@@ -144,31 +143,48 @@ do { \
} while (0); \
} while (0)
#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \
#define FNIC_MAIN_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
shost_printk(kern_level, host, \
"fnic<%d>: %s: %d: " fmt, fnic_num,\
__func__, __LINE__, ##args);)
#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \
#define FNIC_FCS_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
shost_printk(kern_level, host, \
"fnic<%d>: %s: %d: " fmt, fnic_num,\
__func__, __LINE__, ##args);)
#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \
#define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
shost_printk(kern_level, host, \
"fnic<%d>: %s: %d: " fmt, fnic_num,\
__func__, __LINE__, ##args);)
#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \
#define FNIC_ISR_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
shost_printk(kern_level, host, \
"fnic<%d>: %s: %d: " fmt, fnic_num,\
__func__, __LINE__, ##args);)
#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \
shost_printk(kern_level, host, fmt, ##args)
#define FNIC_WQ_COPY_MAX 64
#define FNIC_WQ_MAX 1
#define FNIC_RQ_MAX 1
#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
#define FNIC_DFLT_IO_COMPLETIONS 256
#define FNIC_MQ_CQ_INDEX 2
extern const char *fnic_state_str[];
enum fnic_intx_intr_index {
FNIC_INTX_WQ_RQ_COPYWQ,
FNIC_INTX_ERR,
FNIC_INTX_DUMMY,
FNIC_INTX_NOTIFY,
FNIC_INTX_ERR,
FNIC_INTX_INTR_MAX,
};
......@@ -176,7 +192,7 @@ enum fnic_msix_intr_index {
FNIC_MSIX_RQ,
FNIC_MSIX_WQ,
FNIC_MSIX_WQ_COPY,
FNIC_MSIX_ERR_NOTIFY,
FNIC_MSIX_ERR_NOTIFY = FNIC_MSIX_WQ_COPY + FNIC_WQ_COPY_MAX,
FNIC_MSIX_INTR_MAX,
};
......@@ -185,6 +201,7 @@ struct fnic_msix_entry {
char devname[IFNAMSIZ + 11];
irqreturn_t (*isr)(int, void *);
void *devid;
int irq_num;
};
enum fnic_state {
......@@ -194,12 +211,6 @@ enum fnic_state {
FNIC_IN_ETH_TRANS_FC_MODE,
};
#define FNIC_WQ_COPY_MAX 1
#define FNIC_WQ_MAX 1
#define FNIC_RQ_MAX 1
#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
#define FNIC_DFLT_IO_COMPLETIONS 256
struct mempool;
enum fnic_evt {
......@@ -214,8 +225,16 @@ struct fnic_event {
enum fnic_evt event;
};
struct fnic_cpy_wq {
unsigned long hw_lock_flags;
u16 active_ioreq_count;
u16 ioreq_table_size;
____cacheline_aligned struct fnic_io_req **io_req_table;
};
/* Per-instance private data structure */
struct fnic {
int fnic_num;
struct fc_lport *lport;
struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
struct vnic_dev_bar bar0;
......@@ -282,8 +301,8 @@ struct fnic {
struct fnic_host_tag *tags;
mempool_t *io_req_pool;
mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */
unsigned int copy_wq_base;
struct work_struct link_work;
struct work_struct frame_work;
struct sk_buff_head frame_queue;
......@@ -302,7 +321,9 @@ struct fnic {
/*** FIP related data members -- end ***/
/* copy work queue cache line section */
____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
____cacheline_aligned struct vnic_wq_copy hw_copy_wq[FNIC_WQ_COPY_MAX];
____cacheline_aligned struct fnic_cpy_wq sw_copy_wq[FNIC_WQ_COPY_MAX];
/* completion queue cache line section */
____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
......@@ -330,6 +351,7 @@ extern const struct attribute_group *fnic_host_groups[];
void fnic_clear_intr_mode(struct fnic *fnic);
int fnic_set_intr_mode(struct fnic *fnic);
int fnic_set_intr_mode_msix(struct fnic *fnic);
void fnic_free_intr(struct fnic *fnic);
int fnic_request_intr(struct fnic *fnic);
......@@ -356,7 +378,7 @@ void fnic_scsi_cleanup(struct fc_lport *);
void fnic_scsi_abort_io(struct fc_lport *);
void fnic_empty_scsi_cleanup(struct fc_lport *);
void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
int fnic_wq_cmpl_handler(struct fnic *fnic, int);
int fnic_flogi_reg_handler(struct fnic *fnic, u32);
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
......@@ -364,7 +386,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
int fnic_fw_reset_handler(struct fnic *fnic);
void fnic_terminate_rport_io(struct fc_rport *);
const char *fnic_state_to_str(unsigned int state);
void fnic_mq_map_queues_cpus(struct Scsi_Host *host);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
......
......@@ -63,8 +63,8 @@ void fnic_handle_link(struct work_struct *work)
atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
new_port_speed);
if (old_port_speed != new_port_speed)
FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
"Current vnic speed set to : %llu\n",
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"Current vnic speed set to: %llu\n",
new_port_speed);
switch (vnic_dev_port_speed(fnic->vdev)) {
......@@ -102,6 +102,8 @@ void fnic_handle_link(struct work_struct *work)
fnic_fc_trace_set_data(fnic->lport->host->host_no,
FNIC_FC_LE, "Link Status: DOWN->DOWN",
strlen("Link Status: DOWN->DOWN"));
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"down->down\n");
} else {
if (old_link_down_cnt != fnic->link_down_cnt) {
/* UP -> DOWN -> UP */
......@@ -113,7 +115,7 @@ void fnic_handle_link(struct work_struct *work)
"Link Status:UP_DOWN_UP",
strlen("Link_Status:UP_DOWN_UP")
);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"link down\n");
fcoe_ctlr_link_down(&fnic->ctlr);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
......@@ -128,8 +130,8 @@ void fnic_handle_link(struct work_struct *work)
fnic_fcoe_send_vlan_req(fnic);
return;
}
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link up\n");
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"up->down->up: Link up\n");
fcoe_ctlr_link_up(&fnic->ctlr);
} else {
/* UP -> UP */
......@@ -138,6 +140,8 @@ void fnic_handle_link(struct work_struct *work)
fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: UP_UP",
strlen("Link Status: UP_UP"));
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"up->up\n");
}
}
} else if (fnic->link_status) {
......@@ -153,7 +157,8 @@ void fnic_handle_link(struct work_struct *work)
return;
}
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"down->up: Link up\n");
fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
fcoe_ctlr_link_up(&fnic->ctlr);
......@@ -161,13 +166,14 @@ void fnic_handle_link(struct work_struct *work)
/* UP -> DOWN */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"up->down: Link down\n");
fnic_fc_trace_set_data(
fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: UP_DOWN",
strlen("Link Status: UP_DOWN"));
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"deleting fip-timer during link-down\n");
del_timer_sync(&fnic->fip_timer);
}
......@@ -270,12 +276,12 @@ void fnic_handle_event(struct work_struct *work)
spin_lock_irqsave(&fnic->fnic_lock, flags);
break;
case FNIC_EVT_START_FCF_DISC:
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Start FCF Discovery\n");
fnic_fcoe_start_fcf_disc(fnic);
break;
default:
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Unknown event 0x%x\n", fevt->event);
break;
}
......@@ -370,7 +376,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
fnic->set_vlan(fnic, 0);
if (printk_ratelimit())
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"Sending VLAN request...\n");
skb = dev_alloc_skb(sizeof(struct fip_vlan));
......@@ -423,12 +429,12 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
u64 sol_time;
unsigned long flags;
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"Received VLAN response...\n");
fiph = (struct fip_header *) skb->data;
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
ntohs(fiph->fip_op), fiph->fip_subcode);
......@@ -463,7 +469,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
if (list_empty(&fnic->vlans)) {
/* retry from timer */
atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"No VLAN descriptors in FIP VLAN response\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
goto out;
......@@ -721,7 +727,8 @@ void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
new = ctl;
if (ether_addr_equal(data, new))
return;
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"update_mac %pM\n", new);
if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
vnic_dev_del_addr(fnic->vdev, data);
memcpy(data, new, ETH_ALEN);
......@@ -763,8 +770,9 @@ void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
u8 *mac;
int ret;
FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
port_id, fp);
FNIC_FCS_DBG(KERN_DEBUG, lport->host, fnic->fnic_num,
"set port_id 0x%x fp 0x%p\n",
port_id, fp);
/*
* If we're clearing the FC_ID, change to use the ctl_src_addr.
......@@ -790,10 +798,9 @@ void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
else {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Unexpected fnic state %s while"
" processing flogi resp\n",
fnic_state_to_str(fnic->state));
FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
"Unexpected fnic state: %s processing FLOGI response",
fnic_state_to_str(fnic->state));
spin_unlock_irq(&fnic->fnic_lock);
return;
}
......@@ -870,7 +877,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
skb_trim(skb, bytes_written);
if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fcs error. dropping packet.\n");
goto drop;
}
......@@ -886,7 +893,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fnic rq_cmpl fcoe x%x fcsok x%x"
" pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
" x%x\n",
......@@ -967,7 +974,7 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
skb = dev_alloc_skb(len);
if (!skb) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Unable to allocate RQ sk_buff\n");
return -ENOMEM;
}
......@@ -1341,12 +1348,12 @@ void fnic_handle_fip_timer(struct fnic *fnic)
}
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fip_timer: vlan %d state %d sol_count %d\n",
vlan->vid, vlan->state, vlan->sol_count);
switch (vlan->state) {
case FIP_VLAN_USED:
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"FIP VLAN is selected for FC transaction\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
break;
......@@ -1365,7 +1372,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
* no response on this vlan, remove from the list.
* Try the next vlan
*/
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"Dequeue this VLAN ID %d from list\n",
vlan->vid);
list_del(&vlan->list);
......@@ -1375,7 +1382,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
/* we exhausted all vlans, restart vlan disc */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"fip_timer: vlan list empty, "
"trigger vlan disc\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
......
......@@ -38,8 +38,13 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
fnic_log_q_error(fnic);
}
if (pba & (1 << FNIC_INTX_DUMMY)) {
atomic64_inc(&fnic->fnic_stats.misc_stats.intx_dummy);
vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_DUMMY]);
}
if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions, FNIC_MQ_CQ_INDEX);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
......@@ -60,7 +65,7 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions, FNIC_MQ_CQ_INDEX);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
......@@ -109,12 +114,22 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long wq_copy_work_done = 0;
int i;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
i = irq - fnic->msix[0].irq_num;
if (i >= fnic->wq_copy_count + fnic->copy_wq_base ||
i < 0 || fnic->msix[i].irq_num != irq) {
for (i = fnic->copy_wq_base; i < fnic->wq_copy_count + fnic->copy_wq_base ; i++) {
if (fnic->msix[i].irq_num == irq)
break;
}
}
wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions, i);
vnic_intr_return_credits(&fnic->intr[i],
wq_copy_work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
......@@ -128,7 +143,7 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
vnic_intr_return_all_credits(&fnic->intr[fnic->err_intr_offset]);
fnic_log_q_error(fnic);
fnic_handle_link_event(fnic);
......@@ -186,26 +201,30 @@ int fnic_request_intr(struct fnic *fnic)
fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
fnic->msix[FNIC_MSIX_WQ].devid = fnic;
sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
"%.11s-scsi-wq", fnic->name);
fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
for (i = fnic->copy_wq_base; i < fnic->wq_copy_count + fnic->copy_wq_base; i++) {
sprintf(fnic->msix[i].devname,
"%.11s-scsi-wq-%d", fnic->name, i-FNIC_MSIX_WQ_COPY);
fnic->msix[i].isr = fnic_isr_msix_wq_copy;
fnic->msix[i].devid = fnic;
}
sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
sprintf(fnic->msix[fnic->err_intr_offset].devname,
"%.11s-err-notify", fnic->name);
fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
fnic->msix[fnic->err_intr_offset].isr =
fnic_isr_msix_err_notify;
fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
fnic->msix[fnic->err_intr_offset].devid = fnic;
for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
err = request_irq(pci_irq_vector(fnic->pdev, i),
fnic->msix[i].isr, 0,
fnic->msix[i].devname,
fnic->msix[i].devid);
for (i = 0; i < fnic->intr_count; i++) {
fnic->msix[i].irq_num = pci_irq_vector(fnic->pdev, i);
err = request_irq(fnic->msix[i].irq_num,
fnic->msix[i].isr, 0,
fnic->msix[i].devname,
fnic->msix[i].devid);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"MSIX: request_irq"
" failed %d\n", err);
FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
"request_irq failed with error: %d\n",
err);
fnic_free_intr(fnic);
break;
}
......@@ -220,44 +239,99 @@ int fnic_request_intr(struct fnic *fnic)
return err;
}
int fnic_set_intr_mode(struct fnic *fnic)
int fnic_set_intr_mode_msix(struct fnic *fnic)
{
unsigned int n = ARRAY_SIZE(fnic->rq);
unsigned int m = ARRAY_SIZE(fnic->wq);
unsigned int o = ARRAY_SIZE(fnic->wq_copy);
unsigned int o = ARRAY_SIZE(fnic->hw_copy_wq);
unsigned int min_irqs = n + m + 1 + 1; /*rq, raw wq, wq, err*/
/*
* Set interrupt mode (INTx, MSI, MSI-X) depending
* system capabilities.
*
* Try MSI-X first
*
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
* (last INTR is used for WQ/RQ errors and notification area)
*/
if (fnic->rq_count >= n &&
fnic->raw_wq_count >= m &&
fnic->wq_copy_count >= o &&
fnic->cq_count >= n + m + o) {
int vecs = n + m + o + 1;
if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs,
PCI_IRQ_MSIX) == vecs) {
FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"rq-array size: %d wq-array size: %d copy-wq array size: %d\n",
n, m, o);
FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n",
fnic->rq_count, fnic->raw_wq_count,
fnic->wq_copy_count, fnic->cq_count);
if (fnic->rq_count <= n && fnic->raw_wq_count <= m &&
fnic->wq_copy_count <= o) {
int vec_count = 0;
int vecs = fnic->rq_count + fnic->raw_wq_count + fnic->wq_copy_count + 1;
vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"allocated %d MSI-X vectors\n",
vec_count);
if (vec_count > 0) {
if (vec_count < vecs) {
FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
"interrupts number mismatch: vec_count: %d vecs: %d\n",
vec_count, vecs);
if (vec_count < min_irqs) {
FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
"no interrupts for copy wq\n");
return 1;
}
}
fnic->rq_count = n;
fnic->raw_wq_count = m;
fnic->wq_copy_count = o;
fnic->wq_count = m + o;
fnic->cq_count = n + m + o;
fnic->intr_count = vecs;
fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
"Using MSI-X Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev,
VNIC_DEV_INTR_MODE_MSIX);
fnic->copy_wq_base = fnic->rq_count + fnic->raw_wq_count;
fnic->wq_copy_count = vec_count - n - m - 1;
fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count;
if (fnic->cq_count != vec_count - 1) {
FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
"CQ count: %d does not match MSI-X vector count: %d\n",
fnic->cq_count, vec_count);
fnic->cq_count = vec_count - 1;
}
fnic->intr_count = vec_count;
fnic->err_intr_offset = fnic->rq_count + fnic->wq_count;
FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"rq_count: %d raw_wq_count: %d copy_wq_base: %d\n",
fnic->rq_count,
fnic->raw_wq_count, fnic->copy_wq_base);
FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"wq_copy_count: %d wq_count: %d cq_count: %d\n",
fnic->wq_copy_count,
fnic->wq_count, fnic->cq_count);
FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"intr_count: %d err_intr_offset: %u",
fnic->intr_count,
fnic->err_intr_offset);
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX);
FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"fnic using MSI-X\n");
return 0;
}
}
return 1;
}
int fnic_set_intr_mode(struct fnic *fnic)
{
int ret_status = 0;
/*
* Set interrupt mode (INTx, MSI, MSI-X) depending
* system capabilities.
*
* Try MSI-X first
*/
ret_status = fnic_set_intr_mode_msix(fnic);
if (ret_status == 0)
return ret_status;
/*
* Next try MSI
......@@ -277,7 +351,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->intr_count = 1;
fnic->err_intr_offset = 0;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Using MSI Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
......@@ -303,7 +377,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->cq_count = 3;
fnic->intr_count = 3;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Using Legacy Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
......
......@@ -57,6 +57,8 @@ int fnic_get_vnic_config(struct fnic *fnic)
GET_CONFIG(port_down_timeout);
GET_CONFIG(port_down_io_retries);
GET_CONFIG(luns_per_tgt);
GET_CONFIG(intr_mode);
GET_CONFIG(wq_copy_count);
c->wq_enet_desc_count =
min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
......@@ -131,6 +133,12 @@ int fnic_get_vnic_config(struct fnic *fnic)
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
c->intr_timer_type = c->intr_timer_type;
/* for older firmware, GET_CONFIG will not return anything */
if (c->wq_copy_count == 0)
c->wq_copy_count = 1;
c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC MAC addr %pM "
"wq/wq_copy/rq %d/%d/%d\n",
......@@ -161,6 +169,10 @@ int fnic_get_vnic_config(struct fnic *fnic)
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC port dn io retries %d port dn timeout %d\n",
c->port_down_io_retries, c->port_down_timeout);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC wq_copy_count: %d\n", c->wq_copy_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC intr mode: %d\n", c->intr_mode);
return 0;
}
......@@ -187,12 +199,25 @@ int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
void fnic_get_res_counts(struct fnic *fnic)
{
fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
fnic->raw_wq_count = fnic->wq_count - 1;
fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
fnic->raw_wq_count = 1;
fnic->wq_copy_count = fnic->config.wq_copy_count;
fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
RES_TYPE_INTR_CTRL);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC fw resources wq_count: %d\n", fnic->wq_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC fw resources rq_count: %d\n", fnic->rq_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC fw resources cq_count: %d\n", fnic->cq_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC fw resources intr_count: %d\n", fnic->intr_count);
}
void fnic_free_vnic_resources(struct fnic *fnic)
......@@ -203,7 +228,7 @@ void fnic_free_vnic_resources(struct fnic *fnic)
vnic_wq_free(&fnic->wq[i]);
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_free(&fnic->wq_copy[i]);
vnic_wq_copy_free(&fnic->hw_copy_wq[i]);
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_free(&fnic->rq[i]);
......@@ -234,10 +259,15 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
"MSI-X" : "unknown");
shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
"wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
fnic->rq_count, fnic->cq_count, fnic->intr_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d",
fnic->wq_count, fnic->wq_copy_count,
fnic->raw_wq_count, fnic->rq_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n",
fnic->cq_count, fnic->intr_count,
fnic->config.wq_copy_desc_count);
/* Allocate Raw WQ used for FCS frames */
for (i = 0; i < fnic->raw_wq_count; i++) {
......@@ -250,7 +280,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
/* Allocate Copy WQs used for SCSI IOs */
for (i = 0; i < fnic->wq_copy_count; i++) {
err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
err = vnic_wq_copy_alloc(fnic->vdev, &fnic->hw_copy_wq[i],
(fnic->raw_wq_count + i),
fnic->config.wq_copy_desc_count,
sizeof(struct fcpio_host_req));
......@@ -357,7 +387,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
}
for (i = 0; i < fnic->wq_copy_count; i++) {
vnic_wq_copy_init(&fnic->wq_copy[i],
vnic_wq_copy_init(&fnic->hw_copy_wq[i],
0 /* cq_index 0 - always */,
error_interrupt_enable,
error_interrupt_offset);
......
......@@ -2,6 +2,7 @@
/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
#define FNIC_MQ_MAX_QUEUES 64
struct stats_timestamps {
struct timespec64 last_reset_time;
......@@ -26,6 +27,7 @@ struct io_path_stats {
atomic64_t io_btw_10000_to_30000_msec;
atomic64_t io_greater_than_30000_msec;
atomic64_t current_max_io_time;
atomic64_t ios[FNIC_MQ_MAX_QUEUES];
};
struct abort_stats {
......@@ -103,6 +105,7 @@ struct misc_stats {
atomic64_t rport_not_ready;
atomic64_t frame_errors;
atomic64_t current_port_speed;
atomic64_t intx_dummy;
};
struct fnic_stats {
......
......@@ -204,6 +204,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
int len = 0;
int buf_size = debug->buf_size;
struct timespec64 val1, val2;
int i = 0;
ktime_get_real_ts64(&val1);
len = scnprintf(debug->debug_buffer + len, buf_size - len,
......@@ -266,6 +267,16 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
(u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tIO Queues and cumulative IOs\n"
"------------------------------------------\n");
for (i = 0; i < FNIC_MQ_MAX_QUEUES; i++) {
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Q:%d -> %lld\n", i, (u64)atomic64_read(&stats->io_stats.ios[i]));
}
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\nCurrent Max IO time : %lld\n",
(u64)atomic64_read(&stats->io_stats.current_max_io_time));
......
......@@ -143,6 +143,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
}
pr_info("res_type_wq: %d res_type_rq: %d res_type_cq: %d res_type_intr_ctrl: %d\n",
vdev->res[RES_TYPE_WQ].count, vdev->res[RES_TYPE_RQ].count,
vdev->res[RES_TYPE_CQ].count, vdev->res[RES_TYPE_INTR_CTRL].count);
return 0;
}
......
......@@ -26,7 +26,7 @@
#define VNIC_FNIC_RATOV_MAX 255000
#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256
#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112
#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2048
#define VNIC_FNIC_FLOGI_RETRIES_MIN 0
#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff
......@@ -55,7 +55,7 @@
#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255
#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1
#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024
#define VNIC_FNIC_LUNS_PER_TARGET_MAX 4096
/* Device-specific region: scsi configuration */
struct vnic_fc_config {
......@@ -79,10 +79,19 @@ struct vnic_fc_config {
u16 ra_tov;
u16 intr_timer;
u8 intr_timer_type;
u8 intr_mode;
u8 lun_queue_depth;
u8 io_timeout_retry;
u16 wq_copy_count;
};
#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */
#define VFCF_PERBI 0x2 /* persistent binding info available */
#define VFCF_FIP_CAPABLE 0x4 /* firmware can handle FIP */
#define VFCF_FC_INITIATOR 0x20 /* FC Initiator Mode */
#define VFCF_FC_TARGET 0x40 /* FC Target Mode */
#define VFCF_FC_NVME_INITIATOR 0x80 /* FC-NVMe Initiator Mode */
#define VFCF_FC_NVME_TARGET 0x100 /* FC-NVMe Target Mode */
#endif /* _VNIC_SCSI_H_ */