Commit c8806b6c authored by Narsimhulu Musini's avatar Narsimhulu Musini Committed by James Bottomley

snic: driver for Cisco SCSI HBA

Cisco has developed a new PCI HBA interface called sNIC, which stands for
SCSI NIC. This is a new storage feature supported on a specialized network
adapter. The new PCI function provides a uniform host interface and abstracts
the backend storage.

[jejb: fix up checkpatch errors]
Signed-off-by: default avatarNarsimhulu Musini <nmusini@cisco.com>
Signed-off-by: default avatarSesidhar Baddela <sebaddel@cisco.com>
Reviewed-by: default avatarHannes Reinecke <hare@suse.de>
Signed-off-by: default avatarJames Bottomley <JBottomley@Odin.com>
parent 8d2b21db
...@@ -2590,6 +2590,13 @@ L: linux-scsi@vger.kernel.org ...@@ -2590,6 +2590,13 @@ L: linux-scsi@vger.kernel.org
S: Supported S: Supported
F: drivers/scsi/fnic/ F: drivers/scsi/fnic/
CISCO SCSI HBA DRIVER
M: Narsimhulu Musini <nmusini@cisco.com>
M: Sesidhar Baddela <sebaddel@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/snic/
CMPC ACPI DRIVER CMPC ACPI DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com> M: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
M: Daniel Oliveira Nascimento <don@syst.com.br> M: Daniel Oliveira Nascimento <don@syst.com.br>
......
...@@ -634,6 +634,23 @@ config FCOE_FNIC ...@@ -634,6 +634,23 @@ config FCOE_FNIC
<file:Documentation/scsi/scsi.txt>. <file:Documentation/scsi/scsi.txt>.
The module will be called fnic. The module will be called fnic.
config SCSI_SNIC
tristate "Cisco SNIC Driver"
depends on PCI && SCSI
help
This is support for the Cisco PCI-Express SCSI HBA.
To compile this driver as a module, choose M here and read
<file:Documentation/scsi/scsi.txt>.
The module will be called snic.
config SCSI_SNIC_DEBUG_FS
bool "Cisco SNIC Driver Debugfs Support"
depends on SCSI_SNIC && DEBUG_FS
help
	  This enables listing of debugging information from the SNIC driver
	  via the debugfs file system.
config SCSI_DMX3191D config SCSI_DMX3191D
tristate "DMX3191D SCSI support" tristate "DMX3191D SCSI support"
depends on PCI && SCSI depends on PCI && SCSI
......
...@@ -39,6 +39,7 @@ obj-$(CONFIG_LIBFC) += libfc/ ...@@ -39,6 +39,7 @@ obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/ obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/ obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/ obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_SCSI_SNIC) += snic/
obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/ obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
......
# Build the snic module when CONFIG_SCSI_SNIC is enabled (y/m).
obj-$(CONFIG_SCSI_SNIC) += snic.o

# Core objects always linked into snic.o
snic-y := \
	snic_attrs.o \
	snic_main.o \
	snic_res.o \
	snic_isr.o \
	snic_ctl.o \
	snic_io.o \
	snic_scsi.o \
	snic_disc.o \
	vnic_cq.o \
	vnic_intr.o \
	vnic_dev.o \
	vnic_wq.o

# Debugfs and trace support objects, only when CONFIG_SCSI_SNIC_DEBUG_FS=y
snic-$(CONFIG_SCSI_SNIC_DEBUG_FS) += snic_debugfs.o snic_trc.o
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
/*
 * Completion queue descriptor types
 */
enum cq_desc_types {
	CQ_DESC_TYPE_WQ_ENET = 0,
	CQ_DESC_TYPE_DESC_COPY = 1,
	CQ_DESC_TYPE_WQ_EXCH = 2,
	CQ_DESC_TYPE_RQ_ENET = 3,
	CQ_DESC_TYPE_RQ_FCP = 4,
};

/* Completion queue descriptor: 16B
 *
 * All completion queues have this basic layout. The
 * type_specific area is unique for each completion
 * queue type.
 */
struct cq_desc {
	__le16 completed_index;	/* decoded by cq_desc_dec() below */
	__le16 q_number;	/* queue this completion belongs to */
	u8 type_specific[11];	/* per-type payload (see cq_desc_types) */
	u8 type_color;		/* packed type bits + color (ownership) bit */
};
/* Bit-field packing of struct cq_desc, used by cq_desc_dec() below. */
#define CQ_DESC_TYPE_BITS        4
#define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK       1
#define CQ_DESC_COLOR_SHIFT      7
#define CQ_DESC_Q_NUM_BITS       10
#define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS    12
#define CQ_DESC_COMP_NDX_MASK    ((1 << CQ_DESC_COMP_NDX_BITS) - 1)

/*
 * cq_desc_dec - decode a completion descriptor into its fields.
 * @desc_arg:		descriptor to decode (read-only)
 * @type:		out: descriptor type bits (enum cq_desc_types)
 * @color:		out: color (ownership) bit
 * @q_number:		out: queue number, masked to CQ_DESC_Q_NUM_BITS
 * @completed_index:	out: completed index, masked to CQ_DESC_COMP_NDX_BITS
 */
static inline void cq_desc_dec(const struct cq_desc *desc_arg,
	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
	const struct cq_desc *desc = desc_arg;
	const u8 type_color = desc->type_color;

	*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;

	/*
	 * Make sure color bit is read from desc *before* other fields
	 * are read from desc. Hardware guarantees color bit is last
	 * bit (byte) written. Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */
	rmb();

	*type = type_color & CQ_DESC_TYPE_MASK;
	*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
	*completed_index = le16_to_cpu(desc->completed_index) &
		CQ_DESC_COMP_NDX_MASK;
}
#endif /* _CQ_DESC_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
#include "cq_desc.h"
/* Ethernet completion queue descriptor: 16B */
struct cq_enet_wq_desc {
	__le16 completed_index;
	__le16 q_number;
	u8 reserved[11];
	u8 type_color;
};

/*
 * cq_enet_wq_desc_dec - decode an ethernet WQ completion.
 *
 * The field layout matches struct cq_desc, so the generic decoder is
 * reused via a pointer cast.
 */
static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
	cq_desc_dec((struct cq_desc *)desc, type,
		color, q_number, completed_index);
}
#endif /* _CQ_ENET_DESC_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _SNIC_H_
#define _SNIC_H_
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/mempool.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "snic_disc.h"
#include "snic_io.h"
#include "snic_res.h"
#include "snic_trc.h"
#include "snic_stats.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_snic.h"
#define SNIC_DRV_NAME "snic"
#define SNIC_DRV_DESCRIPTION "Cisco SCSI NIC Driver"
#define SNIC_DRV_VERSION "0.0.1.18"
#define PFX SNIC_DRV_NAME ":"
#define DFX SNIC_DRV_NAME "%d: "
#define DESC_CLEAN_LOW_WATERMARK 8
#define SNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
#define SNIC_MAX_IO_REQ 50 /* scsi_cmnd tag map entries */
#define SNIC_MIN_IO_REQ 8 /* Min IO throttle count */
#define SNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define SNIC_DFLT_QUEUE_DEPTH 32 /* Default Queue Depth */
#define SNIC_MAX_QUEUE_DEPTH 64 /* Max Queue Depth */
#define SNIC_DFLT_CMD_TIMEOUT 90 /* Extended tmo for FW */
/*
* Tag bits used for special requests.
*/
#define SNIC_TAG_ABORT BIT(30) /* Tag indicating abort */
#define SNIC_TAG_DEV_RST BIT(29) /* Tag for device reset */
#define SNIC_TAG_IOCTL_DEV_RST BIT(28) /* Tag for User Device Reset */
#define SNIC_TAG_MASK (BIT(24) - 1) /* Mask for lookup */
#define SNIC_NO_TAG -1
/*
* Command flags to identify the type of command and for other future use
*/
#define SNIC_NO_FLAGS 0
#define SNIC_IO_INITIALIZED BIT(0)
#define SNIC_IO_ISSUED BIT(1)
#define SNIC_IO_DONE BIT(2)
#define SNIC_IO_REQ_NULL BIT(3)
#define SNIC_IO_ABTS_PENDING BIT(4)
#define SNIC_IO_ABORTED BIT(5)
#define SNIC_IO_ABTS_ISSUED BIT(6)
#define SNIC_IO_TERM_ISSUED BIT(7)
#define SNIC_IO_ABTS_TIMEDOUT BIT(8)
#define SNIC_IO_ABTS_TERM_DONE BIT(9)
#define SNIC_IO_ABTS_TERM_REQ_NULL BIT(10)
#define SNIC_IO_ABTS_TERM_TIMEDOUT BIT(11)
#define SNIC_IO_INTERNAL_TERM_PENDING BIT(12)
#define SNIC_IO_INTERNAL_TERM_ISSUED BIT(13)
#define SNIC_DEVICE_RESET BIT(14)
#define SNIC_DEV_RST_ISSUED BIT(15)
#define SNIC_DEV_RST_TIMEDOUT BIT(16)
#define SNIC_DEV_RST_ABTS_ISSUED BIT(17)
#define SNIC_DEV_RST_TERM_ISSUED BIT(18)
#define SNIC_DEV_RST_DONE BIT(19)
#define SNIC_DEV_RST_REQ_NULL BIT(20)
#define SNIC_DEV_RST_ABTS_DONE BIT(21)
#define SNIC_DEV_RST_TERM_DONE BIT(22)
#define SNIC_DEV_RST_ABTS_PENDING BIT(23)
#define SNIC_DEV_RST_PENDING BIT(24)
#define SNIC_DEV_RST_NOTSUP BIT(25)
#define SNIC_SCSI_CLEANUP BIT(26)
#define SNIC_HOST_RESET_ISSUED BIT(27)
#define SNIC_ABTS_TIMEOUT 30000 /* msec */
#define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */
#define SNIC_HOST_RESET_TIMEOUT 30000 /* msec */
/*
* These are protected by the hashed req_lock.
*/
#define CMD_SP(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->rqi)
#define CMD_STATE(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->state)
#define CMD_ABTS_STATUS(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->abts_status)
#define CMD_LR_STATUS(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->lr_status)
#define CMD_FLAGS(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->flags)
#define SNIC_INVALID_CODE 0x100 /* Hdr Status val unused by firmware */
#define SNIC_MAX_TARGET 256
#define SNIC_FLAGS_NONE (0)
/* snic module params */
extern unsigned int snic_max_qdepth;

/* snic debugging */
extern unsigned int snic_log_level;

/* Bit masks tested against snic_log_level by SNIC_CHECK_LOGGING(). */
#define SNIC_MAIN_LOGGING	0x1
#define SNIC_SCSI_LOGGING	0x2
#define SNIC_ISR_LOGGING	0x8
#define SNIC_DESC_LOGGING	0x10

/*
 * SNIC_CHECK_LOGGING - execute CMD only when the given LEVEL bit is
 * set in the snic_log_level module parameter. CMD may be one or more
 * statements; the inner do/while keeps it a single statement.
 */
#define SNIC_CHECK_LOGGING(LEVEL, CMD)			\
	do {						\
		if (unlikely(snic_log_level & LEVEL))	\
			do {				\
				CMD;			\
			} while (0);			\
	} while (0)

/* Per-host debug messages, gated on the corresponding log-level bit. */
#define SNIC_MAIN_DBG(host, fmt, args...)	\
	SNIC_CHECK_LOGGING(SNIC_MAIN_LOGGING,		\
		shost_printk(KERN_INFO, host, fmt, ## args);)

#define SNIC_SCSI_DBG(host, fmt, args...)	\
	SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING,		\
		shost_printk(KERN_INFO, host, fmt, ##args);)

/* Note: discovery messages share the SCSI logging bit. */
#define SNIC_DISC_DBG(host, fmt, args...)	\
	SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING,		\
		shost_printk(KERN_INFO, host, fmt, ##args);)

#define SNIC_ISR_DBG(host, fmt, args...)	\
	SNIC_CHECK_LOGGING(SNIC_ISR_LOGGING,		\
		shost_printk(KERN_INFO, host, fmt, ##args);)

/* Unconditional per-host and module-prefixed messages. */
#define SNIC_HOST_ERR(host, fmt, args...)		\
	shost_printk(KERN_ERR, host, fmt, ##args)

#define SNIC_HOST_INFO(host, fmt, args...)		\
	shost_printk(KERN_INFO, host, fmt, ##args)

#define SNIC_INFO(fmt, args...)				\
	pr_info(PFX fmt, ## args)

#define SNIC_DBG(fmt, args...)				\
	pr_info(PFX fmt, ## args)

#define SNIC_ERR(fmt, args...)				\
	pr_err(PFX fmt, ## args)
#ifdef DEBUG
/*
 * SNIC_BUG_ON - log and crash (BUG) when EXPR is true.
 *
 * EXPR is evaluated exactly once. The original expanded EXPR a second
 * time inside BUG_ON(), which fires any side effects twice and can
 * miss a racy condition that changes between the two evaluations.
 */
#define SNIC_BUG_ON(EXPR) \
	({ \
		if (EXPR) { \
			SNIC_ERR("SNIC BUG(%s)\n", #EXPR); \
			BUG(); \
		} \
	})
#else
/*
 * SNIC_BUG_ON - log and warn once per call site when EXPR is true.
 * EXPR is evaluated exactly once (see note above on the DEBUG variant).
 */
#define SNIC_BUG_ON(EXPR) \
	({ \
		if (EXPR) { \
			SNIC_ERR("SNIC BUG(%s) at %s : %d\n", \
				 #EXPR, __func__, __LINE__); \
			WARN_ON_ONCE(1); \
		} \
	})
#endif
/* Soft assert */
#define SNIC_ASSERT_NOT_IMPL(EXPR) \
({ \
if (EXPR) {\
SNIC_INFO("Functionality not impl'ed at %s:%d\n", \
__func__, __LINE__); \
WARN_ON_ONCE(EXPR); \
} \
})
/* Printable names indexed by enum snic_state (see snic_state_to_str()). */
extern const char *snic_state_str[];

/* Interrupt sources when running in legacy INTx mode. */
enum snic_intx_intr_index {
	SNIC_INTX_WQ_RQ_COPYWQ,
	SNIC_INTX_ERR,
	SNIC_INTX_NOTIFY,
	SNIC_INTX_INTR_MAX,
};

/* MSI-X vector assignment. */
enum snic_msix_intr_index {
	SNIC_MSIX_WQ,
	SNIC_MSIX_IO_CMPL,
	SNIC_MSIX_ERR_NOTIFY,
	SNIC_MSIX_INTR_MAX,
};

/* Book-keeping for one MSI-X vector. */
struct snic_msix_entry {
	int requested;			/* presumably set once the IRQ is requested — confirm in snic_isr.c */
	char devname[IFNAMSIZ];
	irqreturn_t (*isr)(int, void *);
	void *devid;
};

/* Driver/device lifecycle states. */
enum snic_state {
	SNIC_INIT = 0,
	SNIC_ERROR,
	SNIC_ONLINE,
	SNIC_OFFLINE,
	SNIC_FWRESET,
};
/* Work/completion queue counts for this adapter. */
#define SNIC_WQ_MAX		1
#define SNIC_CQ_IO_CMPL_MAX	1
#define SNIC_CQ_MAX	(SNIC_WQ_MAX + SNIC_CQ_IO_CMPL_MAX)

/* firmware version information
 *
 * Filled in by snic_io_exch_ver_cmpl_handler() from the firmware's
 * exchange-version response.
 */
struct snic_fw_info {
	u32	fw_ver;
	u32	hid;			/* u16 hid | u16 vnic id */
	u32	max_concur_ios;		/* max concurrent ios */
	u32	max_sgs_per_cmd;	/* max sgls per IO */
	u32	max_io_sz;		/* max io size supported */
	u32	hba_cap;		/* hba capabilities */
	u32	max_tgts;		/* max tgts supported */
	u16	io_tmo;			/* FW Extended timeout */
	struct completion *wait;	/* protected by snic lock*/
};

/*
 * snic_work item : defined to process asynchronous events
 */
struct snic_work {
	struct work_struct work;
	u16	ev_id;		/* event identifier */
	u64	*ev_data;	/* event payload */
};
/*
 * snic structure to represent SCSI vNIC
 *
 * One instance per probed PCI function; linked on the global snic
 * list and stored as the Scsi_Host private data (shost_priv()).
 */
struct snic {
	/* snic specific members */
	struct list_head list;
	char name[IFNAMSIZ];
	atomic_t state;			/* presumably holds enum snic_state — see snic_get_state() */
	spinlock_t snic_lock;
	struct completion *remove_wait;
	bool in_remove;
	bool stop_link_events;		/* stop processing link events */

	/* discovery related */
	struct snic_disc disc;

	/* Scsi Host info */
	struct Scsi_Host *shost;

	/* vnic related structures */
	struct vnic_dev_bar bar0;

	struct vnic_stats *stats;
	unsigned long stats_time;
	unsigned long stats_reset_time;

	struct vnic_dev *vdev;

	/* hw resource info */
	unsigned int wq_count;
	unsigned int cq_count;
	unsigned int intr_count;
	unsigned int err_intr_offset;

	int link_status; /* retrieved from svnic_dev_link_status() */
	u32 link_down_cnt;

	/* pci related */
	struct pci_dev *pdev;
	struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX];
	struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX];

	/* io related info */
	mempool_t *req_pool[SNIC_REQ_MAX_CACHES]; /* (??) */
	____cacheline_aligned spinlock_t io_req_lock[SNIC_IO_LOCKS];

	/* Maintain snic specific commands, cmds with no tag in spl_cmd_list */
	____cacheline_aligned spinlock_t spl_cmd_lock;
	struct list_head spl_cmd_list;

	unsigned int max_tag_id;
	atomic_t ios_inflight;		/* io in flight counter */

	struct vnic_snic_config config;

	struct work_struct link_work;

	/* firmware information */
	struct snic_fw_info fwinfo;

	/* Work for processing Target related work */
	struct work_struct tgt_work;

	/* Work for processing Discovery */
	struct work_struct disc_work;

	/* stats related */
	unsigned int reset_stats;	/* non-zero requests a stats reset (debugfs) */
	atomic64_t io_cmpl_skip;	/* completions to skip counting after a reset */
	struct snic_stats s_stats;	/* Per SNIC driver stats */

	/* platform specific */
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	struct dentry *stats_host;	/* Per snic debugfs root */
	struct dentry *stats_file;	/* Per snic debugfs file */
	struct dentry *reset_stats_file;/* Per snic reset stats file */
#endif

	/* completion queue cache line section */
	____cacheline_aligned struct vnic_cq cq[SNIC_CQ_MAX];

	/* work queue cache line section */
	____cacheline_aligned struct vnic_wq wq[SNIC_WQ_MAX];
	spinlock_t wq_lock[SNIC_WQ_MAX];

	/* interrupt resource cache line section */
	____cacheline_aligned struct vnic_intr intr[SNIC_MSIX_INTR_MAX];
}; /* end of snic structure */
/*
* SNIC Driver's Global Data
*/
struct snic_global {
struct list_head snic_list;
spinlock_t snic_list_lock;
struct kmem_cache *req_cache[SNIC_REQ_MAX_CACHES];
struct workqueue_struct *event_q;
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
/* debugfs related global data */
struct dentry *trc_root;
struct dentry *stats_root;
struct snic_trc trc ____cacheline_aligned;
#endif
};
extern struct snic_global *snic_glob;
int snic_glob_init(void);
void snic_glob_cleanup(void);
extern struct workqueue_struct *snic_event_queue;
extern struct device_attribute *snic_attrs[];
int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int snic_abort_cmd(struct scsi_cmnd *);
int snic_device_reset(struct scsi_cmnd *);
int snic_host_reset(struct scsi_cmnd *);
int snic_reset(struct Scsi_Host *, struct scsi_cmnd *);
void snic_shutdown_scsi_cleanup(struct snic *);
int snic_request_intr(struct snic *);
void snic_free_intr(struct snic *);
int snic_set_intr_mode(struct snic *);
void snic_clear_intr_mode(struct snic *);
int snic_fwcq_cmpl_handler(struct snic *, int);
int snic_wq_cmpl_handler(struct snic *, int);
void snic_free_wq_buf(struct vnic_wq *, struct vnic_wq_buf *);
void snic_log_q_error(struct snic *);
void snic_handle_link_event(struct snic *);
void snic_handle_link(struct work_struct *);
int snic_queue_exch_ver_req(struct snic *);
int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len);
void snic_handle_untagged_req(struct snic *, struct snic_req_info *);
void snic_release_untagged_req(struct snic *, struct snic_req_info *);
void snic_free_all_untagged_reqs(struct snic *);
int snic_get_conf(struct snic *);
void snic_set_state(struct snic *, enum snic_state);
int snic_get_state(struct snic *);
const char *snic_state_to_str(unsigned int);
void snic_hex_dump(char *, char *, int);
void snic_print_desc(const char *fn, char *os_buf, int len);
const char *show_opcode_name(int val);
#endif /* _SNIC_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/device.h>
#include "snic.h"
/* sysfs 'snic_sym_name': report the symbolic name of this snic instance. */
static ssize_t
snic_show_sym_name(struct device *dev,
		   struct device_attribute *attr,
		   char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct snic *snic = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%s\n", snic->name);
}
/* sysfs 'snic_state': report the driver state as a string. */
static ssize_t
snic_show_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct snic *snic = shost_priv(shost);
	const char *state_name = snic_state_str[snic_get_state(snic)];

	return snprintf(buf, PAGE_SIZE, "%s\n", state_name);
}
/* sysfs 'drv_version': report the compile-time driver version string. */
static ssize_t
snic_show_drv_version(struct device *dev,
		      struct device_attribute *attr,
		      char *buf)
{
	const char *ver = SNIC_DRV_VERSION;

	return snprintf(buf, PAGE_SIZE, "%s\n", ver);
}
/* sysfs 'link_state': report link up/down, refreshing from HW for DAS. */
static ssize_t
snic_show_link_state(struct device *dev,
		     struct device_attribute *attr,
		     char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct snic *snic = shost_priv(shost);
	const char *state;

	/* Only DAS transports re-query the vnic device for link status. */
	if (snic->config.xpt_type == SNIC_DAS)
		snic->link_status = svnic_dev_link_status(snic->vdev);

	state = snic->link_status ? "Link Up" : "Link Down";

	return snprintf(buf, PAGE_SIZE, "%s\n", state);
}
/* Read-only sysfs attributes exposed per Scsi_Host. */
static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL);
static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL);

/* NULL-terminated table referenced from the host template (snic.h). */
struct device_attribute *snic_attrs[] = {
	&dev_attr_snic_sym_name,
	&dev_attr_snic_state,
	&dev_attr_drv_version,
	&dev_attr_link_state,
	NULL,
};
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include <linux/ctype.h>
#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"
/*
 * snic_handle_link : Handles link flaps.
 *
 * Work handler for snic->link_work. Only the DAS transport type is
 * handled; other transport types log the event and soft-assert.
 */
void
snic_handle_link(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, link_work);

	if (snic->config.xpt_type != SNIC_DAS) {
		SNIC_HOST_INFO(snic->shost, "Link Event Received.\n");
		SNIC_ASSERT_NOT_IMPL(1);

		return;
	}

	/* Refresh the cached link state and down counter from the device. */
	snic->link_status = svnic_dev_link_status(snic->vdev);
	snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev);
	SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n",
		       ((snic->link_status) ? "Up" : "Down"));
}
/*
 * snic_ver_enc : Encodes version str to int
 * version string is similar to netmask string
 *
 * "a.b.c.d" with each component <= 0xff is packed into a single u32
 * as a<<24 | b<<16 | c<<8 | d. Returns -1 on malformed input (wrong
 * length, bad characters, too many components, or component > 0xff).
 */
static int
snic_ver_enc(const char *s)
{
	int v[4] = {0};
	int i = 0, x = 0;
	char c;
	const char *p = s;

	/* validate version string */
	if ((strlen(s) > 15) || (strlen(s) < 7))
		goto end;

	while ((c = *p++)) {
		if (c == '.') {
			i++;
			continue;
		}

		/*
		 * v[] has exactly four slots (indices 0..3); reject a
		 * fifth dotted component or any non-digit. The original
		 * check (i > 4) allowed i == 4 and wrote past the end
		 * of v[].
		 */
		if (i > 3 || !isdigit(c))
			goto end;

		v[i] = v[i] * 10 + (c - '0');
	}

	/* validate sub version numbers */
	for (i = 3; i >= 0; i--)
		if (v[i] > 0xff)
			goto end;

	x |= (v[0] << 24) | v[1] << 16 | v[2] << 8 | v[3];

end:
	if (x == 0) {
		SNIC_ERR("Invalid version string [%s].\n", s);

		return -1;
	}

	return x;
} /* end of snic_ver_enc */
/*
 * snic_queue_exch_ver_req :
 *
 * Queues Exchange Version Request, to communicate host information
 * in return, it gets firmware version details
 *
 * Returns 0 on success, -ENOMEM if the request cannot be allocated,
 * or the error from snic_queue_wq_desc() if queuing fails.
 */
int
snic_queue_exch_ver_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	struct snic_host_req *req = NULL;
	u32 ver = 0;
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "Exch Ver Req Preparing...\n");

	rqi = snic_req_init(snic, 0);
	if (!rqi) {
		/*
		 * Set the error code before logging it; the original
		 * printed ret while it was still 0.
		 */
		ret = -ENOMEM;
		SNIC_HOST_ERR(snic->shost,
			      "Queuing Exch Ver Req failed, err = %d\n",
			      ret);

		goto error;
	}

	req = rqi_to_req(rqi);

	/* Initialize snic_host_req */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_EXCH_VER, 0, SCSI_NO_TAG,
			snic->config.hid, 0, (ulong)rqi);
	ver = snic_ver_enc(SNIC_DRV_VERSION);
	req->u.exch_ver.drvr_ver = cpu_to_le32(ver);
	req->u.exch_ver.os_type = cpu_to_le32(SNIC_OS_LINUX);

	/* Track the request on the untagged list before queuing to FW. */
	snic_handle_untagged_req(snic, rqi);

	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
	if (ret) {
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost,
			      "Queuing Exch Ver Req failed, err = %d\n",
			      ret);

		goto error;
	}

	SNIC_HOST_INFO(snic->shost, "Exch Ver Req is issued. ret = %d\n", ret);

error:
	return ret;
} /* end of snic_queue_exch_ver_req */
/*
 * snic_io_exch_ver_cmpl_handler
 *
 * Completion handler for the exchange-version request. Caches the
 * firmware capabilities in snic->fwinfo (under snic_lock), adjusts
 * the Scsi_Host limits (sg_tablesize, can_queue, max_sectors), and
 * wakes any waiter parked in snic_get_conf(). The untagged request
 * is released on every path.
 */
int
snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	struct snic_req_info *rqi = NULL;
	struct snic_exch_ver_rsp *exv_cmpl = &fwreq->u.exch_ver_cmpl;
	u8 typ, hdr_stat;
	u32 cmnd_id, hid, max_sgs;
	ulong ctx = 0;
	unsigned long flags;
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_BUG_ON(snic->config.hid != hid);
	/* ctx carries the rqi pointer encoded at issue time */
	rqi = (struct snic_req_info *) ctx;

	if (hdr_stat) {
		/* Firmware reported an error; nothing to cache. */
		SNIC_HOST_ERR(snic->shost,
			      "Exch Ver Completed w/ err status %d\n",
			      hdr_stat);

		goto exch_cmpl_end;
	}

	/* fwinfo is shared with snic_get_conf(); update under snic_lock */
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->fwinfo.fw_ver = le32_to_cpu(exv_cmpl->version);
	snic->fwinfo.hid = le32_to_cpu(exv_cmpl->hid);
	snic->fwinfo.max_concur_ios = le32_to_cpu(exv_cmpl->max_concur_ios);
	snic->fwinfo.max_sgs_per_cmd = le32_to_cpu(exv_cmpl->max_sgs_per_cmd);
	snic->fwinfo.max_io_sz = le32_to_cpu(exv_cmpl->max_io_sz);
	snic->fwinfo.max_tgts = le32_to_cpu(exv_cmpl->max_tgts);
	snic->fwinfo.io_tmo = le16_to_cpu(exv_cmpl->io_timeout);

	SNIC_HOST_INFO(snic->shost,
		       "vers %u hid %u max_concur_ios %u max_sgs_per_cmd %u max_io_sz %u max_tgts %u fw tmo %u\n",
		       snic->fwinfo.fw_ver,
		       snic->fwinfo.hid,
		       snic->fwinfo.max_concur_ios,
		       snic->fwinfo.max_sgs_per_cmd,
		       snic->fwinfo.max_io_sz,
		       snic->fwinfo.max_tgts,
		       snic->fwinfo.io_tmo);

	SNIC_HOST_INFO(snic->shost,
		       "HBA Capabilities = 0x%x\n",
		       le32_to_cpu(exv_cmpl->hba_cap));

	/* Updating SGList size */
	max_sgs = snic->fwinfo.max_sgs_per_cmd;
	if (max_sgs && max_sgs < SNIC_MAX_SG_DESC_CNT) {
		snic->shost->sg_tablesize = max_sgs;
		SNIC_HOST_INFO(snic->shost, "Max SGs set to %d\n",
			       snic->shost->sg_tablesize);
	} else if (max_sgs > snic->shost->sg_tablesize) {
		SNIC_HOST_INFO(snic->shost,
			       "Target type %d Supports Larger Max SGList %d than driver's Max SG List %d.\n",
			       snic->config.xpt_type, max_sgs,
			       snic->shost->sg_tablesize);
	}

	/* Never queue more to FW than it can handle concurrently. */
	if (snic->shost->can_queue > snic->fwinfo.max_concur_ios)
		snic->shost->can_queue = snic->fwinfo.max_concur_ios;

	/* max_io_sz is in bytes; max_sectors is in 512-byte sectors. */
	snic->shost->max_sectors = snic->fwinfo.max_io_sz >> 9;

	if (snic->fwinfo.wait)
		complete(snic->fwinfo.wait);

	spin_unlock_irqrestore(&snic->snic_lock, flags);

exch_cmpl_end:
	snic_release_untagged_req(snic, rqi);

	SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);

	return ret;
} /* end of snic_io_exch_ver_cmpl_handler */
/*
 * snic_get_conf
 *
 * Synchronous call, and Retrieves snic params.
 *
 * Issues the exchange-version request and blocks (up to 2s per try,
 * 3 tries) for snic_io_exch_ver_cmpl_handler() to fill snic->fwinfo
 * and complete the on-stack waiter. Returns 0 on success, a queuing
 * error, or -ETIMEDOUT if firmware never answered.
 */
int
snic_get_conf(struct snic *snic)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	int ret;
	int nr_retries = 3;	/* total attempts at the exchange */

	SNIC_HOST_INFO(snic->shost, "Retrieving snic params.\n");
	spin_lock_irqsave(&snic->snic_lock, flags);
	memset(&snic->fwinfo, 0, sizeof(snic->fwinfo));
	snic->fwinfo.wait = &wait;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Additional delay to handle HW Resource initialization. */
	msleep(50);

	/*
	 * Exch ver req can be ignored by FW, if HW Resource initialization
	 * is in progress, Hence retry.
	 */
	do {
		ret = snic_queue_exch_ver_req(snic);
		if (ret)
			return ret;

		wait_for_completion_timeout(&wait, msecs_to_jiffies(2000));

		spin_lock_irqsave(&snic->snic_lock, flags);
		/* A non-zero fw_ver means the completion handler ran. */
		ret = (snic->fwinfo.fw_ver != 0) ? 0 : -ETIMEDOUT;
		if (ret)
			SNIC_HOST_ERR(snic->shost,
				      "Failed to retrieve snic params,\n");

		/* Unset fwinfo.wait, on success or on last retry */
		if (ret == 0 || nr_retries == 1)
			snic->fwinfo.wait = NULL;

		spin_unlock_irqrestore(&snic->snic_lock, flags);
	} while (ret && --nr_retries);

	return ret;
} /* end of snic_get_info */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include "snic.h"
/*
 * snic_debugfs_init - Initialize debugfs for snic debug logging
 *
 * Description:
 * When debugfs is configured this routine sets up the snic debugfs
 * filesystem. If not already created, this routine will create the
 * snic directory and statistics directory for trace buffer and
 * stats logging.
 *
 * Returns 0 on success, -1 if either directory cannot be created.
 */
int
snic_debugfs_init(void)
{
	int rc = -1;
	struct dentry *de = NULL;

	de = debugfs_create_dir("snic", NULL);
	if (!de) {
		SNIC_DBG("Cannot create debugfs root\n");

		return rc;
	}
	snic_glob->trc_root = de;

	de = debugfs_create_dir("statistics", snic_glob->trc_root);
	if (!de) {
		SNIC_DBG("Cannot create Statistics directory\n");

		return rc;
	}
	snic_glob->stats_root = de;

	rc = 0;

	return rc;
} /* end of snic_debugfs_init */
/*
 * snic_debugfs_term - Tear down debugfs infrastructure
 *
 * Description:
 * When debugfs is configured this routine removes debugfs file system
 * elements that are specific to snic. Safe to call after a partial
 * snic_debugfs_init(): debugfs_remove() ignores NULL dentries.
 */
void
snic_debugfs_term(void)
{
	debugfs_remove(snic_glob->stats_root);
	snic_glob->stats_root = NULL;

	debugfs_remove(snic_glob->trc_root);
	snic_glob->trc_root = NULL;
}
/*
 * snic_reset_stats_open - Open the reset_stats file
 *
 * Copies i_private (presumably the struct snic pointer stored when the
 * debugfs file was created — confirm at the creation site) into
 * private_data for the read/write handlers.
 */
static int
snic_reset_stats_open(struct inode *inode, struct file *filp)
{
	SNIC_BUG_ON(!inode->i_private);
	filp->private_data = inode->i_private;

	return 0;
}
/*
 * snic_reset_stats_read - Read a reset_stats debugfs file
 * @filp: The file pointer to read from.
 * @ubuf: The buffer to copy the data to.
 * @cnt: The number of bytes to read.
 * @ppos: The position in the file to start reading from.
 *
 * Description:
 * This routine reads value of variable reset_stats
 * and stores into local @buf. It will start reading file @ppos and
 * copy up to @cnt of data to @ubuf from @buf.
 *
 * Returns:
 * This function returns the amount of data that was read.
 */
static ssize_t
snic_reset_stats_read(struct file *filp,
		      char __user *ubuf,
		      size_t cnt,
		      loff_t *ppos)
{
	struct snic *snic = (struct snic *) filp->private_data;
	char buf[64];
	int len;

	/* snprintf bounds the write to buf (original used unbounded sprintf) */
	len = snprintf(buf, sizeof(buf), "%u\n", snic->reset_stats);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
/*
 * snic_reset_stats_write - Write to reset_stats debugfs file
 * @filp: The file pointer to write from
 * @ubuf: The buffer to copy the data from.
 * @cnt: The number of bytes to write.
 * @ppos: The position in the file to start writing to.
 *
 * Description:
 * This routine writes data from user buffer @ubuf to buffer @buf and
 * resets cumulative stats of snic. Any non-zero value triggers the
 * reset.
 *
 * Returns:
 * This function returns the amount of data that was written.
 */
static ssize_t
snic_reset_stats_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct snic *snic = (struct snic *) filp->private_data;
	struct snic_stats *stats = &snic->s_stats;
	u64 *io_stats_p = (u64 *) &stats->io;
	u64 *fw_stats_p = (u64 *) &stats->fw;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = '\0';

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	snic->reset_stats = val;

	if (snic->reset_stats) {
		/* Skip variable is used to avoid discrepancies in Num IOs
		 * and IO Completions stats. Skip incrementing No IO Compls
		 * for pending active IOs after reset_stats
		 */
		atomic64_set(&snic->io_cmpl_skip,
			     atomic64_read(&stats->io.active));
		memset(&stats->abts, 0, sizeof(struct snic_abort_stats));
		memset(&stats->reset, 0, sizeof(struct snic_reset_stats));
		memset(&stats->misc, 0, sizeof(struct snic_misc_stats));
		/* Zero io/fw stats except their first u64 each (presumably
		 * the active/outstanding counters, which must survive the
		 * reset — confirm field order in snic_stats.h).
		 */
		memset(io_stats_p+1,
		       0,
		       sizeof(struct snic_io_stats) - sizeof(u64));
		memset(fw_stats_p+1,
		       0,
		       sizeof(struct snic_fw_stats) - sizeof(u64));
	}

	/* Advance the position so a looping writer makes progress. */
	(*ppos)++;
	SNIC_HOST_INFO(snic->shost, "Reset Op: Driver statistics.\n");

	return cnt;
}
/*
 * snic_reset_stats_release - release hook; drops the cached snic pointer.
 */
static int
snic_reset_stats_release(struct inode *inode, struct file *filp)
{
	filp->private_data = NULL;

	return 0;
}
/*
 * snic_stats_show - Formats and prints per host specific driver stats.
 * @sfp:  seq_file the stats are rendered into.
 * @data: unused; the snic is recovered from sfp->private (see
 *        snic_stats_open/single_open).
 *
 * Dumps IO, SGL, Abort, Reset, Firmware and miscellaneous counters.
 * Each counter is read with atomic64_read(), but the dump as a whole is
 * not atomic, so values may be mutually inconsistent while IO is in
 * flight.  Always returns 0.
 */
static int
snic_stats_show(struct seq_file *sfp, void *data)
{
	struct snic *snic = (struct snic *) sfp->private;
	struct snic_stats *stats = &snic->s_stats;
	struct timespec last_isr_tms, last_ack_tms;
	u64 maxio_tm;
	int i;

	/* Dump IO Stats */
	seq_printf(sfp,
		   "------------------------------------------\n"
		   "\t\t IO Statistics\n"
		   "------------------------------------------\n");

	/* max_time is in jiffies; also shown converted to msec below. */
	maxio_tm = (u64) atomic64_read(&stats->io.max_time);
	seq_printf(sfp,
		   "Active IOs : %lld\n"
		   "Max Active IOs : %lld\n"
		   "Total IOs : %lld\n"
		   "IOs Completed : %lld\n"
		   "IOs Failed : %lld\n"
		   "IOs Not Found : %lld\n"
		   "Memory Alloc Failures : %lld\n"
		   "REQs Null : %lld\n"
		   "SCSI Cmd Pointers Null : %lld\n"
		   "Max SGL for any IO : %lld\n"
		   "Max IO Size : %lld Sectors\n"
		   "Max Queuing Time : %lld\n"
		   "Max Completion Time : %lld\n"
		   "Max IO Process Time(FW) : %lld (%u msec)\n",
		   (u64) atomic64_read(&stats->io.active),
		   (u64) atomic64_read(&stats->io.max_active),
		   (u64) atomic64_read(&stats->io.num_ios),
		   (u64) atomic64_read(&stats->io.compl),
		   (u64) atomic64_read(&stats->io.fail),
		   (u64) atomic64_read(&stats->io.io_not_found),
		   (u64) atomic64_read(&stats->io.alloc_fail),
		   (u64) atomic64_read(&stats->io.req_null),
		   (u64) atomic64_read(&stats->io.sc_null),
		   (u64) atomic64_read(&stats->io.max_sgl),
		   (u64) atomic64_read(&stats->io.max_io_sz),
		   (u64) atomic64_read(&stats->io.max_qtime),
		   (u64) atomic64_read(&stats->io.max_cmpl_time),
		   maxio_tm,
		   jiffies_to_msecs(maxio_tm));

	/* Histogram of SG element counts, eight entries per row. */
	seq_puts(sfp, "\nSGL Counters\n");

	for (i = 0; i < SNIC_MAX_SG_DESC_CNT; i++) {
		seq_printf(sfp,
			   "%10lld ",
			   (u64) atomic64_read(&stats->io.sgl_cnt[i]));

		if ((i + 1) % 8 == 0)
			seq_puts(sfp, "\n");
	}

	/* Dump Abort Stats */
	seq_printf(sfp,
		   "\n-------------------------------------------\n"
		   "\t\t Abort Statistics\n"
		   "---------------------------------------------\n");

	seq_printf(sfp,
		   "Aborts : %lld\n"
		   "Aborts Fail : %lld\n"
		   "Aborts Driver Timeout : %lld\n"
		   "Abort FW Timeout : %lld\n"
		   "Abort IO NOT Found : %lld\n",
		   (u64) atomic64_read(&stats->abts.num),
		   (u64) atomic64_read(&stats->abts.fail),
		   (u64) atomic64_read(&stats->abts.drv_tmo),
		   (u64) atomic64_read(&stats->abts.fw_tmo),
		   (u64) atomic64_read(&stats->abts.io_not_found));

	/* Dump Reset Stats */
	seq_printf(sfp,
		   "\n-------------------------------------------\n"
		   "\t\t Reset Statistics\n"
		   "---------------------------------------------\n");

	seq_printf(sfp,
		   "HBA Resets : %lld\n"
		   "HBA Reset Cmpls : %lld\n"
		   "HBA Reset Fail : %lld\n",
		   (u64) atomic64_read(&stats->reset.hba_resets),
		   (u64) atomic64_read(&stats->reset.hba_reset_cmpl),
		   (u64) atomic64_read(&stats->reset.hba_reset_fail));

	/* Dump Firmware Stats */
	seq_printf(sfp,
		   "\n-------------------------------------------\n"
		   "\t\t Firmware Statistics\n"
		   "---------------------------------------------\n");

	seq_printf(sfp,
		   "Active FW Requests : %lld\n"
		   "Max FW Requests : %lld\n"
		   "FW Out Of Resource Errs : %lld\n"
		   "FW IO Errors : %lld\n"
		   "FW SCSI Errors : %lld\n",
		   (u64) atomic64_read(&stats->fw.actv_reqs),
		   (u64) atomic64_read(&stats->fw.max_actv_reqs),
		   (u64) atomic64_read(&stats->fw.out_of_res),
		   (u64) atomic64_read(&stats->fw.io_errs),
		   (u64) atomic64_read(&stats->fw.scsi_errs));

	/* Dump Miscellenous Stats */
	seq_printf(sfp,
		   "\n---------------------------------------------\n"
		   "\t\t Other Statistics\n"
		   "\n---------------------------------------------\n");

	/* last_isr_time/last_ack_time are raw jiffies; converted to
	 * seconds/nanoseconds for the human-readable part.
	 * NOTE(review): "%8lu" on tv_nsec pads but does not zero-fill;
	 * "%09lu" would read more naturally — confirm before changing.
	 */
	jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms);
	jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms);

	seq_printf(sfp,
		   "Last ISR Time : %llu (%8lu.%8lu)\n"
		   "Last Ack Time : %llu (%8lu.%8lu)\n"
		   "ISRs : %llu\n"
		   "Max CQ Entries : %lld\n"
		   "Data Count Mismatch : %lld\n"
		   "IOs w/ Timeout Status : %lld\n"
		   "IOs w/ Aborted Status : %lld\n"
		   "IOs w/ SGL Invalid Stat : %lld\n"
		   "WQ Desc Alloc Fail : %lld\n"
		   "Queue Full : %lld\n"
		   "Target Not Ready : %lld\n",
		   (u64) stats->misc.last_isr_time,
		   last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
		   (u64)stats->misc.last_ack_time,
		   last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
		   (u64) atomic64_read(&stats->misc.isr_cnt),
		   (u64) atomic64_read(&stats->misc.max_cq_ents),
		   (u64) atomic64_read(&stats->misc.data_cnt_mismat),
		   (u64) atomic64_read(&stats->misc.io_tmo),
		   (u64) atomic64_read(&stats->misc.io_aborted),
		   (u64) atomic64_read(&stats->misc.sgl_inval),
		   (u64) atomic64_read(&stats->misc.wq_alloc_fail),
		   (u64) atomic64_read(&stats->misc.qfull),
		   (u64) atomic64_read(&stats->misc.tgt_not_rdy));

	return 0;
}
/*
 * snic_stats_open - Open the stats file for specific host
 *
 * Description:
 * Hooks the per-host snic pointer (stashed in inode->i_private at file
 * creation) up to a single-shot seq_file rendered by snic_stats_show().
 */
static int
snic_stats_open(struct inode *inode, struct file *filp)
{
	void *host_priv = inode->i_private;

	return single_open(filp, snic_stats_show, host_priv);
}
/* Read-only "stats" file: rendered via seq_file (snic_stats_show). */
static const struct file_operations snic_stats_fops = {
	.owner = THIS_MODULE,
	.open = snic_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Read/write "reset_stats" file: writing a non-zero value clears the
 * cumulative counters (see snic_reset_stats_write).
 */
static const struct file_operations snic_reset_stats_fops = {
	.owner = THIS_MODULE,
	.open = snic_reset_stats_open,
	.read = snic_reset_stats_read,
	.write = snic_reset_stats_write,
	.release = snic_reset_stats_release,
};
/*
 * snic_stats_debugfs_init - Initialize stats struct and create stats file
 * per snic
 *
 * Description:
 * When debugfs is configured this routine sets up the stats files per
 * snic.  It creates "stats" and "reset_stats" under the
 * statistics/host# directory to log per snic stats.
 *
 * Returns 0 on success, -1 on any failure.
 *
 * NOTE(review): on a partial failure the dentries created so far are
 * left in place (cached in snic->stats_host/stats_file); presumably the
 * caller tears them down via snic_stats_debugfs_remove() — confirm.
 */
int
snic_stats_debugfs_init(struct snic *snic)
{
	int rc = -1;
	char name[16];
	struct dentry *de = NULL;

	snprintf(name, sizeof(name), "host%d", snic->shost->host_no);
	/* Global "statistics" root must have been created at module init. */
	if (!snic_glob->stats_root) {
		SNIC_DBG("snic_stats root doesn't exist\n");

		return rc;
	}

	de = debugfs_create_dir(name, snic_glob->stats_root);
	if (!de) {
		SNIC_DBG("Cannot create host directory\n");

		return rc;
	}
	snic->stats_host = de;

	de = debugfs_create_file("stats",
				 S_IFREG|S_IRUGO,
				 snic->stats_host,
				 snic,
				 &snic_stats_fops);
	if (!de) {
		SNIC_DBG("Cannot create host's stats file\n");

		return rc;
	}
	snic->stats_file = de;

	de = debugfs_create_file("reset_stats",
				 S_IFREG|S_IRUGO|S_IWUSR,
				 snic->stats_host,
				 snic,
				 &snic_reset_stats_fops);
	if (!de) {
		SNIC_DBG("Cannot create host's reset_stats file\n");

		return rc;
	}
	snic->reset_stats_file = de;
	rc = 0;

	return rc;
} /* end of snic_stats_debugfs_init */
/*
 * snic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
 *
 * Description:
 * Removes the per-host "stats" and "reset_stats" files and then the
 * host directory itself, clearing the cached dentry pointers.
 */
void
snic_stats_debugfs_remove(struct snic *snic)
{
	/* Children first, then their parent directory. */
	debugfs_remove(snic->stats_file);
	debugfs_remove(snic->reset_stats_file);
	debugfs_remove(snic->stats_host);

	snic->stats_file = NULL;
	snic->reset_stats_file = NULL;
	snic->stats_host = NULL;
}
/* Trace Facility related API */

/* seq .start: single global trace context; the read position is ignored. */
static void *
snic_trc_seq_start(struct seq_file *sfp, loff_t *pos)
{
	return &snic_glob->trc;
}

/* seq .next: one record is rendered per iteration; no successor. */
static void *
snic_trc_seq_next(struct seq_file *sfp, void *data, loff_t *pos)
{
	return NULL;
}

/* seq .stop: nothing to release. */
static void
snic_trc_seq_stop(struct seq_file *sfp, void *data)
{
}
#define SNIC_TRC_PBLEN	256

/*
 * snic_trc_seq_show - Emit one formatted trace record, if any is pending.
 *
 * Pulls at most SNIC_TRC_PBLEN bytes out of the global trace buffer and
 * prints them as a single line; an empty buffer produces no output.
 */
static int
snic_trc_seq_show(struct seq_file *sfp, void *data)
{
	char line[SNIC_TRC_PBLEN];
	int len;

	len = snic_get_trc_data(line, SNIC_TRC_PBLEN);
	if (len > 0)
		seq_printf(sfp, "%s\n", line);

	return 0;
}
/* seq_file iterator backing the "trace" debugfs file. */
static const struct seq_operations snic_trc_seq_ops = {
	.start = snic_trc_seq_start,
	.next = snic_trc_seq_next,
	.stop = snic_trc_seq_stop,
	.show = snic_trc_seq_show,
};
/* Open callback for the "trace" debugfs file. */
static int
snic_trc_open(struct inode *inode, struct file *filp)
{
	const struct seq_operations *ops = &snic_trc_seq_ops;

	return seq_open(filp, ops);
}
/* Read-only "trace" file: streamed via snic_trc_seq_ops. */
static const struct file_operations snic_trc_fops = {
	.owner = THIS_MODULE,
	.open = snic_trc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
/*
 * snic_trc_debugfs_init : creates trace/tracing_enable files for trace
 * under debugfs
 *
 * Returns 0 on success, -1 if the trace root directory is missing or a
 * file cannot be created.  Created dentries are cached in
 * snic_glob->trc and removed by snic_trc_debugfs_term().
 */
int
snic_trc_debugfs_init(void)
{
	struct dentry *de = NULL;
	int ret = -1;

	if (!snic_glob->trc_root) {
		SNIC_ERR("Debugfs root directory for snic doesn't exist.\n");

		return ret;
	}

	de = debugfs_create_bool("tracing_enable",
				 S_IFREG | S_IRUGO | S_IWUSR,
				 snic_glob->trc_root,
				 &snic_glob->trc.enable);
	if (!de) {
		/* Fix: message now names the file actually being created
		 * ("tracing_enable", not "trace_enable").
		 */
		SNIC_ERR("Can't create tracing_enable file.\n");

		return ret;
	}

	snic_glob->trc.trc_enable = de;

	de = debugfs_create_file("trace",
				 S_IFREG | S_IRUGO | S_IWUSR,
				 snic_glob->trc_root,
				 NULL,
				 &snic_trc_fops);
	if (!de) {
		/* Fix: typo "Cann't" -> "Can't" in the log message. */
		SNIC_ERR("Can't create trace file.\n");

		return ret;
	}

	snic_glob->trc.trc_file = de;
	ret = 0;

	return ret;
} /* end of snic_trc_debugfs_init */
/*
 * snic_trc_debugfs_term : cleans up the files created for trace under debugfs
 */
void
snic_trc_debugfs_term(void)
{
	/* Remove both trace dentries, then drop the cached pointers. */
	debugfs_remove(snic_glob->trc.trc_file);
	debugfs_remove(snic_glob->trc.trc_enable);

	snic_glob->trc.trc_file = NULL;
	snic_glob->trc.trc_enable = NULL;
}
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include "snic_disc.h"
#include "snic.h"
#include "snic_io.h"
/* snic target types: printable names indexed by enum snic_tgt_type */
static const char * const snic_tgt_type_str[] = {
	[SNIC_TGT_DAS] = "DAS",
	[SNIC_TGT_SAN] = "SAN",
};
/* Translate a snic_tgt_type value into a printable name. */
static inline const char *
snic_tgt_type_to_str(int typ)
{
	if (typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN)
		return snic_tgt_type_str[typ];

	return "Unknown";
}
/* Printable names indexed by enum snic_tgt_state */
static const char * const snic_tgt_state_str[] = {
	[SNIC_TGT_STAT_INIT] = "INIT",
	[SNIC_TGT_STAT_ONLINE] = "ONLINE",
	[SNIC_TGT_STAT_OFFLINE] = "OFFLINE",
	[SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS",
};
/* Translate a snic_tgt_state value into a printable name. */
const char *
snic_tgt_state_to_str(int state)
{
	if (state < SNIC_TGT_STAT_INIT || state > SNIC_TGT_STAT_DEL)
		return "UNKNOWN";

	return snic_tgt_state_str[state];
}
/*
 * Initiate report_tgt req desc
 *
 * Encodes a SNIC_REQ_REPORT_TGTS header into @req and attaches a single
 * SG element describing the response buffer (@rsp_buf_pa / @len).  @ctx
 * is the initiator context the firmware echoes back in the completion.
 * Note: @buf (the CPU address) is not referenced in the body; only the
 * DMA address is placed in the descriptor.
 */
static void
snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
		     dma_addr_t rsp_buf_pa, ulong ctx)
{
	struct snic_sg_desc *sgd = NULL;

	/* type=REPORT_TGTS, status=0, tag=SCSI_NO_TAG, one SG element */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
			1, ctx);

	req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
	sgd = req_to_sgl(req);
	sgd[0].addr = cpu_to_le64(rsp_buf_pa);
	sgd[0].len = cpu_to_le32(len);
	sgd[0]._resvd = 0;
	req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
}
/*
 * snic_queue_report_tgt_req: Queues report target request.
 *
 * Allocates an untagged request plus a DMA-able response buffer, encodes
 * a REPORT_TGTS descriptor pointing at that buffer and posts it to the
 * work queue.  On success the buffer is owned by the completion path
 * (snic_report_tgt_cmpl_handler recovers it via rqi->sge_va); on failure
 * everything allocated here is released.  Returns 0 or negative errno.
 */
static int
snic_queue_report_tgt_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	u32 ntgts, buf_len = 0;
	u8 *buf = NULL;
	dma_addr_t pa = 0;
	int ret = 0;

	rqi = snic_req_init(snic, 1);
	if (!rqi) {
		ret = -ENOMEM;
		goto error;
	}

	/* Size the response by FW's max target count when known. */
	if (snic->fwinfo.max_tgts)
		ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id);
	else
		ntgts = snic->shost->max_id;

	/* Allocate Response Buffer */
	SNIC_BUG_ON(ntgts == 0);
	/* Extra SNIC_SG_DESC_ALIGN bytes guarantee room at the required
	 * alignment (checked below).
	 */
	buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;

	buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
	if (!buf) {
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");

		ret = -ENOMEM;
		goto error;
	}

	SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);

	pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		kfree(buf);
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost,
			      "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
			      buf);

		ret = -EINVAL;
		goto error;
	}

	SNIC_BUG_ON(pa == 0);

	/* Completion handler recovers the CPU address from sge_va. */
	rqi->sge_va = (ulong) buf;

	snic_report_tgt_init(rqi->req,
			     snic->config.hid,
			     buf,
			     buf_len,
			     pa,
			     (ulong)rqi);

	snic_handle_untagged_req(snic, rqi);

	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		/* Undo mapping, buffer and request bookkeeping. */
		pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
		kfree(buf);
		rqi->sge_va = 0;
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n");

		goto error;
	}

	SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n");

	return ret;

error:
	SNIC_HOST_ERR(snic->shost,
		      "Queuing Report Targets Failed, err = %d\n",
		      ret);

	return ret;
} /* end of snic_queue_report_tgt_req */
/*
 * snic_scsi_scan_tgt - Work function: call into SML to scan one target.
 *
 * Scans all LUNs of the target and then clears SNIC_TGT_SCAN_PENDING so
 * a queued deletion (snic_tgt_del) no longer has to wait for us.
 */
static void
snic_scsi_scan_tgt(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
	struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
	unsigned long lock_flags;

	SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);

	scsi_scan_target(&tgt->dev, tgt->channel, tgt->scsi_tgt_id,
			 SCAN_WILD_CARD, 1);

	spin_lock_irqsave(shost->host_lock, lock_flags);
	tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
} /* end of snic_scsi_scan_tgt */
/*
* snic_tgt_lookup :
*/
static struct snic_tgt *
snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid)
{
struct list_head *cur, *nxt;
struct snic_tgt *tgt = NULL;
list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
tgt = list_entry(cur, struct snic_tgt, list);
if (tgt->id == le32_to_cpu(tgtid->tgt_id))
return tgt;
tgt = NULL;
}
return tgt;
} /* end of snic_tgt_lookup */
/*
 * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object
 *
 * Final teardown of a target: by this point it must already be unlinked
 * from the discovery list (asserted below), so the memory can be freed.
 */
void
snic_tgt_dev_release(struct device *dev)
{
	struct snic_tgt *tgt = dev_to_tgt(dev);

	SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
		       "Target Device ID %d (%s) Permanently Deleted.\n",
		       tgt->id,
		       dev_name(dev));

	SNIC_BUG_ON(!list_empty(&tgt->list));

	kfree(tgt);
}
/*
 * snic_tgt_del : work function to delete snic_tgt
 *
 * Waits out a pending scan, fences off new IO, aborts outstanding IO,
 * then removes the SCSI target and its device node.  The final
 * put_device() drops the last reference; snic_tgt_dev_release() frees
 * the snic_tgt.
 */
static void
snic_tgt_del(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
	struct Scsi_Host *shost = snic_tgt_to_shost(tgt);

	/* Let an in-flight scan of this target finish first. */
	if (tgt->flags & SNIC_TGT_SCAN_PENDING)
		scsi_flush_work(shost);

	/* Block IOs on child devices, stops new IOs */
	scsi_target_block(&tgt->dev);

	/* Cleanup IOs */
	snic_tgt_scsi_abort_io(tgt);

	/* Unblock IOs now, to flush if there are any. */
	scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);

	/* Delete SCSI Target and sdevs */
	scsi_remove_target(&tgt->dev); /* ?? */
	device_del(&tgt->dev);
	put_device(&tgt->dev);
} /* end of snic_tgt_del */
/* snic_tgt_create: looks up an existing snic_tgt for @tgtid; if none
 * exists, allocates one, links it into the discovery list and registers
 * it with the driver model, then queues a SCSI scan.  Returns the target
 * or NULL on failure.
 */
static struct snic_tgt *
snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
{
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int ret;

	tgt = snic_tgt_lookup(snic, tgtid);
	if (tgt) {
		/* update the information if required */
		return tgt;
	}

	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
	if (!tgt) {
		SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n");
		ret = -ENOMEM;

		return tgt;
	}

	INIT_LIST_HEAD(&tgt->list);
	tgt->id = le32_to_cpu(tgtid->tgt_id);
	tgt->channel = 0;
	SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN);
	tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);

	/*
	 * Plugging into SML Device Tree
	 */
	tgt->tdata.disc_id = 0;
	tgt->state = SNIC_TGT_STAT_INIT;
	device_initialize(&tgt->dev);
	tgt->dev.parent = get_device(&snic->shost->shost_gendev);
	tgt->dev.release = snic_tgt_dev_release;
	INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
	INIT_WORK(&tgt->del_work, snic_tgt_del);

	switch (tgt->tdata.typ) {
	case SNIC_TGT_DAS:
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	case SNIC_TGT_SAN:
		dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	default:
		SNIC_HOST_INFO(snic->shost, "Target type Unknown Detected.\n");
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;
	}

	spin_lock_irqsave(snic->shost->host_lock, flags);
	list_add_tail(&tgt->list, &snic->disc.tgt_list);
	tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
	tgt->state = SNIC_TGT_STAT_ONLINE;
	spin_unlock_irqrestore(snic->shost->host_lock, flags);

	SNIC_HOST_INFO(snic->shost,
		       "Tgt %d, type = %s detected. Adding..\n",
		       tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));

	ret = device_add(&tgt->dev);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Snic Tgt: device_add, with err = %d\n",
			      ret);

		/*
		 * Fix: the target was linked into disc.tgt_list above;
		 * unlink it before freeing, otherwise the list would keep
		 * a dangling pointer to freed memory.
		 */
		spin_lock_irqsave(snic->shost->host_lock, flags);
		list_del_init(&tgt->list);
		spin_unlock_irqrestore(snic->shost->host_lock, flags);

		put_device(&snic->shost->shost_gendev);
		kfree(tgt);
		tgt = NULL;

		return tgt;
	}

	SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));

	scsi_queue_work(snic->shost, &tgt->scan_work);

	return tgt;
} /* end of snic_tgt_create */
/* Handler for discovery
 *
 * Work function (snic->tgt_work): consumes the REPORT_TGTS response
 * buffer (disc.rtgt_info) and creates a snic_tgt per reported id.  The
 * buffer is always freed here (as rtgt_info or via the tgtid alias); on
 * a re-trigger (req_cnt set) discovery is restarted from scratch.
 */
void
snic_handle_tgt_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, tgt_work);
	struct snic_tgt_id *tgtid = NULL;
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int i;

	/* Adapter going away: just drop the response buffer. */
	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		kfree(snic->disc.rtgt_info);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	mutex_lock(&snic->disc.mutex);
	/* Discover triggered during disc in progress */
	if (snic->disc.req_cnt) {
		snic->disc.state = SNIC_DISC_DONE;
		snic->disc.req_cnt = 0;
		mutex_unlock(&snic->disc.mutex);
		kfree(snic->disc.rtgt_info);
		snic->disc.rtgt_info = NULL;
		SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n");

		/* Start Discovery Again */
		snic_disc_start(snic);

		return;
	}

	tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info;
	SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL);

	/* Create (or find) a snic_tgt for each reported target id. */
	for (i = 0; i < snic->disc.rtgt_cnt; i++) {
		tgt = snic_tgt_create(snic, &tgtid[i]);
		if (!tgt) {
			int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid);

			SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n");
			snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz);
			break;
		}
	}

	/* Buffer ownership transfers to the local tgtid alias; freed below. */
	snic->disc.rtgt_info = NULL;
	snic->disc.state = SNIC_DISC_DONE;
	mutex_unlock(&snic->disc.mutex);

	SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n");

	kfree(tgtid);
} /* end of snic_handle_tgt_disc */
/*
 * snic_report_tgt_cmpl_handler - completion handler for REPORT_TGTS.
 *
 * Decodes the firmware response and, when targets were reported, hands
 * the response buffer over to the tgt_work worker (which frees it).
 * Returns 0 when the work was queued, 1 when no targets were found (the
 * buffer is freed here).  In both cases the DMA mapping is undone and
 * the untagged request is released.
 */
int
snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, cmpl_stat;
	u32 cmnd_id, hid, tgt_cnt = 0;
	ulong ctx;
	struct snic_req_info *rqi = NULL;
	struct snic_tgt_id *tgtid;
	int i, ret = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx);
	/* ctx round-trips the rqi pointer stashed at submit time. */
	rqi = (struct snic_req_info *) ctx;
	tgtid = (struct snic_tgt_id *) rqi->sge_va;

	tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt);
	if (tgt_cnt == 0) {
		SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n");
		ret = 1;

		goto end;
	}

	/* printing list of targets here */
	SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt);

	SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts);

	for (i = 0; i < tgt_cnt; i++)
		SNIC_HOST_INFO(snic->shost,
			       "Tgt id = 0x%x\n",
			       le32_to_cpu(tgtid[i].tgt_id));

	/*
	 * Queue work for further processing,
	 * Response Buffer Memory is freed after creating targets
	 */
	snic->disc.rtgt_cnt = tgt_cnt;
	snic->disc.rtgt_info = (u8 *) tgtid;
	queue_work(snic_glob->event_q, &snic->tgt_work);
	ret = 0;

end:
	/* Unmap Response Buffer */
	snic_pci_unmap_rsp_buf(snic, rqi);
	if (ret)
		kfree(tgtid);

	rqi->sge_va = 0;
	snic_release_untagged_req(snic, rqi);

	return ret;
} /* end of snic_report_tgt_cmpl_handler */
/*
 * snic_disc_init - Reset a snic_disc to its pristine pre-discovery state.
 */
void
snic_disc_init(struct snic_disc *disc)
{
	INIT_LIST_HEAD(&disc->tgt_list);
	mutex_init(&disc->mutex);

	disc->state = SNIC_DISC_INIT;
	disc->disc_id = 0;
	disc->nxt_tgt_id = 0;
	disc->req_cnt = 0;
	disc->rtgt_cnt = 0;
	disc->rtgt_info = NULL;
	disc->cb = NULL;
} /* end of snic_disc_init */
/*
 * snic_disc_term - Cancel any queued re-discovery request.
 */
void
snic_disc_term(struct snic *snic)
{
	struct snic_disc *disc = &snic->disc;

	mutex_lock(&disc->mutex);
	if (disc->req_cnt) {
		SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n");
		disc->req_cnt = 0;
	}
	mutex_unlock(&disc->mutex);
}
/*
 * snic_disc_start: Discovery Start ...
 *
 * Kicks off a REPORT_TGTS cycle.  If one is already pending, just bump
 * req_cnt so the completion path restarts discovery.  Returns 0 or the
 * error from queuing the request.
 */
int
snic_disc_start(struct snic *snic)
{
	struct snic_disc *disc = &snic->disc;
	int ret = 0;

	SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");

	mutex_lock(&disc->mutex);
	if (disc->state == SNIC_DISC_PENDING) {
		/* A request is in flight; remember this trigger. */
		disc->req_cnt++;
		mutex_unlock(&disc->mutex);

		return ret;
	}
	disc->state = SNIC_DISC_PENDING;
	mutex_unlock(&disc->mutex);

	ret = snic_queue_report_tgt_req(snic);
	if (ret)
		SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret);

	return ret;
} /* end of snic_disc_start */
/*
 * snic_handle_disc : work function that kicks off target discovery.
 *
 * Fix: the original fell through to its error label even on success,
 * logging "Discovery Failed w/ err = 0" for every successful run.  The
 * error is now reported only when snic_disc_start() actually fails.
 */
void
snic_handle_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, disc_work);
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n");

	ret = snic_disc_start(snic);
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "disc_work: Discovery Failed w/ err = %d\n",
			      ret);
} /* end of snic_disc_work */
/*
 * snic_tgt_del_all : cleanup all snic targets
 * Called on unbinding the interface
 *
 * Marks every discovered target for deletion, unlinks it from the
 * discovery list and queues its del_work on the event workqueue; then
 * flushes the SCSI host workqueue so queued scan_work items finish
 * before returning.
 */
void
snic_tgt_del_all(struct snic *snic)
{
	struct snic_tgt *tgt = NULL;
	struct list_head *cur, *nxt;
	unsigned long flags;

	mutex_lock(&snic->disc.mutex);
	spin_lock_irqsave(snic->shost->host_lock, flags);

	list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
		tgt = list_entry(cur, struct snic_tgt, list);
		tgt->state = SNIC_TGT_STAT_DEL;
		/* Unlink now; the final free happens in snic_tgt_dev_release
		 * once del_work drops the last device reference.
		 */
		list_del_init(&tgt->list);
		SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
		queue_work(snic_glob->event_q, &tgt->del_work);
		tgt = NULL;
	}
	spin_unlock_irqrestore(snic->shost->host_lock, flags);

	/* NOTE(review): this flushes shost's workqueue (scan_work); the
	 * del_work items queued above run on snic_glob->event_q and are
	 * not waited for here — confirm intended.
	 */
	scsi_flush_work(snic->shost);
	mutex_unlock(&snic->disc.mutex);
} /* end of snic_tgt_del_all */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_DISC_H
#define __SNIC_DISC_H
#include "snic_fwint.h"
/* Per-host discovery state machine. */
enum snic_disc_state {
	SNIC_DISC_NONE,		/* discovery never started */
	SNIC_DISC_INIT,		/* initialized, idle */
	SNIC_DISC_PENDING,	/* REPORT_TGTS request outstanding */
	SNIC_DISC_DONE		/* last discovery completed */
};

struct snic;

/* Discovery bookkeeping embedded in struct snic. */
struct snic_disc {
	struct list_head tgt_list;	/* discovered snic_tgt objects */
	enum snic_disc_state state;
	struct mutex mutex;		/* serializes discovery transitions */
	u16	disc_id;
	u8	req_cnt;	/* triggers received while one is pending */
	u32	nxt_tgt_id;	/* next scsi_tgt_id to hand out */
	u32	rtgt_cnt;	/* # targets in last REPORT_TGTS response */
	u8	*rtgt_info;	/* raw REPORT_TGTS response buffer */
	struct delayed_work disc_timeout;	/* not referenced in this file */
	void (*cb)(struct snic *);
};

#define SNIC_TGT_NAM_LEN	16

enum snic_tgt_state {
	SNIC_TGT_STAT_NONE,
	SNIC_TGT_STAT_INIT,
	SNIC_TGT_STAT_ONLINE,	/* Target is Online */
	SNIC_TGT_STAT_OFFLINE,	/* Target is Offline */
	SNIC_TGT_STAT_DEL,
};

struct snic_tgt_priv {
	struct list_head list;
	enum snic_tgt_type typ;
	u16	disc_id;
	/* NOTE(review): array of SNIC_TGT_NAM_LEN *pointers*; this looks
	 * like it was meant to be `char name[SNIC_TGT_NAM_LEN]` — confirm
	 * before relying on it.
	 */
	char	*name[SNIC_TGT_NAM_LEN];

	union {
		/*DAS Target specific info */
		/*SAN Target specific info */
		u8	dummmy;
	} u;
};

/* snic tgt flags */
#define SNIC_TGT_SCAN_PENDING	0x01	/* scan_work queued, not yet done */

/* One discovered backend target, plugged into the driver-model tree. */
struct snic_tgt {
	struct list_head list;		/* link on snic_disc.tgt_list */
	u16	id;			/* FW-assigned target id */
	u16	channel;
	u32	flags;
	u32	scsi_tgt_id;		/* id handed to the SCSI midlayer */
	enum snic_tgt_state state;
	struct device dev;
	struct work_struct scan_work;	/* snic_scsi_scan_tgt */
	struct work_struct del_work;	/* snic_tgt_del */
	struct snic_tgt_priv tdata;
};
struct snic_fw_req;
void snic_disc_init(struct snic_disc *);
int snic_disc_start(struct snic *);
void snic_disc_term(struct snic *);
int snic_report_tgt_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_tgtinfo_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq);
void snic_process_report_tgts_rsp(struct work_struct *);
void snic_handle_tgt_disc(struct work_struct *);
void snic_handle_disc(struct work_struct *);
void snic_tgt_dev_release(struct device *);
void snic_tgt_del_all(struct snic *);
#define dev_to_tgt(d) \
	container_of(d, struct snic_tgt, dev)

/* True iff @dev was created by this driver (identified by release hook). */
static inline int
is_snic_target(struct device *dev)
{
	return dev->release == snic_tgt_dev_release;
}
#define starget_to_tgt(st)	\
	(is_snic_target(((struct scsi_target *) st)->dev.parent) ? \
		dev_to_tgt(st->dev.parent) : NULL)

#define snic_tgt_to_shost(t)	\
	dev_to_shost(t->dev.parent)

/* 0 when the target is online, otherwise a DID_NO_CONNECT host status. */
static inline int
snic_tgt_chkready(struct snic_tgt *tgt)
{
	if (tgt->state != SNIC_TGT_STAT_ONLINE)
		return DID_NO_CONNECT << 16;

	return 0;
}
const char *snic_tgt_state_to_str(int);
int snic_tgt_scsi_abort_io(struct snic_tgt *);
#endif /* end of __SNIC_DISC_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_FWINT_H
#define __SNIC_FWINT_H
#define SNIC_CDB_LEN	32	/* SCSI CDB size 32, can be used for 16 bytes */
#define LUN_ADDR_LEN	8

/*
 * Command entry type
 */
enum snic_io_type {
	/*
	 * Initiator request types
	 */
	SNIC_REQ_REPORT_TGTS = 0x2,	/* Report Targets */
	SNIC_REQ_ICMND,			/* Initiator command for SCSI IO */
	SNIC_REQ_ITMF,			/* Initiator command for Task Mgmt */
	SNIC_REQ_HBA_RESET,		/* SNIC Reset */
	SNIC_REQ_EXCH_VER,		/* Exchange Version Information */
	SNIC_REQ_TGT_INFO,		/* Backend/Target Information */
	SNIC_REQ_BOOT_LUNS,

	/*
	 * Response type
	 */
	SNIC_RSP_REPORT_TGTS_CMPL = 0x12,/* Report Targets Completion */
	SNIC_RSP_ICMND_CMPL,		/* SCSI IO Completion */
	SNIC_RSP_ITMF_CMPL,		/* Task Management Completion */
	SNIC_RSP_HBA_RESET_CMPL,	/* SNIC Reset Completion */
	SNIC_RSP_EXCH_VER_CMPL,		/* Exchange Version Completion*/
	SNIC_RSP_BOOT_LUNS_CMPL,

	/*
	 * Misc Request types
	 */
	SNIC_MSG_ACK = 0x80,		/* Ack: snic_notify_msg */
	SNIC_MSG_ASYNC_EVNOTIFY,	/* Asynchronous Event Notification */
}; /* end of enum snic_io_type */

/*
 * Header status codes from firmware
 */
enum snic_io_status {
	SNIC_STAT_IO_SUCCESS = 0,	/* request was successful */

	/*
	 * If a request to the fw is rejected, the original request header
	 * will be returned with the status set to one of the following:
	 */
	SNIC_STAT_INVALID_HDR,	/* header contains invalid data */
	SNIC_STAT_OUT_OF_RES,	/* out of resources to complete request */
	SNIC_STAT_INVALID_PARM,	/* some parameter in request is not valid */
	SNIC_STAT_REQ_NOT_SUP,	/* req type is not supported */
	SNIC_STAT_IO_NOT_FOUND,	/* requested IO was not found */

	/*
	 * Once a request is processed, the fw will usually return
	 * a cmpl message type. In cases where errors occurred,
	 * the header status would be filled in with one of the following:
	 */
	SNIC_STAT_ABORTED,		/* req was aborted */
	SNIC_STAT_TIMEOUT,		/* req was timed out */
	SNIC_STAT_SGL_INVALID,		/* req was aborted due to sgl error */
	SNIC_STAT_DATA_CNT_MISMATCH,	/*recv/sent more/less data than expec */
	SNIC_STAT_FW_ERR,		/* req was terminated due to fw error */
	SNIC_STAT_ITMF_REJECT,		/* itmf req was rejected by target */
	SNIC_STAT_ITMF_FAIL,		/* itmf req was failed */
	SNIC_STAT_ITMF_INCORRECT_LUN,	/* itmf req has incorrect LUN id*/
	SNIC_STAT_CMND_REJECT,		/* req was invalid and rejected */
	SNIC_STAT_DEV_OFFLINE,		/* req sent to offline device */
	SNIC_STAT_NO_BOOTLUN,
	SNIC_STAT_SCSI_ERR,		/* SCSI error returned by Target. */
	SNIC_STAT_NOT_READY,		/* sNIC Subsystem is not ready */
	SNIC_STAT_FATAL_ERROR,		/* sNIC is in unrecoverable state */
}; /* end of enum snic_io_status */

/*
 * snic_io_hdr : host <--> firmware
 *
 * for any other message that will be queued to firmware should
 * have the following request header
 */
struct snic_io_hdr {
	__le32	hid;
	__le32	cmnd_id;	/* tag here */
	ulong	init_ctx;	/* initiator context; stored in host byte
				 * order / pointer size (see snic_io_hdr_enc)
				 * and round-tripped opaquely through FW
				 */
	u8	type;		/* request/response type */
	u8	status;		/* header status entry */
	u8	protocol;	/* Protocol specific, may needed for RoCE*/
	u8	flags;
	__le16	sg_cnt;
	u16	resvd;
};
/* Auxiliary function for encoding the snic_io_hdr: multi-byte fields go
 * out little-endian; init_ctx is stored as-is and only echoed back.
 */
static inline void
snic_io_hdr_enc(struct snic_io_hdr *hdr, u8 typ, u8 status, u32 id, u32 hid,
		u16 sg_cnt, ulong ctx)
{
	hdr->hid = cpu_to_le32(hid);
	hdr->cmnd_id = cpu_to_le32(id);
	hdr->init_ctx = ctx;
	hdr->type = typ;
	hdr->status = status;
	hdr->protocol = 0;
	hdr->flags = 0;
	hdr->sg_cnt = cpu_to_le16(sg_cnt);
}
/* Auxiliary function for decoding the snic_io_hdr received from FW. */
static inline void
snic_io_hdr_dec(struct snic_io_hdr *hdr, u8 *typ, u8 *stat, u32 *cmnd_id,
		u32 *hid, ulong *ctx)
{
	*hid = le32_to_cpu(hdr->hid);
	*cmnd_id = le32_to_cpu(hdr->cmnd_id);
	*ctx = hdr->init_ctx;
	*typ = hdr->type;
	*stat = hdr->status;
}
/*
 * snic_host_info: host -> firmware
 *
 * Used for sending host information to firmware, and request fw version
 */
struct snic_exch_ver_req {
	__le32	drvr_ver;	/* for debugging, when fw dump captured */
	__le32	os_type;	/* for OS specific features */
};

/*
 * os_type flags
 * Bit 0-7 : OS information
 * Bit 8-31: Feature/Capability Information
 */
#define SNIC_OS_LINUX	0x1
#define SNIC_OS_WIN	0x2
#define SNIC_OS_ESX	0x3

/*
 * HBA Capabilities
 * Bit 1: Reserved.
 * Bit 2: Dynamic Discovery of LUNs.
 * Bit 3: Async event notifications on tgt online/offline events.
 * Bit 4: IO timeout support in FW.
 * Bit 5-31: Reserved.
 */
#define SNIC_HBA_CAP_DDL	0x02	/* Supports Dynamic Discovery of LUNs */
#define SNIC_HBA_CAP_AEN	0x04	/* Supports Async Event Noitifcation */
#define SNIC_HBA_CAP_TMO	0x08	/* Supports IO timeout in FW */

/*
 * snic_exch_ver_rsp : firmware -> host
 *
 * Used by firmware to send response to version request
 */
struct snic_exch_ver_rsp {
	__le32	version;
	__le32	hid;
	__le32	max_concur_ios;		/* max concurrent ios */
	__le32	max_sgs_per_cmd;	/* max sgls per IO */
	__le32	max_io_sz;		/* max io size supported */
	__le32	hba_cap;		/* hba capabilities */
	__le32	max_tgts;		/* max tgts supported */
	__le16	io_timeout;		/* FW extended timeout */
	u16	rsvd;
};

/*
 * snic_report_tgts : host -> firmware request
 *
 * Used by the host to request list of targets
 */
struct snic_report_tgts {
	__le16	sg_cnt;
	__le16	flags;		/* specific flags from fw */
	u8	_resvd[4];
	__le64	sg_addr;	/* Points to SGL */
	__le64	sense_addr;
};

/* Adapter personality. */
enum snic_type {
	SNIC_NONE = 0x0,
	SNIC_DAS,
	SNIC_SAN,
};

/* Report Target Response */
enum snic_tgt_type {
	SNIC_TGT_NONE = 0x0,
	SNIC_TGT_DAS,	/* DAS Target */
	SNIC_TGT_SAN,	/* SAN Target */
};

/* target id format */
struct snic_tgt_id {
	__le32	tgt_id;		/* target id */
	__le16	tgt_type;	/* tgt type */
	__le16	vnic_id;	/* corresponding vnic id */
};

/*
 * snic_report_tgts_cmpl : firmware -> host response
 *
 * Used by firmware to send response to Report Targets request
 */
struct snic_report_tgts_cmpl {
	__le32	tgt_cnt;	/* Number of Targets accessible */
	u32	_resvd;
};
/*
* Command flags
*
* Bit 0: Read flags
* Bit 1: Write flag
* Bit 2: ESGL - sg/esg array contains extended sg
* ESGE - is a host buffer contains sg elements
* Bit 3-4: Task Attributes
* 00b - simple
* 01b - head of queue
* 10b - ordered
* Bit 5-7: Priority - future use
* Bit 8-15: Reserved
*/
#define SNIC_ICMND_WR		0x01	/* write command */
#define SNIC_ICMND_RD		0x02	/* read command */
#define SNIC_ICMND_ESGL		0x04	/* SGE/ESGE array contains valid data*/

/*
 * Priority/Task Attribute settings
 */
#define SNIC_ICMND_TSK_SHIFT		2	/* task attr starts at bit 2 */
/* NOTE(review): this mask looks suspect — (x >> shift) & ~0xffff clears
 * the low 16 bits of the shifted value instead of isolating the 2-bit
 * task attribute; confirm intent against the firmware spec.
 */
#define SNIC_ICMND_TSK_MASK(x)		((x>>SNIC_ICMND_TSK_SHIFT) & ~(0xffff))
#define SNIC_ICMND_TSK_SIMPLE		0	/* simple task attr */
#define SNIC_ICMND_TSK_HEAD_OF_QUEUE	1	/* head of queue task attr */
#define SNIC_ICMND_TSK_ORDERED		2	/* ordered task attr */

#define SNIC_ICMND_PRI_SHIFT		5	/* prio val starts at bit 5 */
/*
 * snic_icmnd : host-> firmware request
 *
 * used for sending out an initiator SCSI 16/32-byte command
 */
struct snic_icmnd {
	__le16 sg_cnt;		/* Number of SG Elements */
	__le16 flags;		/* SNIC_ICMND_* flags */
	__le32 sense_len;	/* Sense buffer length */
	__le64 tgt_id;		/* Destination Target ID */
	__le64 lun_id;		/* Destination LUN ID */
	u8 cdb_len;		/* valid bytes in cdb[] */
	u8 _resvd;
	__le16 time_out;	/* ms time for Res allocations fw to handle io*/
	__le32 data_len;	/* Total number of bytes to be transferred */
	u8 cdb[SNIC_CDB_LEN];	/* SCSI CDB, zero-padded to SNIC_CDB_LEN */
	__le64 sg_addr;		/* Points to SG List */
	__le64 sense_addr;	/* Sense buffer address */
};

/* Response flags (snic_icmnd_cmpl.flags) */
/* Bit 0: Under run
 * Bit 1: Over Run
 * Bit 2-7: Reserved
 */
#define SNIC_ICMND_CMPL_UNDR_RUN	0x01	/* resid under and valid */
#define SNIC_ICMND_CMPL_OVER_RUN	0x02	/* resid over and valid */

/*
 * snic_icmnd_cmpl: firmware -> host response
 *
 * Used for sending the host a response to an icmnd (initiator command)
 */
struct snic_icmnd_cmpl {
	u8 scsi_status;		/* value as per SAM */
	u8 flags;		/* SNIC_ICMND_CMPL_* over/under-run flags */
	__le16 sense_len;	/* Sense Length */
	__le32 resid;		/* Residue : # bytes under or over run */
};
/*
 * snic_itmf: host->firmware request
 *
 * used for requesting the firmware to abort a request and/or send out
 * a task management function
 *
 * the req_id field is valid in case of abort task and clear task
 */
struct snic_itmf {
	u8 tm_type;	/* SCSI Task Management request, enum snic_itmf_tm_type */
	u8 resvd;
	__le16 flags;	/* flags */
	__le32 req_id;	/* Command id of snic req to be aborted */
	__le64 tgt_id;	/* Target ID */
	__le64 lun_id;	/* Destination LUN ID */
	__le16 timeout;	/* in sec */
};

/*
 * Task Management Request types for snic_itmf.tm_type
 */
enum snic_itmf_tm_type {
	SNIC_ITMF_ABTS_TASK = 0x01,	/* Abort Task */
	SNIC_ITMF_ABTS_TASK_SET,	/* Abort Task Set */
	SNIC_ITMF_CLR_TASK,		/* Clear Task */
	SNIC_ITMF_CLR_TASKSET,		/* Clear Task Set */
	SNIC_ITMF_LUN_RESET,		/* Lun Reset */
	SNIC_ITMF_ABTS_TASK_TERM,	/* Supported for SAN Targets */
};

/*
 * snic_itmf_cmpl: firmware -> host response
 *
 * used for sending the host a response for a itmf request
 */
struct snic_itmf_cmpl {
	__le32 nterminated;	/* # IOs terminated as a result of tmf */
	u8 flags;		/* flags, see SNIC_NUM_TERM_VALID */
	u8 _resvd[3];
};

/*
 * itmf_cmpl flags
 * Bit 0 : 1 - Num terminated field valid
 * Bit 1 - 7 : Reserved
 */
#define SNIC_NUM_TERM_VALID	0x01	/* Number of IOs terminated */
/*
 * snic_hba_reset: host -> firmware request
 *
 * used for requesting firmware to reset snic
 */
struct snic_hba_reset {
	__le16 flags;	/* flags */
	u8 _resvd[6];	/* pad to 8 bytes */
};

/*
 * snic_hba_reset_cmpl: firmware -> host response
 *
 * Used by firmware to respond to the host's hba reset request
 */
struct snic_hba_reset_cmpl {
	u8 flags;	/* flags : more info needs to be added*/
	u8 _resvd[7];	/* pad to 8 bytes */
};

/*
 * snic_notify_msg: firmware -> host response
 *
 * Used by firmware to notify host of the last work queue entry received
 */
struct snic_notify_msg {
	__le32 wqe_num;	/* wq entry number */
	u8 flags;	/* flags, macros */
	u8 _resvd[4];
};
#define SNIC_EVDATA_LEN		24	/* in bytes */

/* snic_async_evnotify: firmware -> host notification
 *
 * Used by firmware to notify the host about configuration/state changes
 */
struct snic_async_evnotify {
	u8 FLS_EVENT_DESC;	/* event descriptor */
	u8 vnic;		/* vnic id */
	u8 _resvd[2];
	__le32 ev_id;		/* Event ID, enum snic_ev_type */
	u8 ev_data[SNIC_EVDATA_LEN]; /* Event Data (payload, "PL" below) */
	u8 _resvd2[4];
};

/* async event types carried in snic_async_evnotify.ev_id */
enum snic_ev_type {
	SNIC_EV_TGT_OFFLINE = 0x01, /* Target Offline, PL contains TGT ID */
	SNIC_EV_TGT_ONLINE,	/* Target Online, PL contains TGT ID */
	SNIC_EV_LUN_OFFLINE,	/* LUN Offline, PL contains LUN ID */
	SNIC_EV_LUN_ONLINE,	/* LUN Online, PL contains LUN ID */
	SNIC_EV_CONF_CHG,	/* Dev Config/Attr Change Event */
	SNIC_EV_TGT_ADDED,	/* Target Added */
	SNIC_EV_TGT_DELTD,	/* Target Del'd, PL contains TGT ID */
	SNIC_EV_LUN_ADDED,	/* LUN Added */
	SNIC_EV_LUN_DELTD,	/* LUN Del'd, PL cont. TGT & LUN ID */

	SNIC_EV_DISC_CMPL = 0x10, /* Discovery Completed Event */
};
#define SNIC_HOST_REQ_LEN	128	/*Exp length of host req, wq desc sz*/
/* Payload 88 bytes = 128 - 24 - 16 */
#define SNIC_HOST_REQ_PAYLOAD	((int)(SNIC_HOST_REQ_LEN -	\
					sizeof(struct snic_io_hdr) -	\
					(2 * sizeof(u64))))

/*
 * snic_host_req: host -> firmware request
 *
 * Basic structure for all snic requests that are sent from the host to
 * firmware. They are 128 bytes in size.
 */
struct snic_host_req {
	u64 ctrl_data[2];	/*16 bytes - Control Data */
	struct snic_io_hdr hdr;	/* common header; hdr.type selects union arm */
	union {
		/*
		 * Entry specific space, last byte contains color
		 */
		u8 buf[SNIC_HOST_REQ_PAYLOAD];

		/*
		 * Exchange firmware version
		 */
		struct snic_exch_ver_req	exch_ver;

		/* report targets */
		struct snic_report_tgts		rpt_tgts;

		/* io request */
		struct snic_icmnd		icmnd;

		/* task management request */
		struct snic_itmf		itmf;

		/* hba reset */
		struct snic_hba_reset		reset;
	} u;
}; /* end of snic_host_req structure */

#define SNIC_FW_REQ_LEN		64 /* Expected length of fw req */
struct snic_fw_req {
	struct snic_io_hdr hdr;	/* common header; hdr.type selects union arm */
	union {
		/*
		 * Entry specific space, last byte contains color
		 */
		u8 buf[SNIC_FW_REQ_LEN - sizeof(struct snic_io_hdr)];

		/* Exchange Version Response */
		struct snic_exch_ver_rsp	exch_ver_cmpl;

		/* Report Targets Response */
		struct snic_report_tgts_cmpl	rpt_tgts_cmpl;

		/* scsi response */
		struct snic_icmnd_cmpl		icmnd_cmpl;

		/* task management response */
		struct snic_itmf_cmpl		itmf_cmpl;

		/* hba reset response */
		struct snic_hba_reset_cmpl	reset_cmpl;

		/* notify message */
		struct snic_notify_msg		ack;

		/* async notification event */
		struct snic_async_evnotify	async_ev;
	} u;
}; /* end of snic_fw_req structure */
/*
 * Auxiliary macros to verify specific snic req/cmpl structures
 * to ensure that it will be aligned to 64 bit, and not using
 * color bit field
 *
 * NOTE(review): both are currently empty placeholders — no size/alignment
 * check is actually performed; consider BUILD_BUG_ON-based checks.
 */
#define VERIFY_REQ_SZ(x)
#define VERIFY_CMPL_SZ(x)

/*
 * Access routines to encode and decode the color bit, which is the most
 * significant bit of the structure.
 */
/*
 * snic_color_enc : sets or clears the descriptor's color bit, which lives
 * in the most significant bit of the last byte of the firmware request.
 */
static inline void
snic_color_enc(struct snic_fw_req *req, u8 color)
{
	u8 *last = (u8 *)req + sizeof(struct snic_fw_req) - 1;

	*last = color ? (*last | 0x80) : (*last & 0x7f);
}
/*
 * snic_color_dec : extracts the color bit (MSB of the last byte of the
 * descriptor) into *color, then issues a read barrier so subsequent field
 * reads cannot be reordered before the color check.
 */
static inline void
snic_color_dec(struct snic_fw_req *req, u8 *color)
{
	u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1;

	*color = *c >> 7;

	/* Make sure color bit is read from desc *before* other fields
	 * are read from desc. Hardware guarantees color bit is last
	 * bit (byte) written. Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */
	rmb();
}
#endif /* end of __SNIC_FWINT_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"
/*
 * snic_wq_cmpl_frame_send : per-buffer WQ completion callback.
 * Invoked via svnic_wq_service() for each acked WQ entry; unmaps the DMA
 * buffer and drops the driver's reference to it.
 */
static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			    struct cq_desc *cq_desc,
			    struct vnic_wq_buf *buf,
			    void *opaque)
{
	struct snic *snic = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(snic->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	/* trace the rqi address: the snic_req_info precedes the host req */
	SNIC_TRC(snic->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);
	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
	buf->os_buf = NULL;
}
/*
 * snic_wq_cmpl_handler_cont : continuation callback passed to
 * svnic_cq_service(); services WQ completions for queue q_num under the
 * corresponding wq_lock. Only a single WQ (q_num == 0) is supported.
 */
static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *snic = svnic_dev_priv(vdev);
	unsigned long flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	svnic_wq_service(&snic->wq[q_num],
			 cq_desc,
			 cmpl_idx,
			 snic_wq_cmpl_frame_send,
			 NULL);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	return 0;
} /* end of snic_cmpl_handler_cont */
/*
 * snic_wq_cmpl_handler : services up to work_to_do completions on every
 * WQ completion queue and returns the total number processed. Also
 * records the time of the last firmware ack for diagnostics.
 */
int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int nr_done = 0;
	unsigned int q;

	snic->s_stats.misc.last_ack_time = jiffies;

	for (q = 0; q < snic->wq_count; q++)
		nr_done += svnic_cq_service(&snic->cq[q],
					    work_to_do,
					    snic_wq_cmpl_handler_cont,
					    NULL);

	return nr_done;
} /* end of snic_wq_cmpl_handler */
/*
 * snic_free_wq_buf : WQ cleanup callback (see svnic_wq_clean in
 * snic_cleanup). Unmaps the buffer, unlinks the owning snic_req_info from
 * spl_cmd_list, releases any response SGL and frees the request.
 */
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		/* already unlinked (or never added) — nothing to free here */
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
	return;
}
/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
	/* No multi queue support for now; the BUILD_BUG_ON keeps this
	 * stub from silently surviving a future SNIC_WQ_MAX bump.
	 */
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}
/*
 * snic_queue_wq_desc : maps os_buf for DMA and posts it to the selected
 * work queue as a single-fragment descriptor.
 *
 * Returns 0 on success, -ENOMEM if the DMA mapping fails or the WQ has
 * no free descriptor. On success the active-request stats are bumped.
 *
 * Fix: the descriptor-availability check used snic->wq (i.e. queue 0)
 * regardless of q_num; check the queue we are actually posting to.
 */
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	long act_reqs;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	if (!svnic_wq_desc_avail(&snic->wq[q_num])) {
		/* back out the mapping before reporting the full queue */
		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	/* Update stats */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */
/*
 * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
 * Purpose : Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long lock_flags;

	INIT_LIST_HEAD(&rqi->list);

	/* Track the request so snic_free_all_untagged_reqs can reclaim it. */
	spin_lock_irqsave(&snic->spl_cmd_lock, lock_flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, lock_flags);
}
/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 *
 * Returns the new snic_req_info, or NULL if the mempool allocation fails.
 *
 * Fix: removed a duplicate "rqi->req = (struct snic_host_req *)(rqi + 1);"
 * assignment — the pointer was set twice to the same value.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	/* NOTE(review): SNIC_REQ_CACHE_DFLT_SGL is an enum pool index (0),
	 * so any sg_cnt >= 1 is routed to the max-SGL pool; confirm whether
	 * the intended bound was SNIC_DFLT_SG_DESC_CNT.
	 */
	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);
		return rqi;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	/* The snic_host_req is laid out immediately after snic_req_info. */
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	if (sg_cnt == 0)
		goto end;

	/* the SGL follows the host request in the same allocation */
	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocatd.\n", rqi);

	return rqi;
} /* end of snic_req_init */
/*
 * snic_abort_req_init : Inits abort request.
 * Allocates (or reuses) a task-management request from the TM pool and
 * links it to rqi->abort_req. Returns NULL on allocation failure.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	/* If abort to be issued second time, then reuse */
	if (rqi->abort_req)
		return rqi->abort_req;


	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	rqi->abort_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_abort_req_init */
/*
 * snic_dr_req_init : Inits device reset req
 * Allocates a task-management request from the TM pool and links it to
 * rqi->dr_req (which must be NULL on entry). Returns NULL on failure.
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	/* unlike abort_req, a device reset request is never reused */
	SNIC_BUG_ON(rqi->dr_req != NULL);
	rqi->dr_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_dr_req_init */
/* frees snic_req_info and snic_host_req */
/*
 * Releases any attached abort/device-reset TM requests back to the TM
 * pool, then the rqi itself (which embeds the host req) back to the pool
 * it was drawn from. The response SGL must already have been released
 * (sge_va == 0).
 */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->abort_req)
		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	if (rqi->dr_req)
		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}
/*
 * snic_pci_unmap_rsp_buf : unmaps the DMA mapping of the first SG element
 * of the request's response buffer (the SGL immediately follows the host
 * request in memory).
 */
void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	pci_unmap_single(snic->pdev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 PCI_DMA_FROMDEVICE);
}
/*
 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
 * Called during shutdown/cleanup; releases each request's response SGL
 * before freeing. Entire walk runs under spl_cmd_lock.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
	struct snic_req_info *rqi;
	struct list_head *cur, *nxt;
	unsigned long flags;

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
		rqi = list_entry(cur, struct snic_req_info, list);
		list_del_init(&rqi->list);
		if (rqi->sge_va) {
			snic_pci_unmap_rsp_buf(snic, rqi);
			kfree((void *)rqi->sge_va);
			rqi->sge_va = 0;
		}

		snic_req_free(snic, rqi);
	}
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}
/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 * No-op while the adapter is being removed (snic_free_all_untagged_reqs
 * handles cleanup in that path) or if the req is already unlinked.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		goto end;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
	snic_req_free(snic, rqi);

end:
	return;
}
/* dump buf in hex fmt */
/* Logs a header line, then the raw bytes via print_hex_dump_bytes(). */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}
#define	LINE_BUFSZ	128	/* for snic_print_desc fn */

/*
 * snic_dump_desc : logs a one-line summary of a host request or firmware
 * completion descriptor, keyed on hdr.type, and optionally hex-dumps the
 * raw bytes. Debug-only helper invoked from snic_print_desc().
 */
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	/* responses (>= REPORT_TGTS_CMPL) carry the rqi in the fw header */
	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		/* NOTE(review): no snprintf here, so line stays empty for
		 * this type — confirm whether that is intentional.
		 */
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Enable it, to dump byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */
/*
 * snic_print_desc : gate for snic_dump_desc; dumps only when descriptor
 * logging is enabled in snic_log_level.
 */
void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (!(snic_log_level & SNIC_DESC_LOGGING))
		return;

	snic_dump_desc(fn, os_buf, len);
}
/*
 * snic_calc_io_process_time : records the elapsed time (in jiffies) of a
 * completed request, keeping a running maximum in the io stats.
 */
void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 elapsed = jiffies - rqi->start_time;

	if (elapsed > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, elapsed);
}
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _SNIC_IO_H
#define _SNIC_IO_H

#define SNIC_DFLT_SG_DESC_CNT	32	/* Default descriptors for sgl */
#define SNIC_MAX_SG_DESC_CNT	60	/* Max descriptor for sgl */
#define SNIC_SG_DESC_ALIGN	16	/* Descriptor address alignment */

/* SG descriptor for snic: little-endian address/length pair */
struct snic_sg_desc {
	__le64 addr;
	__le32 len;
	u32 _resvd;
};

/* fixed-size SGL arrays used to size the request mempools */
struct snic_dflt_sgl {
	struct snic_sg_desc sg_desc[SNIC_DFLT_SG_DESC_CNT];
};

struct snic_max_sgl {
	struct snic_sg_desc sg_desc[SNIC_MAX_SG_DESC_CNT];
};

/* indices into snic->req_pool[] */
enum snic_req_cache_type {
	SNIC_REQ_CACHE_DFLT_SGL = 0,	/* cache with default size sgl */
	SNIC_REQ_CACHE_MAX_SGL,		/* cache with max size sgl */
	SNIC_REQ_TM_CACHE,		/* cache for task mgmt reqs contains
					   snic_host_req objects only*/

	SNIC_REQ_MAX_CACHES		/* number of sgl caches */
};
/* Per IO internal state, stored in the scsi_cmnd's driver-private area
 * (see snic_host_template.cmd_size)
 */
struct snic_internal_io_state {
	char	*rqi;		/* back-pointer to the snic_req_info */
	u64	flags;
	u32	state;		/* enum snic_ioreq_state */
	u32	abts_status;	/* Abort completion status */
	u32	lr_status;	/* device reset completion status */
};

/* IO state machine */
enum snic_ioreq_state {
	SNIC_IOREQ_NOT_INITED = 0,
	SNIC_IOREQ_PENDING,
	SNIC_IOREQ_ABTS_PENDING,
	SNIC_IOREQ_ABTS_COMPLETE,
	SNIC_IOREQ_LR_PENDING,
	SNIC_IOREQ_LR_COMPLETE,
	SNIC_IOREQ_COMPLETE,
};
struct snic;
struct snic_host_req;

/*
 * snic_req_info : Contains info about IO, one per scsi command.
 * Notes: Make sure that the structure is aligned to 16 B
 * this helps in easy access to snic_req_info from snic_host_req
 * (the host req is allocated immediately after this struct).
 */
struct snic_req_info {
	struct list_head list;		/* linkage on snic->spl_cmd_list */
	struct snic_host_req *req;	/* host request, follows this struct */
	u64	start_time;		/* start time in jiffies */
	u16	rq_pool_type;		/* notion of request pool type */
	u16	req_len;		/* buf len passing to fw (req + sgl)*/
	u32	tgt_id;

	u32	tm_tag;
	u8	io_cmpl:1;		/* sets to 1 when fw completes IO */
	u8	resvd[3];
	struct scsi_cmnd *sc;		/* Associated scsi cmd */
	struct snic	*snic;		/* Associated snic */
	ulong	sge_va;			/* Pointer to Resp Buffer */
	u64	snsbuf_va;

	struct snic_host_req *abort_req;	/* TM req, see snic_abort_req_init */
	struct completion *abts_done;
	struct snic_host_req *dr_req;		/* TM req, see snic_dr_req_init */
	struct completion *dr_done;
};

/* conversion helpers between the co-allocated rqi / host req / SGL */
#define rqi_to_req(rqi)	\
	((struct snic_host_req *) (((struct snic_req_info *)rqi)->req))

#define req_to_rqi(req)	\
	((struct snic_req_info *) (((struct snic_host_req *)req)->hdr.init_ctx))

#define req_to_sgl(req)	\
	((struct snic_sg_desc *) (((struct snic_host_req *)req)+1))

struct snic_req_info *
snic_req_init(struct snic *, int sg_cnt);
void snic_req_free(struct snic *, struct snic_req_info *);
void snic_calc_io_process_time(struct snic *, struct snic_req_info *);
void snic_pci_unmap_rsp_buf(struct snic *, struct snic_req_info *);
struct snic_host_req *
snic_abort_req_init(struct snic *, struct snic_req_info *);
struct snic_host_req *
snic_dr_req_init(struct snic *, struct snic_req_info *);
#endif /* _SNIC_IO_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "snic_io.h"
#include "snic.h"
/*
 * snic_isr_msix_wq : MSIx ISR for work queue.
 * Drains WQ completions and returns the consumed credits to the
 * interrupt controller, unmasking and re-arming the timer.
 */
static irqreturn_t
snic_isr_msix_wq(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long nr_handled;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	nr_handled = snic_wq_cmpl_handler(snic, -1);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
				  nr_handled,
				  1 /* unmask intr */,
				  1 /* reset intr timer */);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_wq */
/*
 * snic_isr_msix_io_cmpl : MSIx ISR for IO completions; drains the fw
 * completion queue and returns the consumed interrupt credits.
 */
static irqreturn_t
snic_isr_msix_io_cmpl(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long iocmpl_work_done = 0;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL],
				  iocmpl_work_done,
				  1 /* unmask intr */,
				  1 /* reset intr timer */);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_io_cmpl */
/*
 * snic_isr_msix_err_notify : MSIx ISR for error/notification vector;
 * logs queue errors and kicks link-event handling.
 */
static irqreturn_t
snic_isr_msix_err_notify(int irq, void *data)
{
	struct snic *snic = data;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]);
	snic_log_q_error(snic);

	/*Handling link events */
	snic_handle_link_event(snic);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_err_notify */
/*
 * snic_free_intr : releases every MSI-X vector that was successfully
 * requested by snic_request_intr. Only MSI-X mode is supported.
 */
void
snic_free_intr(struct snic *snic)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(snic->msix); idx++)
		if (snic->msix[idx].requested)
			free_irq(snic->msix_entry[idx].vector,
				 snic->msix[idx].devid);
} /* end of snic_free_intr */
/*
 * snic_request_intr : names and registers the three MSI-X vectors
 * (WQ ack, IO completion, error/notify). On any request_irq failure,
 * vectors already acquired are released via snic_free_intr.
 *
 * Returns 0 on success, or the request_irq error code.
 *
 * Fix: corrected the misspelled "requrest_irq" in the error message.
 */
int
snic_request_intr(struct snic *snic)
{
	int ret = 0, i;
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);

	/*
	 * Currently HW supports single WQ and CQ. So passing devid as snic.
	 * When hardware supports multiple WQs and CQs, one idea is
	 * to pass devid as corresponding WQ or CQ ptr and retrieve snic
	 * from queue ptr.
	 * Except for err_notify, which is always one.
	 */
	sprintf(snic->msix[SNIC_MSIX_WQ].devname,
		"%.11s-scsi-wq",
		snic->name);
	snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq;
	snic->msix[SNIC_MSIX_WQ].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname,
		"%.11s-io-cmpl",
		snic->name);
	snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl;
	snic->msix[SNIC_MSIX_IO_CMPL].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname,
		"%.11s-err-notify",
		snic->name);
	snic->msix[SNIC_MSIX_ERR_NOTIFY].isr = snic_isr_msix_err_notify;
	snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;

	for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
		ret = request_irq(snic->msix_entry[i].vector,
				  snic->msix[i].isr,
				  0,
				  snic->msix[i].devname,
				  snic->msix[i].devid);
		if (ret) {
			SNIC_HOST_ERR(snic->shost,
				      "MSI-X: request_irq(%d) failed %d\n",
				      i,
				      ret);
			snic_free_intr(snic);
			break;
		}
		snic->msix[i].requested = 1;
	}

	return ret;
} /* end of snic_request_intr */
/*
 * snic_set_intr_mode : configures MSI-X interrupt mode.
 * Needs n WQ interrupts, m IO-completion interrupts and one error/notify
 * interrupt; on success caps wq/cq/intr counts accordingly. Returns 0 on
 * success, -EINVAL if MSI-X cannot be enabled (no other mode supported).
 */
int
snic_set_intr_mode(struct snic *snic)
{
	unsigned int n = ARRAY_SIZE(snic->wq);
	unsigned int m = SNIC_CQ_IO_CMPL_MAX;
	unsigned int i;

	/*
	 * We need n WQs, m CQs, and n+m+1 INTRs
	 * (last INTR is used for WQ/CQ errors and notification area
	 */
	BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
			ARRAY_SIZE(snic->intr));
	SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));

	for (i = 0; i < (n + m + 1); i++)
		snic->msix_entry[i].entry = i;

	if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
		if (!pci_enable_msix(snic->pdev,
				     snic->msix_entry,
				     (n + m + 1))) {
			snic->wq_count = n;
			snic->cq_count = n + m;
			snic->intr_count = n + m + 1;
			snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;

			SNIC_ISR_DBG(snic->shost,
				     "Using MSI-X Interrupts\n");
			svnic_dev_set_intr_mode(snic->vdev,
						VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
} /* end of snic_set_intr_mode */
/*
 * snic_clear_intr_mode : tears down MSI-X and drops the vnic back to the
 * default INTx interrupt mode.
 */
void
snic_clear_intr_mode(struct snic *snic)
{
	pci_disable_msix(snic->pdev);

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "snic.h"
#include "snic_fwint.h"
#define PCI_DEVICE_ID_CISCO_SNIC	0x0046

/* Supported devices by snic module */
/* NOTE(review): no MODULE_DEVICE_TABLE(pci, snic_id_table) is visible in
 * this chunk — confirm it exists elsewhere in the file so the module can
 * be auto-loaded.
 */
static struct pci_device_id snic_id_table[] = {
	{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
	{ 0, }	/* end of table */
};

/* bitmask of enabled log categories, writable at runtime via sysfs */
unsigned int snic_log_level = 0x0;
module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
unsigned int snic_trace_max_pages = 16;
module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_trace_max_pages,
		 "Total allocated memory pages for snic trace buffer");
#endif

/* default per-LUN queue depth, clamped in slave_configure */
unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
/*
 * snic_slave_alloc : callback function to SCSI Mid Layer, called on
 * scsi device initialization. Rejects the device unless its target
 * exists and is ready.
 */
static int
snic_slave_alloc(struct scsi_device *sdev)
{
	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));

	if (!tgt)
		return -ENXIO;

	if (snic_tgt_chkready(tgt))
		return -ENXIO;

	return 0;
}
/*
 * snic_slave_configure : callback function to SCSI Mid Layer, called on
 * scsi device initialization. Sets the per-LUN queue depth (clamped to
 * SNIC_MAX_QUEUE_DEPTH) and the block-layer request timeout, honoring a
 * firmware-advertised IO timeout when present.
 */
static int
snic_slave_configure(struct scsi_device *sdev)
{
	struct snic *snic = shost_priv(sdev->host);
	u32 qdepth = 0, max_ios = 0;
	int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;

	/* Set Queue Depth */
	max_ios = snic_max_qdepth;
	qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
	scsi_change_queue_depth(sdev, qdepth);

	if (snic->fwinfo.io_tmo > 1)
		tmo = snic->fwinfo.io_tmo * HZ;

	/* FW requires extended timeouts */
	blk_queue_rq_timeout(sdev->request_queue, tmo);

	return 0;
}
/*
 * snic_change_queue_depth : scsi_host_template callback; clamps the
 * requested depth to SNIC_MAX_QUEUE_DEPTH, applies it, and returns the
 * depth now in effect.
 */
static int
snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	int clamped = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);

	scsi_change_queue_depth(sdev, clamped);
	SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth);

	return sdev->queue_depth;
}
/* SCSI midlayer host template for the snic HBA */
static struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,	/* 1 MB max transfer (0x800 * 512B) */
	.shost_attrs = snic_attrs,
	.use_blk_tags = 1,
	.track_queue_depth = 1,
	/* per-command driver-private area, see struct snic_internal_io_state */
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};
/*
* snic_handle_link_event : Handles link events such as link up/down/error
*/
void
snic_handle_link_event(struct snic *snic)
{
unsigned long flags;
spin_lock_irqsave(&snic->snic_lock, flags);
if (snic->stop_link_events) {
spin_unlock_irqrestore(&snic->snic_lock, flags);
return;
}
spin_unlock_irqrestore(&snic->snic_lock, flags);
queue_work(snic_glob->event_q, &snic->link_work);
} /* end of snic_handle_link_event */
/*
 * snic_notify_set : sets notification area
 * This notification area is to receive events from fw
 * Note: snic supports only MSIX interrupts, in which we can just call
 * svnic_dev_notify_set directly
 */
static int
snic_notify_set(struct snic *snic)
{
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);

	if (intr_mode != VNIC_DEV_INTR_MODE_MSIX) {
		SNIC_HOST_ERR(snic->shost,
			      "Interrupt mode should be setup before devcmd notify set %d\n",
			      intr_mode);

		return -1;
	}

	return svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
} /* end of snic_notify_set */
/*
 * snic_dev_wait : polls vnic open status.
 * Starts the devcmd via start(), then polls finished() every 100 ms for
 * up to ~2 seconds (with a minimum of 3 attempts — see note below).
 * Returns 0 on completion, a devcmd error, or -ETIMEDOUT.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
		int (*start)(struct vnic_dev *, int),
		int (*finished)(struct vnic_dev *, int *),
		int arg)
{
	unsigned long time;
	int ret, done;
	int retry_cnt = 0;

	ret = start(vdev, arg);
	if (ret)
		return ret;

	/*
	 * Wait for func to complete...2 seconds max.
	 *
	 * Sometimes schedule_timeout_uninterruptible take long time
	 * to wakeup, which results skipping retry. The retry counter
	 * ensures to retry at least two times.
	 */
	time = jiffies + (HZ * 2);
	do {
		ret = finished(vdev, &done);
		if (ret)
			return ret;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ/10);
		++retry_cnt;
	} while (time_after(time, jiffies) || (retry_cnt < 3));

	return -ETIMEDOUT;
} /* end of snic_dev_wait */
/*
 * snic_cleanup: called by snic_remove
 * Stops the snic device, masks all interrupts, Completed CQ entries are
 * drained. Posted WQ/RQ/Copy-WQ entries are cleanup
 * Teardown order: disable device -> mask interrupts -> disable WQs ->
 * drain completions -> clean leftover entries -> free driver requests ->
 * destroy mempools.
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);

	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */
/* Unmap BAR0 if it was mapped; safe to call with an unmapped bar. */
static void
snic_iounmap(struct snic *snic)
{
	if (!snic->bar0.vaddr)
		return;

	iounmap(snic->bar0.vaddr);
}
/*
 * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
 *
 * Makes up to six attempts (initial try plus five retries), logging
 * after each failed attempt. Returns the last svnic_dev_open_done()
 * result (0 on success).
 */
static int
snic_vdev_open_done(struct vnic_dev *vdev, int *done)
{
	struct snic *snic = svnic_dev_priv(vdev);
	int attempt;
	int ret = 0;

	for (attempt = 0; attempt <= 5; attempt++) {
		ret = svnic_dev_open_done(vdev, done);
		if (ret == 0)
			break;

		SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
	}

	return ret;
} /* end of snic_vdev_open_done */
/*
 * snic_add_host : registers scsi host with ML
 *
 * Registers @shost with the SCSI midlayer and creates its private
 * single-threaded work queue. Returns 0 on success, negative errno
 * otherwise.
 */
static int
snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
{
	int rc;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc) {
		SNIC_HOST_ERR(shost,
			      "snic: scsi_add_host failed. %d\n",
			      rc);

		return rc;
	}

	/* The host must not already own a work queue. */
	SNIC_BUG_ON(shost->work_q != NULL);
	snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
		 shost->host_no);
	shost->work_q = create_singlethread_workqueue(shost->work_q_name);
	if (shost->work_q)
		return 0;

	SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");

	return -ENOMEM;
} /* end of snic_add_host */
/*
 * Unregister @shost: tear down its private work queue, then remove the
 * host from the midlayer. A host without a work queue was never fully
 * added, so nothing is done.
 */
static void
snic_del_host(struct Scsi_Host *shost)
{
	if (shost->work_q) {
		destroy_workqueue(shost->work_q);
		shost->work_q = NULL;
		scsi_remove_host(shost);
	}
}
/* Returns the current snic state (enum snic_state) via an atomic read. */
int
snic_get_state(struct snic *snic)
{
	return atomic_read(&snic->state);
}
/* Publishes a new snic state atomically, logging the transition. */
void
snic_set_state(struct snic *snic, enum snic_state state)
{
	const char *prev = snic_state_to_str(snic_get_state(snic));

	SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
		       prev,
		       snic_state_to_str(state));

	atomic_set(&snic->state, state);
}
/*
 * snic_probe : Initialize the snic interface.
 *
 * PCI probe entry point: allocates the Scsi_Host/snic pair, sets up PCI
 * and DMA, maps BAR0, discovers/opens the vNIC, allocates queue and
 * interrupt resources plus request mempools, registers the SCSI host and
 * starts target discovery. The error ladder at the end unwinds exactly
 * the resources acquired before the failure point.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * Fix: the three mempool creation failure paths previously jumped to the
 * error ladder without setting @ret (which was still 0 from the last
 * successful call), so probe unwound everything yet reported success to
 * the PCI core. Each now sets ret = -ENOMEM.
 */
static int
snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *shost;
	struct snic *snic;
	mempool_t *pool;
	unsigned long flags;
	u32 max_ios = 0;
	int ret, i;

	/* Device Information */
	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	/*
	 * Allocate SCSI Host and setup association between host, and snic
	 */
	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
	if (!shost) {
		SNIC_ERR("Unable to alloc scsi_host\n");
		ret = -ENOMEM;

		goto prob_end;
	}

	snic = shost_priv(shost);
	snic->shost = shost;

	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
		 shost->host_no);

	SNIC_HOST_INFO(shost,
		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
		       shost->host_no, snic, shost, pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Per snic debugfs init; failure is non-fatal. */
	ret = snic_stats_debugfs_init(snic);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Failed to initialize debugfs stats\n");
		snic_stats_debugfs_remove(snic);
	}
#endif

	/* Setup PCI Resources */
	pci_set_drvdata(pdev, snic);
	snic->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot enable PCI Resources, aborting : %d\n",
			      ret);

		goto err_free_snic;
	}

	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot obtain PCI Resources, aborting : %d\n",
			      ret);

		goto err_pci_disable;
	}

	pci_set_master(pdev);

	/*
	 * Query PCI Controller on system for DMA addressing
	 * limitation for the device. Try 43-bit first, and
	 * fail to 32-bit.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "No Usable DMA Configuration, aborting %d\n",
				      ret);

			goto err_rel_regions;
		}

		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
				      ret);

			goto err_rel_regions;
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
				      ret);

			goto err_rel_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	if (!snic->bar0.vaddr) {
		SNIC_HOST_ERR(shost,
			      "Cannot memory map BAR0 res hdr aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
	snic->bar0.len = pci_resource_len(pdev, 0);
	SNIC_BUG_ON(snic->bar0.bus_addr == 0);

	/* Devcmd2 Resource Allocation and Initialization */
	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
	if (!snic->vdev) {
		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");

		ret = -ENODEV;
		goto err_iounmap;
	}

	ret = svnic_dev_cmd_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);

		goto err_vnic_unreg;
	}

	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev open failed, aborting. %d\n",
			      ret);

		goto err_vnic_unreg;
	}

	ret = svnic_dev_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev init failed. aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Get vNIC information */
	ret = snic_get_vnic_config(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Get vNIC configuration failed, aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs */
	max_ios = snic->config.io_throttle_count;
	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));

	snic->max_tag_id = shost->can_queue;

	ret = scsi_init_shared_tag_map(shost, snic->max_tag_id);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Unable to alloc shared tag map. %d\n",
			      ret);

		goto err_dev_close;
	}

	shost->max_lun = snic->config.luns_per_tgt;
	shost->max_id = SNIC_MAX_TARGET;

	shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/

	snic_get_res_counts(snic);

	/*
	 * Assumption: Only MSIx is supported
	 */
	ret = snic_set_intr_mode(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to set intr mode aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	ret = snic_alloc_vnic_res(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc vNIC resources aborting. %d\n",
			      ret);

		goto err_clear_intr;
	}

	/* Initialize specific lists */
	INIT_LIST_HEAD(&snic->list);

	/*
	 * spl_cmd_list for maintaining snic specific cmds
	 * such as EXCH_VER_REQ, REPORT_TARGETS etc
	 */
	INIT_LIST_HEAD(&snic->spl_cmd_list);
	spin_lock_init(&snic->spl_cmd_lock);

	/* initialize all snic locks */
	spin_lock_init(&snic->snic_lock);

	for (i = 0; i < SNIC_WQ_MAX; i++)
		spin_lock_init(&snic->wq_lock[i]);

	for (i = 0; i < SNIC_IO_LOCKS; i++)
		spin_lock_init(&snic->io_req_lock[i]);

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");

		ret = -ENOMEM;	/* fix: ret was 0 here, probe returned success */
		goto err_free_res;
	}

	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");

		ret = -ENOMEM;	/* fix: ret was 0 here, probe returned success */
		goto err_free_dflt_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");

		ret = -ENOMEM;	/* fix: ret was 0 here, probe returned success */
		goto err_free_max_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;

	/* Initialize snic state */
	atomic_set(&snic->state, SNIC_INIT);

	atomic_set(&snic->ios_inflight, 0);

	/* Setup notification buffer area */
	ret = snic_notify_set(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc notify buffer aborting. %d\n",
			      ret);

		goto err_free_tmreq_pool;
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add shost to SCSI
	 */
	ret = snic_add_host(shost, pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Adding scsi host Failed ... exiting. %d\n",
			      ret);

		goto err_notify_unset;
	}

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_add_tail(&snic->list, &snic_glob->snic_list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_disc_init(&snic->disc);
	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
	INIT_WORK(&snic->disc_work, snic_handle_disc);
	INIT_WORK(&snic->link_work, snic_handle_link);

	/* Enable all queues */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_enable(&snic->wq[i]);

	ret = svnic_dev_enable_wait(snic->vdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev enable failed w/ error %d\n",
			      ret);

		goto err_vdev_enable;
	}

	ret = snic_request_intr(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);

		goto err_req_intr;
	}

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_unmask(&snic->intr[i]);

	snic_set_state(snic, SNIC_ONLINE);

	/* Get snic params */
	ret = snic_get_conf(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to get snic io config from FW w err %d\n",
			      ret);

		goto err_get_conf;
	}

	ret = snic_disc_start(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
			      ret);

		goto err_get_conf;
	}

	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");

	return 0;

	/* Error ladder: each label undoes the steps taken before its goto. */
err_get_conf:
	snic_free_all_untagged_reqs(snic);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	snic_free_intr(snic);

err_req_intr:
	svnic_dev_disable(snic->vdev);

err_vdev_enable:
	for (i = 0; i < snic->wq_count; i++) {
		int rc = 0;

		rc = svnic_wq_disable(&snic->wq[i]);
		if (rc) {
			SNIC_HOST_ERR(shost,
				      "WQ Disable Failed w/ err = %d\n", rc);

			break;
		}
	}
	snic_del_host(snic->shost);

err_notify_unset:
	svnic_dev_notify_unset(snic->vdev);

err_free_tmreq_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);

err_free_max_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);

err_free_dflt_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);

err_free_res:
	snic_free_vnic_res(snic);

err_clear_intr:
	snic_clear_intr_mode(snic);

err_dev_close:
	svnic_dev_close(snic->vdev);

err_vnic_unreg:
	svnic_dev_unregister(snic->vdev);

err_iounmap:
	snic_iounmap(snic);

err_rel_regions:
	pci_release_regions(pdev);

err_pci_disable:
	pci_disable_device(pdev);

err_free_snic:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	scsi_host_put(shost);
	pci_set_drvdata(pdev, NULL);

prob_end:
	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	return ret;
} /* end of snic_probe */
/*
 * snic_remove : invoked on unbinding the interface to cleanup the
 * resources allocated in snic_probe on initialization.
 *
 * Teardown order matters: first stop new work (state + stop_link_events),
 * drain queued work, then quiesce the hardware via snic_cleanup(), and
 * only then release SCSI/vNIC/PCI resources in reverse order of probe.
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events. ISR and other threads
	 * that can queue work items will also stop creating work
	 * items on the snic workqueue
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Drain any link/discovery work already queued, stop discovery. */
	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device, masks all interrupts, Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleanup
	 */
	snic_cleanup(snic);

	/* Drop this snic from the driver-global list. */
	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	/* Release remaining resources in reverse order of snic_probe. */
	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees Scsi_Host and snic memory (continuous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */
/* Driver-wide global context; allocated in snic_global_data_init(). */
struct snic_global *snic_glob;
/*
 * snic_global_data_init: Initialize SNIC Global Data
 * Notes: All the global lists, variables should be part of global data
 * this helps in debugging.
 *
 * Allocates snic_glob, performs best-effort debugfs/trace setup, creates
 * the three request slab caches and the event work queue.
 * Returns 0 on success, negative errno on failure.
 *
 * Fixes:
 *  - The TM request cache was created with the duplicate slab name
 *    "snic_req_maxsgl" (already used by the max-SGL cache); slab cache
 *    names must be unique. It is now "snic_req_tm".
 *  - A failed (best-effort) snic_trc_init() left @ret negative, and that
 *    stale value was returned even when all subsequent steps succeeded;
 *    @ret is now reset after the best-effort section.
 */
static int
snic_global_data_init(void)
{
	int ret = 0;
	struct kmem_cache *cachep;
	ssize_t len = 0;

	snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);

	if (!snic_glob) {
		SNIC_ERR("Failed to allocate Global Context.\n");

		ret = -ENOMEM;
		goto gdi_end;
	}

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Debugfs related Initialization */
	/* Create debugfs entries for snic */
	ret = snic_debugfs_init();
	if (ret < 0) {
		SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n");
		snic_debugfs_term();
		/* continue even if it fails */
	}

	/* Trace related Initialization */
	/* Allocate memory for trace buffer */
	ret = snic_trc_init();
	if (ret < 0) {
		SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
		snic_trc_free();
		/* continue even if it fails */
	}

	/*
	 * Fix: debugfs/trace setup is best effort; clear any stale error so
	 * an otherwise successful init does not return it.
	 */
	ret = 0;
#endif
	INIT_LIST_HEAD(&snic_glob->snic_list);
	spin_lock_init(&snic_glob->snic_list_lock);

	/* Create a cache for allocation of snic_host_req+default size ESGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
	cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic default sgl slab\n");
		ret = -ENOMEM;

		goto err_dflt_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;

	/* Create a cache for allocation of max size Extended SGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic max sgl slab\n");
		ret = -ENOMEM;

		goto err_max_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;

	/* Cache for task-management requests (fix: unique slab name). */
	len = sizeof(struct snic_host_req);
	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic tm req slab\n");
		ret = -ENOMEM;

		goto err_tmreq_slab;
	}
	snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;

	/* snic_event queue */
	snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
	if (!snic_glob->event_q) {
		SNIC_ERR("snic event queue create failed\n");
		ret = -ENOMEM;

		goto err_eventq;
	}

	return ret;

err_eventq:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);

err_tmreq_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);

err_max_req_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

err_dflt_req_slab:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_trc_free();
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;

gdi_end:
	return ret;
} /* end of snic_glob_init */
/*
 * snic_global_data_cleanup : Frees SNIC Global Data
 *
 * Reverse of snic_global_data_init(): destroy the event queue first
 * (no more work can run), then the slab caches, then debugfs/trace
 * state, and finally the global context itself.
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Freeing Trace Resources */
	snic_trc_free();

	/* Freeing Debugfs Resources */
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;
} /* end of snic_glob_cleanup */
/* PCI driver glue: binds snic_probe/snic_remove to the sNIC device IDs. */
static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};
/*
 * Module init: set up the driver-global state, then register the PCI
 * driver. Global state is torn down again if registration fails.
 */
static int __init
snic_init_module(void)
{
	int ret;

#ifndef __x86_64__
	SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
#endif

	SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);

	ret = snic_global_data_init();
	if (ret) {
		SNIC_ERR("Failed to Initialize Global Data.\n");

		return ret;
	}

	ret = pci_register_driver(&snic_driver);
	if (ret < 0) {
		SNIC_ERR("PCI driver register error\n");
		snic_global_data_cleanup();
	}

	return ret;
}
/* Module exit: unbind all devices first, then free global state. */
static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}
/* Module entry points and metadata. */
module_init(snic_init_module);
module_exit(snic_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
MODULE_VERSION(SNIC_DRV_VERSION);
MODULE_DEVICE_TABLE(pci, snic_id_table);
MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
	      "Sesidhar Baddela <sebaddel@cisco.com>");
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "wq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "snic.h"
/*
 * snic_get_vnic_config : Reads the vNIC configuration from the device
 * via svnic_dev_spec(), one field at a time, then clamps each value to
 * the driver's supported range and logs the result.
 *
 * Returns 0 on success, or the svnic_dev_spec() error for the first
 * field that could not be read.
 */
int
snic_get_vnic_config(struct snic *snic)
{
	struct vnic_snic_config *c = &snic->config;
	int ret;

/* Reads one named field of struct vnic_snic_config into @c; returns
 * from the enclosing function on error. */
#define GET_CONFIG(m) \
	do { \
		ret = svnic_dev_spec(snic->vdev, \
				     offsetof(struct vnic_snic_config, m), \
				     sizeof(c->m), \
				     &c->m); \
		if (ret) { \
			SNIC_HOST_ERR(snic->shost, \
				      "Error getting %s, %d\n", #m, ret); \
			return ret; \
		} \
	} while (0)

	GET_CONFIG(wq_enet_desc_count);
	GET_CONFIG(maxdatafieldsize);
	GET_CONFIG(intr_timer);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(flags);
	GET_CONFIG(io_throttle_count);
	GET_CONFIG(port_down_timeout);
	GET_CONFIG(port_down_io_retries);
	GET_CONFIG(luns_per_tgt);
	GET_CONFIG(xpt_type);
	GET_CONFIG(hid);

	/* Clamp firmware-provided values to the driver's supported ranges. */
	c->wq_enet_desc_count = min_t(u32,
				      VNIC_SNIC_WQ_DESCS_MAX,
				      max_t(u32,
					    VNIC_SNIC_WQ_DESCS_MIN,
					    c->wq_enet_desc_count));

	/* Descriptor count is rounded up to a multiple of 16. */
	c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);

	c->maxdatafieldsize = min_t(u32,
				    VNIC_SNIC_MAXDATAFIELDSIZE_MAX,
				    max_t(u32,
					  VNIC_SNIC_MAXDATAFIELDSIZE_MIN,
					  c->maxdatafieldsize));

	c->io_throttle_count = min_t(u32,
				     VNIC_SNIC_IO_THROTTLE_COUNT_MAX,
				     max_t(u32,
					   VNIC_SNIC_IO_THROTTLE_COUNT_MIN,
					   c->io_throttle_count));

	c->port_down_timeout = min_t(u32,
				     VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX,
				     c->port_down_timeout);

	c->port_down_io_retries = min_t(u32,
				     VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX,
				     c->port_down_io_retries);

	c->luns_per_tgt = min_t(u32,
				VNIC_SNIC_LUNS_PER_TARGET_MAX,
				max_t(u32,
				      VNIC_SNIC_LUNS_PER_TARGET_MIN,
				      c->luns_per_tgt));

	c->intr_timer = min_t(u32, VNIC_INTR_TIMER_MAX, c->intr_timer);

	/* Log the effective (clamped) configuration. */
	SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count);
	SNIC_INFO("vNIC mtu %d intr timer %d\n",
		  c->maxdatafieldsize,
		  c->intr_timer);

	SNIC_INFO("vNIC flags 0x%x luns per tgt %d\n",
		  c->flags,
		  c->luns_per_tgt);

	SNIC_INFO("vNIC io throttle count %d\n", c->io_throttle_count);
	SNIC_INFO("vNIC port down timeout %d port down io retries %d\n",
		  c->port_down_timeout,
		  c->port_down_io_retries);

	SNIC_INFO("vNIC back end type = %d\n", c->xpt_type);
	SNIC_INFO("vNIC hid = %d\n", c->hid);

	return 0;
}
/*
 * snic_get_res_counts : Reads the WQ/CQ/interrupt resource counts
 * advertised by the vNIC. A zero count for any resource is a fatal
 * device/firmware inconsistency (SNIC_BUG_ON).
 */
void
snic_get_res_counts(struct snic *snic)
{
	snic->wq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_WQ);
	SNIC_BUG_ON(snic->wq_count == 0);
	snic->cq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_CQ);
	SNIC_BUG_ON(snic->cq_count == 0);
	snic->intr_count = svnic_dev_get_res_count(snic->vdev,
						   RES_TYPE_INTR_CTRL);
	SNIC_BUG_ON(snic->intr_count == 0);
}
/*
 * snic_free_vnic_res : Frees all queue and interrupt resources
 * previously allocated by snic_alloc_vnic_res() (WQs, then CQs, then
 * interrupt controls).
 */
void
snic_free_vnic_res(struct snic *snic)
{
	unsigned int idx;

	for (idx = 0; idx < snic->wq_count; idx++)
		svnic_wq_free(&snic->wq[idx]);

	for (idx = 0; idx < snic->cq_count; idx++)
		svnic_cq_free(&snic->cq[idx]);

	for (idx = 0; idx < snic->intr_count; idx++)
		svnic_intr_free(&snic->intr[idx]);
}
/*
 * snic_alloc_vnic_res : Allocates and initializes the vNIC queue and
 * interrupt resources: one WQ per IO channel, one CQ per WQ plus one
 * fw-to-host CQ per WQ (hence cq_count == 2 * wq_count), and one
 * interrupt control per vector. On any failure everything allocated so
 * far is released via snic_free_vnic_res().
 *
 * Returns 0 on success, negative errno otherwise.
 */
int
snic_alloc_vnic_res(struct snic *snic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int intr_offset;
	unsigned int err_intr_enable;
	unsigned int err_intr_offset;
	unsigned int i;
	int ret;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);

	SNIC_INFO("vNIC interrupt mode: %s\n",
		  ((intr_mode == VNIC_DEV_INTR_MODE_INTX) ?
		   "Legacy PCI INTx" :
		   ((intr_mode == VNIC_DEV_INTR_MODE_MSI) ?
		    "MSI" :
		    ((intr_mode == VNIC_DEV_INTR_MODE_MSIX) ?
		     "MSI-X" : "Unknown"))));

	/* only MSI-X is supported */
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);

	SNIC_INFO("wq %d cq %d intr %d\n", snic->wq_count,
		  snic->cq_count,
		  snic->intr_count);

	/* Allocate WQs used for SCSI IOs */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_alloc(snic->vdev,
				     &snic->wq[i],
				     i,
				     snic->config.wq_enet_desc_count,
				     sizeof(struct wq_enet_desc));
		if (ret)
			goto error_cleanup;
	}

	/* CQ for each WQ */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_cq_alloc(snic->vdev,
				     &snic->cq[i],
				     i,
				     snic->config.wq_enet_desc_count,
				     sizeof(struct cq_enet_wq_desc));
		if (ret)
			goto error_cleanup;
	}

	SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count);
	/* CQ for FW TO host */
	/* fw-to-host CQs are sized 3x the WQ descriptor count. */
	for (i = snic->wq_count; i < snic->cq_count; i++) {
		ret = svnic_cq_alloc(snic->vdev,
				     &snic->cq[i],
				     i,
				     (snic->config.wq_enet_desc_count * 3),
				     sizeof(struct snic_fw_req));
		if (ret)
			goto error_cleanup;
	}

	for (i = 0; i < snic->intr_count; i++) {
		ret = svnic_intr_alloc(snic->vdev, &snic->intr[i], i);
		if (ret)
			goto error_cleanup;
	}

	/*
	 * Init WQ Resources.
	 * WQ[0 to n] points to CQ[0 to n-1]
	 * firmware to host comm points to CQ[n to m+1]
	 */
	err_intr_enable = 1;
	err_intr_offset = snic->err_intr_offset;

	for (i = 0; i < snic->wq_count; i++) {
		svnic_wq_init(&snic->wq[i],
			      i,
			      err_intr_enable,
			      err_intr_offset);
	}

	/* Each CQ[i] uses interrupt vector i. */
	for (i = 0; i < snic->cq_count; i++) {
		intr_offset = i;
		svnic_cq_init(&snic->cq[i],
			      0 /* flow_control_enable */,
			      1 /* color_enable */,
			      0 /* cq_head */,
			      0 /* cq_tail */,
			      1 /* cq_tail_color */,
			      1 /* interrupt_enable */,
			      1 /* cq_entry_enable */,
			      0 /* cq_message_enable */,
			      intr_offset,
			      0 /* cq_message_addr */);
	}

	/*
	 * Init INTR resources
	 * Assumption : snic is always in MSI-X mode
	 */
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
	mask_on_assertion = 1;
	for (i = 0; i < snic->intr_count; i++) {
		svnic_intr_init(&snic->intr[i],
				snic->config.intr_timer,
				snic->config.intr_timer_type,
				mask_on_assertion);
	}

	/* init the stats memory by making the first call here */
	ret = svnic_dev_stats_dump(snic->vdev, &snic->stats);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "svnic_dev_stats_dump failed - x%x\n",
			      ret);
		goto error_cleanup;
	}

	/* Clear LIF stats */
	svnic_dev_stats_clear(snic->vdev);
	ret = 0;

	return ret;

error_cleanup:
	snic_free_vnic_res(snic);

	return ret;
}
/*
 * snic_log_q_error : Reads each WQ's hardware error-status register and
 * logs any non-zero value. Diagnostic only; does not clear the status.
 */
void
snic_log_q_error(struct snic *snic)
{
	unsigned int q;
	u32 err;

	for (q = 0; q < snic->wq_count; q++) {
		err = ioread32(&snic->wq[q].ctrl->error_status);
		if (!err)
			continue;

		SNIC_HOST_ERR(snic->shost,
			      "WQ[%d] error status %d\n",
			      q,
			      err);
	}
} /* end of snic_log_q_error */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_RES_H
#define __SNIC_RES_H
#include "snic_io.h"
#include "wq_enet_desc.h"
#include "vnic_wq.h"
#include "snic_fwint.h"
#include "vnic_cq_fw.h"
/*
 * snic_icmnd_init : Encodes a SCSI IO command (ICMND) into @req for the
 * firmware. Multi-byte fields are stored little-endian; the CDB area is
 * zeroed before copying so trailing bytes beyond @cdb_len are 0.
 *
 * NOTE(review): assumes @lun points at LUN_ADDR_LEN bytes and @scsi_cdb
 * at @cdb_len (<= SNIC_CDB_LEN) bytes — confirm at call sites.
 */
static inline void
snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx,
		u16 flags, u64 tgt_id, u8 *lun, u8 *scsi_cdb, u8 cdb_len,
		u32 data_len, u16 sg_cnt, ulong sgl_addr,
		dma_addr_t sns_addr_pa, u32 sense_len)
{
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt,
			ctx);

	req->u.icmnd.flags = cpu_to_le16(flags);
	req->u.icmnd.tgt_id = cpu_to_le64(tgt_id);
	memcpy(&req->u.icmnd.lun_id, lun, LUN_ADDR_LEN);
	req->u.icmnd.cdb_len = cdb_len;
	/* Zero-fill, then copy, so unused CDB bytes are deterministic. */
	memset(req->u.icmnd.cdb, 0, SNIC_CDB_LEN);
	memcpy(req->u.icmnd.cdb, scsi_cdb, cdb_len);
	req->u.icmnd.data_len = cpu_to_le32(data_len);
	req->u.icmnd.sg_addr = cpu_to_le64(sgl_addr);
	req->u.icmnd.sense_len = cpu_to_le32(sense_len);
	req->u.icmnd.sense_addr = cpu_to_le64(sns_addr_pa);
}
/*
 * snic_itmf_init : Encodes a task-management request (ITMF) into @req.
 * Multi-byte fields are stored little-endian.
 *
 * NOTE(review): assumes @lun points at LUN_ADDR_LEN bytes — confirm at
 * call sites.
 */
static inline void
snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx,
	       u16 flags, u32 req_id, u64 tgt_id, u8 *lun, u8 tm_type)
{
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx);

	req->u.itmf.tm_type = tm_type;
	req->u.itmf.flags = cpu_to_le16(flags);
	/* req_id valid only in abort, clear task */
	req->u.itmf.req_id = cpu_to_le32(req_id);
	req->u.itmf.tgt_id = cpu_to_le64(tgt_id);
	memcpy(&req->u.itmf.lun_id, lun, LUN_ADDR_LEN);
}
/*
 * snic_queue_wq_eth_desc : Fills the next WQ descriptor with an
 * ethernet-framed buffer and posts it (single fragment: sop=1, eop=1,
 * desc_skip_cnt=1 in svnic_wq_post).
 */
static inline void
snic_queue_wq_eth_desc(struct vnic_wq *wq,
		       void *os_buf,
		       dma_addr_t dma_addr,
		       unsigned int len,
		       int vlan_tag_insert,
		       unsigned int vlan_tag,
		       int cq_entry)
{
	struct wq_enet_desc *desc = svnic_wq_next_desc(wq);

	wq_enet_desc_enc(desc,
			 (u64)dma_addr | VNIC_PADDR_TARGET,
			 (u16)len,
			 0, /* mss_or_csum_offset */
			 0, /* fc_eof */
			 0, /* offload mode */
			 1, /* eop */
			 (u8)cq_entry,
			 0, /* fcoe_encap */
			 (u8)vlan_tag_insert,
			 (u16)vlan_tag,
			 0 /* loopback */);

	svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
}
struct snic;
int snic_get_vnic_config(struct snic *);
int snic_alloc_vnic_res(struct snic *);
void snic_free_vnic_res(struct snic *);
void snic_get_res_counts(struct snic *);
void snic_log_q_error(struct snic *);
int snic_get_vnic_resources_size(struct snic *);
#endif /* __SNIC_RES_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "snic_io.h"
#include "snic.h"
/* Block-layer request tag of a scsi_cmnd, used as the snic IO tag. */
#define snic_cmd_tag(sc)	(((struct scsi_cmnd *) sc)->request->tag)
/* Human-readable names for enum snic_state, indexed by state value. */
const char *snic_state_str[] = {
	[SNIC_INIT]	= "SNIC_INIT",
	[SNIC_ERROR]	= "SNIC_ERROR",
	[SNIC_ONLINE]	= "SNIC_ONLINE",
	[SNIC_OFFLINE]	= "SNIC_OFFLINE",
	[SNIC_FWRESET]	= "SNIC_FWRESET",
};
/*
 * Human-readable names for snic IO request states (debug logging only).
 * Fix: corrected "COMPELTE" typos in three of the strings.
 */
static const char * const snic_req_state_str[] = {
	[SNIC_IOREQ_NOT_INITED]	= "SNIC_IOREQ_NOT_INITED",
	[SNIC_IOREQ_PENDING]	= "SNIC_IOREQ_PENDING",
	[SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
	[SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
	[SNIC_IOREQ_LR_PENDING]	= "SNIC_IOREQ_LR_PENDING",
	[SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
	[SNIC_IOREQ_COMPLETE]	= "SNIC_IOREQ_CMD_COMPLETE",
};
/* snic cmd status strings */
/* Human-readable names for firmware IO completion statuses. */
static const char * const snic_io_status_str[] = {
	[SNIC_STAT_IO_SUCCESS]	= "SNIC_STAT_IO_SUCCESS", /* 0x0 */
	[SNIC_STAT_INVALID_HDR] = "SNIC_STAT_INVALID_HDR",
	[SNIC_STAT_OUT_OF_RES]	= "SNIC_STAT_OUT_OF_RES",
	[SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM",
	[SNIC_STAT_REQ_NOT_SUP]	= "SNIC_STAT_REQ_NOT_SUP",
	[SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND",
	[SNIC_STAT_ABORTED]	= "SNIC_STAT_ABORTED",
	[SNIC_STAT_TIMEOUT]	= "SNIC_STAT_TIMEOUT",
	[SNIC_STAT_SGL_INVALID] = "SNIC_STAT_SGL_INVALID",
	[SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH",
	[SNIC_STAT_FW_ERR]	= "SNIC_STAT_FW_ERR",
	[SNIC_STAT_ITMF_REJECT] = "SNIC_STAT_ITMF_REJECT",
	[SNIC_STAT_ITMF_FAIL]	= "SNIC_STAT_ITMF_FAIL",
	[SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN",
	[SNIC_STAT_CMND_REJECT] = "SNIC_STAT_CMND_REJECT",
	[SNIC_STAT_DEV_OFFLINE] = "SNIC_STAT_DEV_OFFLINE",
	[SNIC_STAT_NO_BOOTLUN]	= "SNIC_STAT_NO_BOOTLUN",
	[SNIC_STAT_SCSI_ERR]	= "SNIC_STAT_SCSI_ERR",
	[SNIC_STAT_NOT_READY]	= "SNIC_STAT_NOT_READY",
	[SNIC_STAT_FATAL_ERROR]	= "SNIC_STAT_FATAL_ERROR",
};
static void snic_scsi_cleanup(struct snic *, int);
/* Maps an snic state value to its name; "Unknown" for out-of-range
 * or unnamed states. */
const char *
snic_state_to_str(unsigned int state)
{
	if (state < ARRAY_SIZE(snic_state_str) && snic_state_str[state])
		return snic_state_str[state];

	return "Unknown";
}
/* Maps a firmware IO status code to its name; "Unknown" for
 * out-of-range or unnamed codes. */
static const char *
snic_io_status_to_str(unsigned int state)
{
	if (state < ARRAY_SIZE(snic_io_status_str) &&
	    snic_io_status_str[state])
		return snic_io_status_str[state];

	return "Unknown";
}
/* Maps an IO request state to its name; "Unknown" for out-of-range
 * or unnamed states. */
static const char *
snic_ioreq_state_to_str(unsigned int state)
{
	if (state < ARRAY_SIZE(snic_req_state_str) &&
	    snic_req_state_str[state])
		return snic_req_state_str[state];

	return "Unknown";
}
/* Per-IO lock selected by hashing the command's tag (SNIC_IO_LOCKS
 * must be a power of two for the mask to work). */
static inline spinlock_t *
snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc)
{
	return &snic->io_req_lock[snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1)];
}
static inline spinlock_t *
snic_io_lock_tag(struct snic *snic, int tag)
{
return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)];
}
/* snic_release_req_buf : Releases snic_req_info */
/*
 * Unmaps the sense buffer and data SGL DMA mappings set up in
 * snic_queue_icmnd_req()/scsi_dma_map(), then frees the request. The
 * command must already be in a completed/terminated state (checked by
 * the BUG_ON below) — freeing a live request would corrupt in-flight IO.
 */
static void
snic_release_req_buf(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc)
{
	struct snic_host_req *req = rqi_to_req(rqi);

	/* Freeing cmd without marking completion, not okay */
	SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) ||
		      (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE)));

	SNIC_SCSI_DBG(snic->shost,
		      "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n",
		      sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req,
		      rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)),
		      CMD_FLAGS(sc));

	/* Sense buffer was mapped in snic_queue_icmnd_req(); unmap it. */
	if (req->u.icmnd.sense_addr)
		pci_unmap_single(snic->pdev,
				 le64_to_cpu(req->u.icmnd.sense_addr),
				 SCSI_SENSE_BUFFERSIZE,
				 PCI_DMA_FROMDEVICE);

	scsi_dma_unmap(sc);

	snic_req_free(snic, rqi);
} /* end of snic_release_req_buf */
/*
 * snic_queue_icmnd_req : Queues snic_icmnd request
 *
 * Builds the extended SGL from the command's scatterlist (if any), DMA
 * maps the sense buffer, encodes the ICMND via snic_icmnd_init() and
 * posts it on the work queue. On success the sense-buffer mapping is
 * owned by the request and released in snic_release_req_buf().
 *
 * Returns 0 on success, negative errno on mapping/queueing failure.
 */
static int
snic_queue_icmnd_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int sg_cnt)
{
	struct scatterlist *sg;
	struct snic_sg_desc *sgd;
	dma_addr_t pa = 0;
	struct scsi_lun lun;
	u16 flags = 0;
	int ret = 0;
	unsigned int i;

	if (sg_cnt) {
		flags = SNIC_ICMND_ESGL;
		sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req);

		/* Copy each mapped segment into the firmware SGL (LE). */
		for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) {
			sgd->addr = cpu_to_le64(sg_dma_address(sg));
			sgd->len = cpu_to_le32(sg_dma_len(sg));
			sgd->_resvd = 0;
			sgd++;
		} /* end of for_each_sg */
	}

	/* Map the sense buffer for the device to write into. */
	pa = pci_map_single(snic->pdev,
			    sc->sense_buffer,
			    SCSI_SENSE_BUFFERSIZE,
			    PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(snic->pdev, pa)) {
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
			      sc->sense_buffer, snic_cmd_tag(sc));
		ret = -ENOMEM;

		return ret;
	}

	int_to_scsilun(sc->device->lun, &lun);
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags |= SNIC_ICMND_RD;
	if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags |= SNIC_ICMND_WR;

	/* Initialize icmnd */
	snic_icmnd_init(rqi->req,
			snic_cmd_tag(sc),
			snic->config.hid, /* hid */
			(ulong) rqi,
			flags, /* command flags */
			rqi->tgt_id,
			lun.scsi_lun,
			sc->cmnd,
			sc->cmd_len,
			scsi_bufflen(sc),
			sg_cnt,
			(ulong) req_to_sgl(rqi->req),
			pa, /* sense buffer pa */
			SCSI_SENSE_BUFFERSIZE);

	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd: Queuing Icmnd Failed. ret = %d\n",
			      ret);

	return ret;
} /* end of snic_queue_icmnd_req */
/*
 * snic_issue_scsi_req : Prepares IO request and Issues to FW.
 *
 * DMA-maps the command's data, allocates/initializes a snic_req_info,
 * links it to the scsi_cmnd and queues the icmnd. On queue failure the
 * link is torn down under the IO lock and the request is released.
 * Returns 0 on success, negative errno on failure.
 */
static int
snic_issue_scsi_req(struct snic *snic,
		    struct snic_tgt *tgt,
		    struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	int sg_cnt = 0;
	int ret = 0;
	u32 tag = snic_cmd_tag(sc);
	u64 cmd_trc = 0, cmd_st_flags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = SNIC_NO_FLAGS;
	sg_cnt = scsi_dma_map(sc);
	if (sg_cnt < 0) {
		SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0,
			 sc->cmnd[0], sg_cnt, CMD_STATE(sc));
		SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n");
		ret = -ENOMEM;
		goto issue_sc_end;
	}
	rqi = snic_req_init(snic, sg_cnt);
	if (!rqi) {
		scsi_dma_unmap(sc);
		ret = -ENOMEM;
		goto issue_sc_end;
	}
	rqi->tgt_id = tgt->id;
	rqi->sc = sc;
	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	/* Snapshot trace values before queuing; fw may complete IO quickly */
	cmd_trc = SNIC_TRC_CMD(sc);
	CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED);
	cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc);
	io_lock = snic_io_lock_hash(snic, sc);
	/* create wq desc and enqueue it */
	ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issue_sc: icmnd qing Failed for sc %p, err %d\n",
			      sc, ret);
		/* Unlink rqi from the command under the IO lock before
		 * releasing it, so the completion path cannot race us. */
		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
		CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */
		spin_unlock_irqrestore(io_lock, flags);
		if (rqi)
			snic_release_req_buf(snic, rqi, sc);
		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0,
			 SNIC_TRC_CMD_STATE_FLAGS(sc));
	} else {
		/* Queued OK: update max IO size / max queue-time stats */
		u32 io_sz = scsi_bufflen(sc) >> 9;
		u32 qtime = jiffies - rqi->start_time;
		struct snic_io_stats *iostats = &snic->s_stats.io;
		if (io_sz > atomic64_read(&iostats->max_io_sz))
			atomic64_set(&iostats->max_io_sz, io_sz);
		if (qtime > atomic64_read(&iostats->max_qtime))
			atomic64_set(&iostats->max_qtime, qtime);
		SNIC_SCSI_DBG(snic->shost,
			      "issue_sc:sc %p, tag %d queued to WQ.\n",
			      sc, tag);
		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi,
			 sg_cnt, cmd_trc, cmd_st_flags);
	}
issue_sc_end:
	return ret;
} /* end of snic_issue_scsi_req */
/*
 * snic_queuecommand
 * Routine to send a scsi cdb to LLD
 * Called with host_lock held and interrupts disabled
 *
 * Returns 0 when the command was accepted (or completed with an error
 * via scsi_done), SCSI_MLQUEUE_HOST_BUSY to ask the midlayer to retry.
 */
int
snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct snic_tgt *tgt = NULL;
	struct snic *snic = shost_priv(shost);
	int ret;
	tgt = starget_to_tgt(scsi_target(sc->device));
	ret = snic_tgt_chkready(tgt);
	if (ret) {
		/* Target not ready: complete the command immediately with
		 * the result snic_tgt_chkready supplied. */
		SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
		atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
		sc->result = ret;
		sc->scsi_done(sc);
		return 0;
	}
	if (snic_get_state(snic) != SNIC_ONLINE) {
		SNIC_HOST_ERR(shost, "snic state is %s\n",
			      snic_state_str[snic_get_state(snic)]);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	atomic_inc(&snic->ios_inflight);
	SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n",
		      sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);
	/* Clear the per-command private state before issuing */
	memset(scsi_cmd_priv(sc), 0, sizeof(struct snic_internal_io_state));
	ret = snic_issue_scsi_req(snic, tgt, sc);
	if (ret) {
		SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
		ret = SCSI_MLQUEUE_HOST_BUSY;
	} else
		snic_stats_update_active_ios(&snic->s_stats);
	atomic_dec(&snic->ios_inflight);
	return ret;
} /* end of snic_queuecommand */
/*
 * snic_proc_tmreq_pending_state : Records, via command flags, that the
 * firmware completed an IO while an abort or LUN reset was still pending
 * on it. Caller should hold the IO lock.
 */
static void
snic_proc_tmreq_pending_state(struct snic *snic,
			      struct scsi_cmnd *sc,
			      u8 cmpl_status)
{
	switch (CMD_STATE(sc)) {
	case SNIC_IOREQ_ABTS_PENDING:
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING;
		break;

	case SNIC_IOREQ_LR_PENDING:
		CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING;
		break;

	default:
		SNIC_BUG_ON(1);
		break;
	}

	/* Note how the IO itself finished while the TM was outstanding. */
	if (cmpl_status == SNIC_STAT_IO_SUCCESS)
		CMD_FLAGS(sc) |= SNIC_IO_DONE;
	else if (cmpl_status == SNIC_STAT_ABORTED)
		CMD_FLAGS(sc) |= SNIC_IO_ABORTED;
	else
		SNIC_BUG_ON(1);
}
/*
 * snic_process_io_failed_state:
 * Processes IO's error states
 *
 * Maps a firmware completion status to a SCSI midlayer result (DID_*),
 * updates the matching error counter, and stores the result in sc.
 * Caller should hold the IO lock (see snic_process_icmnd_cmpl_status).
 */
static void
snic_process_io_failed_state(struct snic *snic,
			     struct snic_icmnd_cmpl *icmnd_cmpl,
			     struct scsi_cmnd *sc,
			     u8 cmpl_stat)
{
	int res = 0;
	switch (cmpl_stat) {
	case SNIC_STAT_TIMEOUT:		/* Req was timedout */
		atomic64_inc(&snic->s_stats.misc.io_tmo);
		res = DID_TIME_OUT;
		break;
	case SNIC_STAT_ABORTED:		/* Req was aborted */
		atomic64_inc(&snic->s_stats.misc.io_aborted);
		res = DID_ABORT;
		break;
	case SNIC_STAT_DATA_CNT_MISMATCH:/* Recv/Sent more/less data than exp */
		atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
		res = DID_ERROR;
		break;
	case SNIC_STAT_OUT_OF_RES: /* Out of resources to complete request */
		atomic64_inc(&snic->s_stats.fw.out_of_res);
		res = DID_REQUEUE;
		break;
	case SNIC_STAT_IO_NOT_FOUND:	/* Requested I/O was not found */
		atomic64_inc(&snic->s_stats.io.io_not_found);
		res = DID_ERROR;
		break;
	case SNIC_STAT_SGL_INVALID:	/* Req was aborted to due to sgl error*/
		atomic64_inc(&snic->s_stats.misc.sgl_inval);
		res = DID_ERROR;
		break;
	case SNIC_STAT_FW_ERR:		/* Req terminated due to FW Error */
		atomic64_inc(&snic->s_stats.fw.io_errs);
		res = DID_ERROR;
		break;
	case SNIC_STAT_SCSI_ERR:	/* FW hits SCSI Error */
		/* res stays DID_OK (0); the SCSI status byte carries it */
		atomic64_inc(&snic->s_stats.fw.scsi_errs);
		break;
	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
	case SNIC_STAT_DEV_OFFLINE:	/* Device offline */
		/* intentional fallthrough: both report DID_NO_CONNECT */
		res = DID_NO_CONNECT;
		break;
	case SNIC_STAT_INVALID_HDR:	/* Hdr contains invalid data */
	case SNIC_STAT_INVALID_PARM:	/* Some param in req is invalid */
	case SNIC_STAT_REQ_NOT_SUP:	/* Req type is not supported */
	case SNIC_STAT_CMND_REJECT:	/* Req rejected */
	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
	default:
		SNIC_SCSI_DBG(snic->shost,
			      "Invalid Hdr/Param or Req Not Supported or Cmnd Rejected or Device Offline. or Unknown\n");
		res = DID_ERROR;
		break;
	}
	SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
		      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
	/* Set sc->result */
	sc->result = (res << 16) | icmnd_cmpl->scsi_status;
} /* end of snic_process_io_failed_state */
/*
 * snic_tmreq_pending : is task management in progress.
 * Returns nonzero when an abort or LUN reset is pending on sc.
 */
static int
snic_tmreq_pending(struct scsi_cmnd *sc)
{
	switch (CMD_STATE(sc)) {
	case SNIC_IOREQ_ABTS_PENDING:
	case SNIC_IOREQ_LR_PENDING:
		return 1;
	default:
		return 0;
	}
}
/*
 * snic_process_icmnd_cmpl_status:
 * Caller should hold io_lock
 *
 * Marks the command complete and fills sc->result from the firmware
 * completion. Returns 0 when the IO succeeded, 1 when it failed (so the
 * caller can emit diagnostics).
 */
static int
snic_process_icmnd_cmpl_status(struct snic *snic,
			       struct snic_icmnd_cmpl *icmnd_cmpl,
			       u8 cmpl_stat,
			       struct scsi_cmnd *sc)
{
	u8 scsi_stat = icmnd_cmpl->scsi_status;
	u64 xfer_len = 0;
	int ret = 0;
	/* Mark the IO as complete */
	CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
	if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
		sc->result = (DID_OK << 16) | scsi_stat;
		xfer_len = scsi_bufflen(sc);
		/* Update SCSI Cmd with resid value */
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
		if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
			xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
			atomic64_inc(&snic->s_stats.misc.io_under_run);
		}
		/* TASK SET FULL from the device counts as a queue-full */
		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&snic->s_stats.misc.qfull);
		ret = 0;
	} else {
		snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat);
		atomic64_inc(&snic->s_stats.io.fail);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
		ret = 1;
	}
	return ret;
} /* end of snic_process_icmnd_cmpl_status */
/*
 * snic_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 *
 * Decodes the firmware response header, locates the scsi_cmnd by tag,
 * processes the completion status and - unless a task-management request
 * is pending on the command - unlinks the request, releases its buffers
 * and completes the IO back to the SCSI midlayer.
 *
 * Fix: removed the local 'req' variable; it was declared NULL, never
 * assigned, and its WARN_ON_ONCE(req) could therefore never fire.
 */
static void
snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct scsi_cmnd *sc = NULL;
	struct snic_icmnd_cmpl *icmnd_cmpl = NULL;
	struct snic_req_info *rqi = NULL;
	unsigned long flags, start_time;
	spinlock_t *io_lock;
	u8 sc_stat = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	icmnd_cmpl = &fwreq->u.icmnd_cmpl;
	sc_stat = icmnd_cmpl->scsi_status;

	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,i ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	/* A tag outside the tag space indicates a corrupt response. */
	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));

		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
	WARN_ON_ONCE(!sc);

	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id,
			      fwreq);

		SNIC_TRC(snic->shost->host_no, cmnd_id, 0,
			 ((u64)hdr_stat << 16 |
			  (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags),
			 (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx);

		return;
	}

	io_lock = snic_io_lock_hash(snic, sc);

	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl:lun %lld sc %p cmd %xtag %d flags 0x%llx rqi %p\n",
		      sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
		      CMD_FLAGS(sc), rqi);

	/* firmware's ctx must match the rqi we linked at issue time */
	SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);

	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id, sc, CMD_FLAGS(sc));

		return;
	}

	rqi = (struct snic_req_info *) ctx;
	start_time = rqi->start_time;

	/* firmware completed the io */
	rqi->io_cmpl = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (unlikely(snic_tmreq_pending(sc))) {
		snic_proc_tmreq_pending_state(snic, sc, hdr_stat);
		spin_unlock_irqrestore(io_lock, flags);

		snic_stats_update_io_cmpl(&snic->s_stats);

		/* Expected value is SNIC_STAT_ABORTED */
		if (likely(hdr_stat == SNIC_STAT_ABORTED))
			return;

		SNIC_SCSI_DBG(snic->shost,
			      "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      snic_io_status_to_str(hdr_stat),
			      sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid),
			      CMD_FLAGS(sc));

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
			 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

		return;
	}

	if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) {
		scsi_print_command(sc);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n",
			      sc, sc->cmnd[0], cmnd_id,
			      snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc));
	}

	/* Break link with the SCSI Command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= SNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	/* For now, consider only successful IO. */
	snic_calc_io_process_time(snic, rqi);

	snic_release_req_buf(snic, rqi, sc);

	SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	if (sc->scsi_done)
		sc->scsi_done(sc);

	snic_stats_update_io_cmpl(&snic->s_stats);
} /* end of snic_icmnd_cmpl_handler */
/*
 * snic_proc_dr_cmpl_locked : Processes a device-reset (LUN reset)
 * completion from firmware. Caller holds the IO lock. Ignores the
 * completion if an abort is pending on the reset or the reset already
 * timed out; otherwise records LR_COMPLETE and wakes the waiter.
 */
static void
snic_proc_dr_cmpl_locked(struct snic *snic,
			 struct snic_fw_req *fwreq,
			 u8 cmpl_stat,
			 u32 cmnd_id,
			 struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc);
	u32 start_time = rqi->start_time;
	CMD_LR_STATUS(sc) = cmpl_stat;
	SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));
	/* A terminate has been issued on this reset; let that path win */
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING;
		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));
		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));
		return;
	}
	/* Reset already timed out on the host side; drop the completion */
	if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) {
		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));
		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));
		return;
	}
	CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE;
	CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;
	SNIC_SCSI_DBG(snic->shost,
		      "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n",
		      (int)(cmnd_id & SNIC_TAG_MASK),
		      snic_io_status_to_str(cmpl_stat),
		      CMD_FLAGS(sc));
	/* Wake the thread waiting for this device reset, if any */
	if (rqi->dr_done)
		complete(rqi->dr_done);
} /* end of snic_proc_dr_cmpl_locked */
/*
 * snic_update_abort_stats : Updates abort stats based on completion status.
 * Success bumps nothing; timeout, IO-not-found and any other failure each
 * bump their own counter.
 */
static void
snic_update_abort_stats(struct snic *snic, u8 cmpl_stat)
{
	struct snic_abort_stats *stats = &snic->s_stats.abts;

	SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n");

	if (cmpl_stat == SNIC_STAT_IO_SUCCESS)
		return;

	if (cmpl_stat == SNIC_STAT_TIMEOUT)
		atomic64_inc(&stats->fw_tmo);
	else if (cmpl_stat == SNIC_STAT_IO_NOT_FOUND)
		atomic64_inc(&stats->io_not_found);
	else
		atomic64_inc(&stats->fail);
}
/*
 * snic_process_itmf_cmpl : Processes a task-management (abort / device
 * reset) completion. Dispatches on the TM tag bits carried in the upper
 * part of cmnd_id. Returns 0 on success, -1 for late/unknown completions.
 */
static int
snic_process_itmf_cmpl(struct snic *snic,
		       struct snic_fw_req *fwreq,
		       u32 cmnd_id,
		       u8 cmpl_stat,
		       struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	u32 tm_tags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 start_time = 0;
	int ret = 0;
	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	WARN_ON_ONCE(!rqi);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), cmnd_id, sc,
			      CMD_FLAGS(sc));
		return ret;
	}
	/* Extract task management flags */
	tm_tags = cmnd_id & ~(SNIC_TAG_MASK);
	start_time = rqi->start_time;
	cmnd_id &= (SNIC_TAG_MASK);
	switch (tm_tags) {
	case SNIC_TAG_ABORT:
		/* Abort only issued on cmd */
		snic_update_abort_stats(snic, cmpl_stat);
		if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it. */
			ret = -1;
			spin_unlock_irqrestore(io_lock, flags);
			break;
		}
		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n",
			      cmnd_id,
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));
		/*
		 * If scsi_eh thread is blocked waiting for abts complete,
		 * signal completion to it. IO will be cleaned in the thread,
		 * else clean it in this context.
		 */
		if (rqi->abts_done) {
			complete(rqi->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
			break; /* jump out */
		}
		/* No waiter: unlink, release and complete the IO here */
		CMD_SP(sc) = NULL;
		sc->result = (DID_ERROR << 16);
		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n",
			      sc, CMD_FLAGS(sc));
		spin_unlock_irqrestore(io_lock, flags);
		snic_release_req_buf(snic, rqi, sc);
		if (sc->scsi_done) {
			SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
				 jiffies_to_msecs(jiffies - start_time),
				 (ulong) fwreq, SNIC_TRC_CMD(sc),
				 SNIC_TRC_CMD_STATE_FLAGS(sc));
			sc->scsi_done(sc);
		}
		break;
	case SNIC_TAG_DEV_RST:
	case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST:
		snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc);
		spin_unlock_irqrestore(io_lock, flags);
		ret = 0;
		break;
	case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST:
		/* Abort and terminate completion of device reset req */
		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;
		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:dev reset abts cmpl recvd. id %d status %s flags 0x%llx\n",
			      cmnd_id, snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));
		if (rqi->abts_done)
			complete(rqi->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
		break;
	default:
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags);
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      cmnd_id,
			      CMD_FLAGS(sc));
		ret = -1;
		SNIC_BUG_ON(1);
		break;
	}
	return ret;
} /* end of snic_process_itmf_cmpl_status */
/*
 * snic_itmf_cmpl_handler.
 * Routine to handle itmf completions.
 *
 * Decodes the firmware header, resolves the scsi_cmnd either from the
 * request context (ioctl-issued device reset) or by tag, then hands off
 * to snic_process_itmf_cmpl.
 */
static void
snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	struct scsi_cmnd  *sc = NULL;
	struct snic_req_info *rqi = NULL;
	struct snic_itmf_cmpl *itmf_cmpl = NULL;
	ulong ctx;
	u32 cmnd_id;
	u32 hid;
	u8 typ;
	u8 hdr_stat;
	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,ctx = %lx\n",
		      __func__, typ, hdr_stat, cmnd_id, hid, ctx);
	itmf_cmpl = &fwreq->u.itmf_cmpl;
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: nterm %u , flags 0x%x\n",
		      le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags);
	/* spl case, dev reset issued through ioctl */
	if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) {
		/* sc comes from the request context, not from a host tag */
		rqi = (struct snic_req_info *) ctx;
		sc = rqi->sc;
		goto ioctl_dev_rst;
	}
	if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		SNIC_BUG_ON(1);
		return;
	}
	sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
ioctl_dev_rst:
	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id);
		return;
	}
	snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc);
} /* end of snic_itmf_cmpl_handler */
/*
 * snic_hba_reset_scsi_cleanup : Cleans up all outstanding SCSI commands
 * after an HBA reset and reconciles the IO / firmware request counters.
 */
static void
snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_stats *stats = &snic->s_stats;
	long nr_ios = 0, nr_fwreqs = 0;

	SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n");

	snic_scsi_cleanup(snic, snic_cmd_tag(sc));

	/* Fold every still-active IO into the completed count. */
	nr_ios = atomic64_read(&stats->io.active);
	atomic64_add(nr_ios, &stats->io.compl);
	atomic64_sub(nr_ios, &stats->io.active);

	/* Bring the outstanding firmware request count back to zero. */
	nr_fwreqs = atomic64_read(&stats->fw.actv_reqs);
	atomic64_sub(nr_fwreqs, &stats->fw.actv_reqs);
}
/*
 * snic_hba_reset_cmpl_handler :
 *
 * Notes :
 * 1. Cleanup all the scsi cmds, release all snic specific cmds
 * 2. Issue Report Targets in case of SAN targets
 *
 * Returns 0 on success, 1 when the completion cannot be matched to a
 * pending reset (bad tag, missing sc/rqi, or reset already timed out).
 *
 * Fix: corrected "timout" -> "timeout" typo in the error message.
 */
static int
snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	ulong ctx;
	u32 cmnd_id;
	u32 hid;
	u8 typ;
	u8 hdr_stat;
	struct scsi_cmnd *sc = NULL;
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags, gflags;
	int ret = 0;

	SNIC_HOST_INFO(snic->shost,
		       "reset_cmpl:HBA Reset Completion received.\n");

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	/* spl case, host reset issued through ioctl */
	if (cmnd_id == SCSI_NO_TAG) {
		rqi = (struct snic_req_info *) ctx;
		sc = rqi->sc;

		goto ioctl_hba_rst;
	}

	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		SNIC_BUG_ON(1);

		return 1;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
ioctl_hba_rst:
	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id);
		ret = 1;

		return ret;
	}

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* remove_wait is armed by the reset issuer; NULL means we are late */
	if (!snic->remove_wait) {
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl:host reset completed after timeout\n");
		ret = 1;

		return ret;
	}

	rqi = (struct snic_req_info *) CMD_SP(sc);
	WARN_ON_ONCE(!rqi);

	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id, sc,
			      CMD_FLAGS(sc));
		ret = 1;

		return ret;
	}
	/* stats */
	spin_unlock_irqrestore(io_lock, flags);

	/* scsi cleanup */
	snic_hba_reset_scsi_cleanup(snic, sc);

	SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE &&
		    snic_get_state(snic) != SNIC_FWRESET);

	/* Careful locking between snic_lock and io lock */
	spin_lock_irqsave(io_lock, flags);
	spin_lock_irqsave(&snic->snic_lock, gflags);
	if (snic_get_state(snic) == SNIC_FWRESET)
		snic_set_state(snic, SNIC_ONLINE);
	spin_unlock_irqrestore(&snic->snic_lock, gflags);

	/* Wake the thread that issued the reset and is waiting on it */
	if (snic->remove_wait)
		complete(snic->remove_wait);

	spin_unlock_irqrestore(io_lock, flags);
	atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl);

	ret = 0;

	/* Rediscovery is for SAN */
	if (snic->config.xpt_type == SNIC_DAS)
		return ret;

	SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n");
	queue_work(snic_glob->event_q, &snic->disc_work);

	return ret;
}
/*
 * snic_msg_ack_handler : Handles SNIC_MSG_ACK from firmware.
 * Not implemented; SNIC_ASSERT_NOT_IMPL flags the unexpected arrival.
 */
static void
snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n");
	SNIC_ASSERT_NOT_IMPL(1);
}
/*
 * snic_aen_handler : Handles asynchronous event notifications from
 * firmware. Currently only logs the event type; actual handling is not
 * implemented (see the trailing SNIC_ASSERT_NOT_IMPL).
 */
static void
snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct snic_async_evnotify *aen = &fwreq->u.async_ev;
	u32 event_id = 0;
	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);
	event_id = le32_to_cpu(aen->ev_id);
	switch (event_id) {
	case SNIC_EV_TGT_OFFLINE:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n");
		break;
	case SNIC_EV_TGT_ONLINE:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n");
		break;
	case SNIC_EV_LUN_OFFLINE:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n");
		break;
	case SNIC_EV_LUN_ONLINE:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n");
		break;
	case SNIC_EV_CONF_CHG:
		SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n");
		break;
	case SNIC_EV_TGT_ADDED:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n");
		break;
	case SNIC_EV_TGT_DELTD:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n");
		break;
	case SNIC_EV_LUN_ADDED:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n");
		break;
	case SNIC_EV_LUN_DELTD:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n");
		break;
	case SNIC_EV_DISC_CMPL:
		SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n");
		break;
	default:
		SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n");
		SNIC_BUG_ON(1);
		break;
	}
	SNIC_ASSERT_NOT_IMPL(1);
} /* end of snic_aen_handler */
/*
 * snic_io_cmpl_handler
 * Routine to process CQ entries(IO Completions) posted by fw.
 *
 * Dispatches each firmware response to the handler matching its header
 * type and tracks the max completion-processing time.
 * Always returns 0 (vnic_cq_fw_service callback contract).
 *
 * Fix: corrected "Firmwqre" -> "firmware" typo in the debug message.
 */
static int
snic_io_cmpl_handler(struct vnic_dev *vdev,
		     unsigned int cq_idx,
		     struct snic_fw_req *fwreq)
{
	struct snic *snic = svnic_dev_priv(vdev);
	u64 start = jiffies, cmpl_time;

	snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq));

	/* Update FW Stats */
	if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) &&
	    (fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL))
		atomic64_dec(&snic->s_stats.fw.actv_reqs);

	SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) &&
		    (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY));

	/* Check for snic subsys errors */
	switch (fwreq->hdr.status) {
	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
		SNIC_HOST_ERR(snic->shost,
			      "sNIC SubSystem is NOT Ready.\n");
		break;

	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
		SNIC_HOST_ERR(snic->shost,
			      "sNIC SubSystem in Unrecoverable State.\n");
		break;
	}

	switch (fwreq->hdr.type) {
	case SNIC_RSP_EXCH_VER_CMPL:
		snic_io_exch_ver_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		snic_report_tgt_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_ICMND_CMPL:
		snic_icmnd_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_ITMF_CMPL:
		snic_itmf_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		snic_hba_reset_cmpl_handler(snic, fwreq);
		break;

	case SNIC_MSG_ACK:
		snic_msg_ack_handler(snic, fwreq);
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		snic_aen_handler(snic, fwreq);
		break;

	default:
		SNIC_BUG_ON(1);
		SNIC_SCSI_DBG(snic->shost,
			      "Unknown firmware completion request type %d\n",
			      fwreq->hdr.type);
		break;
	}

	/* Update Stats */
	cmpl_time = jiffies - start;
	if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time))
		atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time);

	return 0;
} /* end of snic_io_cmpl_handler */
/*
 * snic_fwcq_cmpl_handler
 * Routine to process fwCQ
 * This CQ is independent, and not associated with wq/rq/wq_copy queues
 *
 * Services every firmware CQ (indices wq_count..cq_count-1), tracks the
 * largest per-CQ batch seen, and returns the total entries processed.
 */
int
snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work)
{
	struct snic_misc_stats *mstats = &snic->s_stats.misc;
	unsigned int total = 0;
	unsigned int qi;

	for (qi = snic->wq_count; qi < snic->cq_count; qi++) {
		unsigned int nent;

		nent = vnic_cq_fw_service(&snic->cq[qi],
					  snic_io_cmpl_handler,
					  io_cmpl_work);
		total += nent;

		if (nent > atomic64_read(&mstats->max_cq_ents))
			atomic64_set(&mstats->max_cq_ents, nent);
	}

	return total;
} /* end of snic_fwcq_cmpl_handler */
/*
 * snic_queue_itmf_req: Common API to queue Task Management requests.
 * Use rqi->tm_tag for passing special tags.
 * @req_id : aborted request's tag, -1 for lun reset.
 *
 * Builds the itmf request (TM tag bits OR'ed into the command tag) and
 * posts it to the firmware WQ. Returns 0 on success, errno on failure.
 */
static int
snic_queue_itmf_req(struct snic *snic,
		    struct snic_host_req *tmreq,
		    struct scsi_cmnd *sc,
		    u32 tmf,
		    u32 req_id)
{
	struct snic_req_info *rqi = req_to_rqi(tmreq);
	struct scsi_lun lun;
	/* Combined tag: command tag in low bits, TM type in high bits */
	int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag;
	int ret = 0;
	SNIC_BUG_ON(!rqi);
	SNIC_BUG_ON(!rqi->tm_tag);
	/* fill in lun info */
	int_to_scsilun(sc->device->lun, &lun);
	/* Initialize snic_host_req: itmf */
	snic_itmf_init(tmreq,
		       tm_tag,
		       snic->config.hid,
		       (ulong) rqi,
		       0 /* flags */,
		       req_id, /* Command to be aborted. */
		       rqi->tgt_id,
		       lun.scsi_lun,
		       tmf);
	/*
	 * In case of multiple aborts on same cmd,
	 * use try_wait_for_completion and completion_done() to check
	 * whether it queues aborts even after completion of abort issued
	 * prior.SNIC_BUG_ON(completion_done(&rqi->done));
	 */
	ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq));
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret);
	else
		SNIC_SCSI_DBG(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc));
	return ret;
} /* end of snic_queue_itmf_req */
/*
 * snic_issue_tm_req : Allocates and queues a task-management request
 * (LUN reset or abort) for the given command. Returns 0 on success,
 * -EBUSY while a firmware reset is in progress, -ENOMEM or queueing
 * errno on failure.
 */
static int
snic_issue_tm_req(struct snic *snic,
		  struct snic_req_info *rqi,
		  struct scsi_cmnd *sc,
		  int tmf)
{
	struct snic_host_req *tmreq = NULL;
	int req_id = 0, tag = snic_cmd_tag(sc);
	int ret = 0;
	/* No TM requests while the firmware is being reset */
	if (snic_get_state(snic) == SNIC_FWRESET)
		return -EBUSY;
	atomic_inc(&snic->ios_inflight);
	SNIC_SCSI_DBG(snic->shost,
		      "issu_tmreq: Task mgmt req %d. rqi %p w/ tag %x\n",
		      tmf, rqi, tag);
	/* LUN reset targets the whole LUN (SCSI_NO_TAG); abort targets tag */
	if (tmf == SNIC_ITMF_LUN_RESET) {
		tmreq = snic_dr_req_init(snic, rqi);
		req_id = SCSI_NO_TAG;
	} else {
		tmreq = snic_abort_req_init(snic, rqi);
		req_id = tag;
	}
	if (!tmreq) {
		ret = -ENOMEM;
		goto tmreq_err;
	}
	ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);
	if (ret)
		goto tmreq_err;
	ret = 0;
tmreq_err:
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issu_tmreq: Queing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
			      tmf, sc, rqi, req_id, tag, ret);
	} else {
		SNIC_SCSI_DBG(snic->shost,
			      "issu_tmreq: Queuing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
			      tmf, sc, rqi, req_id, tag);
	}
	atomic_dec(&snic->ios_inflight);
	return ret;
}
/*
 * snic_queue_abort_req : Queues abort req to WQ
 * Marks the request with SNIC_TAG_ABORT and dispatches it via the
 * common task-management issue path.
 */
static int
snic_queue_abort_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int tmf)
{
	SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n",
		      sc, rqi, snic_cmd_tag(sc), tmf);

	/* Add special tag for abort */
	rqi->tm_tag |= SNIC_TAG_ABORT;

	return snic_issue_tm_req(snic, rqi, sc, tmf);
}
/*
 * snic_abort_finish : called by snic_abort_cmd on queuing abort successfully.
 *
 * Inspects the abort status recorded by the completion path and maps it
 * to SUCCESS/FAILED for the SCSI error handler. On driver-side timeout
 * the request is deliberately NOT freed (firmware may still own it).
 */
static int
snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int ret = 0, tag = snic_cmd_tag(sc);
	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
			      tag, sc, CMD_FLAGS(sc));
		ret = FAILED;
		goto abort_fail;
	}
	/* No one is waiting on this abort anymore */
	rqi->abts_done = NULL;
	ret = FAILED;
	/* Check the abort status. */
	switch (CMD_ABTS_STATUS(sc)) {
	case SNIC_INVALID_CODE:
		/* Firmware didn't complete abort req, timedout */
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
		atomic64_inc(&snic->s_stats.abts.drv_tmo);
		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n",
			      sc, snic_cmd_tag(sc), CMD_FLAGS(sc));
		/* do not release snic request in timedout case */
		rqi = NULL;
		goto abort_fail;
	case SNIC_STAT_IO_SUCCESS:
	case SNIC_STAT_IO_NOT_FOUND:
		/* IO-not-found means fw already completed it: abort "worked" */
		ret = SUCCESS;
		break;
	default:
		/* Firmware completed abort with error */
		ret = FAILED;
		break;
	}
	CMD_SP(sc) = NULL;
	SNIC_HOST_INFO(snic->shost,
		       "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
		       tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
		       CMD_FLAGS(sc));
abort_fail:
	spin_unlock_irqrestore(io_lock, flags);
	if (rqi)
		snic_release_req_buf(snic, rqi, sc);
	return ret;
} /* end of snic_abort_finish */
/*
 * snic_send_abort_and_wait : Issues Abort, and Waits
 *
 * Queues an abort (or abort+terminate, for SAN targets that are not ready)
 * for @sc and waits up to SNIC_ABTS_TIMEOUT for firmware to complete it.
 * Returns 0 when the abort was queued (caller then calls
 * snic_abort_finish), SUCCESS when the command already completed, or
 * FAILED when queuing failed.
 */
static int
snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	enum snic_ioreq_state sv_state;
	struct snic_tgt *tgt = NULL;
	spinlock_t *io_lock = NULL;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0, tmf = 0, tag = snic_cmd_tag(sc);

	/* SAN target that is not ready gets a terminate, otherwise abort. */
	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	/* stats */

	io_lock = snic_io_lock_hash(snic, sc);

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by fw_cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happend, the completion wont actually complete the command
	 * and it will be considered as an aborted command
	 *
	 * The CMD_SP will not be cleared except while holding io_lock
	 */
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
			      tag, CMD_FLAGS(sc));
		/* Command already completed; treat the abort as successful. */
		ret = SUCCESS;

		goto send_abts_end;
	}

	rqi->abts_done = &tm_done;
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		/* An abort is already in flight; just wait on it below. */
		spin_unlock_irqrestore(io_lock, flags);

		ret = 0;
		goto abts_pending;
	}
	SNIC_BUG_ON(!rqi->abts_done);

	/* Save Command State, should be restored on failed to Queue. */
	sv_state = CMD_STATE(sc);

	/*
	 * Command is still pending, need to abort it
	 * If the fw completes the command after this point,
	 * the completion won't be done till mid-layer, since abot
	 * has already started.
	 */
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;

	SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);

	spin_unlock_irqrestore(io_lock, flags);

	/* Now Queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
			      tag, ret, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Restore Command's previous state */
		CMD_STATE(sc) = sv_state;
		/* Re-read rqi: it may have been detached while unlocked. */
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;

		goto send_abts_end;
	}

	spin_lock_irqsave(io_lock, flags);
	if (tmf == SNIC_ITMF_ABTS_TASK) {
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED;
		atomic64_inc(&snic->s_stats.abts.num);
	} else {
		/* term stats */
		CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED;
	}
	spin_unlock_irqrestore(io_lock, flags);

	SNIC_SCSI_DBG(snic->shost,
		      "send_abt_cmd: sc %p Tag %x flags 0x%llx\n",
		      sc, tag, CMD_FLAGS(sc));

	ret = 0;

abts_pending:
	/*
	 * Queued an abort IO, wait for its completion.
	 * Once the fw completes the abort command, it will
	 * wakeup this thread.
	 */
	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

send_abts_end:
	return ret;
} /* end of snic_send_abort_and_wait */
/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represent by snic_ioreq in the driver.
 * The snic_ioreq is linked to the SCSI Cmd, thus a link with the ULP'S IO
 *
 * Returns SUCCESS/FAILED/FAST_IO_FAIL per SCSI EH conventions.
 */
int
snic_abort_cmd(struct scsi_cmnd *sc)
{
	struct snic *snic = shost_priv(sc->device->host);
	int ret = SUCCESS, tag = snic_cmd_tag(sc);
	u32 start_time = jiffies;

	SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
		       sc, sc->cmnd[0], sc->request, tag);

	/* An abort cannot be issued while the adapter is not online. */
	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
		SNIC_HOST_ERR(snic->shost,
			      "abt_cmd: tag %x Parent Devs are not rdy\n",
			      tag);
		ret = FAST_IO_FAIL;

		goto abort_end;
	}


	ret = snic_send_abort_and_wait(snic, sc);
	if (ret)
		goto abort_end;

	/* Abort was queued and waited on; now examine its completion. */
	ret = snic_abort_finish(snic, sc);

abort_end:
	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), 0,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	SNIC_SCSI_DBG(snic->shost,
		      "abts: Abort Req Status = %s\n",
		      (ret == SUCCESS) ? "SUCCESS" :
		      ((ret == FAST_IO_FAIL) ? "FAST_IO_FAIL" : "FAILED"));

	return ret;
}
/*
 * snic_is_abts_pending : Scans the tag map for IOs still outstanding with
 * firmware. When @lr_sc is non-NULL only IOs on that command's LUN (other
 * than @lr_sc itself) are considered. Returns 1 if any such IO is in
 * abort-pending state, 0 otherwise.
 */
static int
snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc)
{
	struct scsi_device *sdev = lr_sc ? lr_sc->device : NULL;
	struct scsi_cmnd *cmd;
	spinlock_t *lock;
	unsigned long flags;
	u32 idx;

	/* Walk every tag; each slot is examined under its own io_lock. */
	for (idx = 0; idx < snic->max_tag_id; idx++) {
		int abts_pending;

		lock = snic_io_lock_tag(snic, idx);
		spin_lock_irqsave(lock, flags);

		cmd = scsi_host_find_tag(snic->shost, idx);
		if (!cmd ||
		    (lr_sc && (cmd == lr_sc || cmd->device != sdev)) ||
		    !CMD_SP(cmd)) {
			spin_unlock_irqrestore(lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending w/ firmware and belongs to
		 * the LUN that is under reset, if lr_sc != NULL
		 */
		SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n",
			      snic_ioreq_state_to_str(CMD_STATE(cmd)));

		abts_pending = (CMD_STATE(cmd) == SNIC_IOREQ_ABTS_PENDING);
		spin_unlock_irqrestore(lock, flags);

		if (abts_pending)
			return 1;
	}

	return 0;
} /* end of snic_is_abts_pending */
/*
 * snic_dr_clean_single_req : Aborts one outstanding IO, identified by @tag,
 * that belongs to the LUN being reset (@lr_sdev). IOs on other devices, or
 * already abort-pending, are silently skipped. Returns 0 when the IO was
 * aborted and its request released, 1 on failure.
 */
static int
snic_dr_clean_single_req(struct snic *snic,
			 u32 tag,
			 struct scsi_device *lr_sdev)
{
	struct snic_req_info *rqi = NULL;
	struct snic_tgt *tgt = NULL;
	struct scsi_cmnd *sc = NULL;
	spinlock_t *io_lock = NULL;
	u32 sv_state = 0, tmf = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0;

	io_lock = snic_io_lock_tag(snic, tag);
	spin_lock_irqsave(io_lock, flags);
	sc = scsi_host_find_tag(snic->shost, tag);

	/* Ignore Cmd that don't belong to Lun Reset device */
	if (!sc || sc->device != lr_sdev)
		goto skip_clean;

	rqi = (struct snic_req_info *) CMD_SP(sc);

	if (!rqi)
		goto skip_clean;


	/* Already being aborted; nothing more to do here. */
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
		goto skip_clean;


	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req: devrst is not pending sc 0x%p\n",
			      sc);

		goto skip_clean;
	}

	SNIC_SCSI_DBG(snic->shost,
		      "clean_single_req: Found IO in %s on lun\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));

	/* Save Command State */
	sv_state = CMD_STATE(sc);

	/*
	 * Any pending IO issued prior to reset is expected to be
	 * in abts pending state, if not we need to set SNIC_IOREQ_ABTS_PENDING
	 * to indicate the IO is abort pending.
	 * When IO is completed, the IO will be handed over and handled
	 * in this function.
	 */

	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	SNIC_BUG_ON(rqi->abts_done);

	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
		rqi->tm_tag = SNIC_TAG_DEV_RST;

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req:devrst sc 0x%p\n", sc);
	}

	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
	rqi->abts_done = &tm_done;
	spin_unlock_irqrestore(io_lock, flags);

	/* SAN target that is not ready gets a terminate, otherwise abort. */
	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	/* Now queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "clean_single_req_err:sc %p, tag %d abt failed. tm_tag %d flags 0x%llx\n",
			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Re-read rqi: it may have been detached while unlocked. */
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;

		/* Restore Command State */
		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = sv_state;

		ret = 1;
		goto skip_clean;
	}

	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;

	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

	/* Recheck cmd state to check if it now aborted. */
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		goto skip_clean;
	}
	rqi->abts_done = NULL;

	/* if abort is still pending w/ fw, fail */
	if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) {
		SNIC_HOST_ERR(snic->shost,
			      "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n",
			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));

		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
		ret = 1;

		goto skip_clean;
	}

	CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	snic_release_req_buf(snic, rqi, sc);

	ret = 0;

	return ret;

skip_clean:
	spin_unlock_irqrestore(io_lock, flags);

	return ret;
} /* end of snic_dr_clean_single_req */
/*
 * snic_dr_clean_pending_req : Aborts every pending IO on the LUN being
 * reset, except the LUN-reset command itself, then verifies no aborts are
 * still outstanding with firmware. Returns 0 on success, FAILED otherwise.
 */
static int
snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
{
	struct scsi_device *lr_sdev = lr_sc->device;
	u32 tag = 0;
	int ret = FAILED;

	for (tag = 0; tag < snic->max_tag_id; tag++) {
		/* Skip the LUN reset command itself. */
		if (tag == snic_cmd_tag(lr_sc))
			continue;

		ret = snic_dr_clean_single_req(snic, tag, lr_sdev);
		if (ret) {
			SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag);

			goto clean_err;
		}
	}

	/*
	 * Give the aborts queued above a short window to complete.
	 * Fix: the previous schedule_timeout(msecs_to_jiffies(100)) ran in
	 * TASK_RUNNING state, so it returned immediately without sleeping;
	 * msleep() really waits.
	 */
	msleep(100);

	/* Walk through all the cmds and check abts status. */
	if (snic_is_abts_pending(snic, lr_sc)) {
		ret = FAILED;

		goto clean_err;
	}

	ret = 0;
	SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n");

	return ret;

clean_err:
	ret = FAILED;
	SNIC_HOST_ERR(snic->shost,
		      "Failed to Clean Pending IOs on %s device.\n",
		      dev_name(&lr_sdev->sdev_gendev));

	return ret;
} /* end of snic_dr_clean_pending_req */
/*
 * snic_dr_finish : Called by snic_device_reset
 *
 * Examines the LUN reset completion status and, on success, aborts any
 * remaining IOs on the LUN via snic_dr_clean_pending_req. Returns
 * SUCCESS/FAILED for the SCSI error handler.
 */
static int
snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int lr_res = 0;
	int ret = FAILED;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n",
			      snic_cmd_tag(sc), sc, CMD_FLAGS(sc));
		ret = FAILED;

		goto dr_fini_end;
	}

	/* Detach the waiter; completion path must not signal dr_done now. */
	rqi->dr_done = NULL;

	lr_res = CMD_LR_STATUS(sc);

	switch (lr_res) {
	case SNIC_INVALID_CODE:
		/* stats */
		/* Status never filled in: firmware did not complete in time. */
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Tag %x Dev Reset Timedout. flags 0x%llx\n",
			      snic_cmd_tag(sc), CMD_FLAGS(sc));

		CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT;
		ret = FAILED;

		goto dr_failed;

	case SNIC_STAT_IO_SUCCESS:
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Tag %x Dev Reset cmpl\n",
			      snic_cmd_tag(sc));
		ret = 0;
		break;

	default:
		SNIC_HOST_ERR(snic->shost,
			      "dr_fini:Device Reset completed& failed.Tag = %x lr_status %s flags 0x%llx\n",
			      snic_cmd_tag(sc),
			      snic_io_status_to_str(lr_res), CMD_FLAGS(sc));
		ret = FAILED;

		goto dr_failed;
	}

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Cleanup any IOs on this LUN that have still not completed.
	 * If any of these fail, then LUN Reset fails.
	 * Cleanup cleans all commands on this LUN except
	 * the lun reset command. If all cmds get cleaned, the LUN Reset
	 * succeeds.
	 */

	ret = snic_dr_clean_pending_req(snic, sc);
	if (ret) {
		spin_lock_irqsave(io_lock, flags);
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n",
			      snic_cmd_tag(sc));
		/* Re-read rqi under the lock before releasing it below. */
		rqi = (struct snic_req_info *) CMD_SP(sc);

		goto dr_failed;
	} else {
		/* Cleanup LUN Reset Command */
		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			ret = SUCCESS; /* Completed Successfully */
		else
			ret = FAILED;
	}

dr_failed:
	/* Every path into this label holds io_lock. */
	SNIC_BUG_ON(!spin_is_locked(io_lock));
	if (rqi)
		CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

dr_fini_end:
	return ret;
} /* end of snic_dr_finish */
/*
 * snic_queue_dr_req : Tags the request as a device reset and queues a
 * LUN RESET task-management request to firmware. Returns the result of
 * snic_issue_tm_req.
 */
static int
snic_queue_dr_req(struct snic *snic,
		  struct snic_req_info *rqi,
		  struct scsi_cmnd *sc)
{
	/* Add special tag for device reset */
	rqi->tm_tag |= SNIC_TAG_DEV_RST;

	return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET);
}
/*
 * snic_send_dr_and_wait : Queues a LUN RESET to firmware and waits up to
 * SNIC_LUN_RESET_TIMEOUT for its completion. Returns 0 when the request
 * was queued (caller then inspects the result via snic_dr_finish),
 * FAILED otherwise.
 */
static int
snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	enum snic_ioreq_state sv_state;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int ret = FAILED, tag = snic_cmd_tag(sc);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_DEVICE_RESET;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		SNIC_HOST_ERR(snic->shost,
			      "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n",
			      tag, CMD_FLAGS(sc));
		spin_unlock_irqrestore(io_lock, flags);

		ret = FAILED;
		goto send_dr_end;
	}

	/* Save Command state to restore in case Queuing failed. */
	sv_state = CMD_STATE(sc);

	CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING;
	CMD_LR_STATUS(sc) = SNIC_INVALID_CODE;

	SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag);

	rqi->dr_done = &tm_done;
	SNIC_BUG_ON(!rqi->dr_done);

	spin_unlock_irqrestore(io_lock, flags);
	/*
	 * The Command state is changed to IOREQ_PENDING,
	 * in this case, if the command is completed, the icmnd_cmpl will
	 * mark the cmd as completed.
	 * This logic still makes LUN Reset is inevitable.
	 */

	ret = snic_queue_dr_req(snic, rqi, sc);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
			      tag, ret, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Restore State */
		CMD_STATE(sc) = sv_state;
		/* Re-read rqi: it may have been detached while unlocked. */
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->dr_done = NULL;
		/* rqi is freed in caller. */
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;

		goto send_dr_end;
	}

	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	ret = 0;

	wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT);

send_dr_end:
	return ret;
}
/*
 * snic_dev_reset_supported : Auxiliary check for LUN reset support.
 *
 * Returns 0 when the target does not support a device (LUN) reset
 * (DAS targets), nonzero otherwise.
 */
static int
snic_dev_reset_supported(struct scsi_device *sdev)
{
	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));

	return (tgt->tdata.typ != SNIC_TGT_DAS);
}
/*
 * snic_unlink_and_release_req : Detaches the snic request from the SCSI
 * cmnd under io_lock, ORs @flag into the cmnd flags, releases the request
 * buffer (if any) and records a trace entry. Used on error paths where the
 * command will not complete through the normal firmware path.
 */
static void
snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 start_time = jiffies;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (rqi) {
		/* Use the request's own start time for the trace duration. */
		start_time = rqi->start_time;
		CMD_SP(sc) = NULL;
	}

	CMD_FLAGS(sc) |= flag;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

	SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), (ulong) rqi,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
}
/*
* SCSI Eh thread issues a LUN Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI
* command on the LUN.
*/
int
snic_device_reset(struct scsi_cmnd *sc)
{
struct Scsi_Host *shost = sc->device->host;
struct snic *snic = shost_priv(shost);
struct snic_req_info *rqi = NULL;
int tag = snic_cmd_tag(sc);
int start_time = jiffies;
int ret = FAILED;
int dr_supp = 0;
SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
sc, sc->cmnd[0], sc->request,
snic_cmd_tag(sc));
dr_supp = snic_dev_reset_supported(sc->device);
if (!dr_supp) {
/* device reset op is not supported */
SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n");
snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP);
goto dev_rst_end;
}
if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
snic_unlink_and_release_req(snic, sc, 0);
SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n");
goto dev_rst_end;
}
/* There is no tag when lun reset is issue through ioctl. */
if (unlikely(tag <= SNIC_NO_TAG)) {
SNIC_HOST_INFO(snic->shost,
"Devrst: LUN Reset Recvd thru IOCTL.\n");
rqi = snic_req_init(snic, 0);
if (!rqi)
goto dev_rst_end;
memset(scsi_cmd_priv(sc), 0,
sizeof(struct snic_internal_io_state));
CMD_SP(sc) = (char *)rqi;
CMD_FLAGS(sc) = SNIC_NO_FLAGS;
/* Add special tag for dr coming from user spc */
rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST;
rqi->sc = sc;
}
ret = snic_send_dr_and_wait(snic, sc);
if (ret) {
SNIC_HOST_ERR(snic->shost,
"Devrst: IO w/ Tag %x Failed w/ err = %d\n",
tag, ret);
snic_unlink_and_release_req(snic, sc, 0);
goto dev_rst_end;
}
ret = snic_dr_finish(snic, sc);
dev_rst_end:
SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
jiffies_to_msecs(jiffies - start_time),
0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
SNIC_SCSI_DBG(snic->shost,
"Devrst: Returning from Device Reset : %s\n",
(ret == SUCCESS) ? "SUCCESS" : "FAILED");
return ret;
} /* end of snic_device_reset */
/*
* SCSI Error handling calls driver's eh_host_reset if all prior
* error handling levels return FAILED.
*
* Host Reset is the highest level of error recovery. If this fails, then
* host is offlined by SCSI.
*/
/*
* snic_issue_hba_reset : Queues FW Reset Request.
*/
/*
 * snic_issue_hba_reset : Queues FW Reset Request.
 *
 * Allocates a request, queues SNIC_REQ_HBA_RESET to firmware, then waits
 * up to SNIC_HOST_RESET_TIMEOUT on snic->remove_wait for the completion.
 * Returns 0 on success, negative errno on failure.
 */
static int
snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	struct snic_host_req *req = NULL;
	spinlock_t *io_lock = NULL;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	int ret = -ENOMEM;

	rqi = snic_req_init(snic, 0);
	if (!rqi) {
		ret = -ENOMEM;

		goto hba_rst_end;
	}

	/* A host reset arriving via ioctl carries no real command tag. */
	if (snic_cmd_tag(sc) == SCSI_NO_TAG) {
		memset(scsi_cmd_priv(sc), 0,
		       sizeof(struct snic_internal_io_state));
		SNIC_HOST_INFO(snic->shost, "issu_hr:Host reset thru ioctl.\n");
		rqi->sc = sc;
	}

	req = rqi_to_req(rqi);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	SNIC_BUG_ON(CMD_SP(sc) != NULL);
	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED;
	snic->remove_wait = &wait;
	spin_unlock_irqrestore(io_lock, flags);

	/* Initialize Request */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
			snic->config.hid, 0, (ulong) rqi);

	req->u.reset.flags = 0;

	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issu_hr:Queuing HBA Reset Failed. w err %d\n",
			      ret);

		goto hba_rst_err;
	}

	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);
	atomic64_inc(&snic->s_stats.reset.hba_resets);
	SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n");

	wait_for_completion_timeout(snic->remove_wait,
				    SNIC_HOST_RESET_TIMEOUT);

	/* Still in FWRESET state means the completion never arrived. */
	if (snic_get_state(snic) == SNIC_FWRESET) {
		SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset Timedout.\n");
		ret = -ETIMEDOUT;

		goto hba_rst_err;
	}

	spin_lock_irqsave(io_lock, flags);
	snic->remove_wait = NULL;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_req_free(snic, rqi);

	ret = 0;

	return ret;

hba_rst_err:
	/* Undo the waiter/request linkage set up above. */
	spin_lock_irqsave(io_lock, flags);
	snic->remove_wait = NULL;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_req_free(snic, rqi);

hba_rst_end:
	SNIC_HOST_ERR(snic->shost,
		      "reset:HBA Reset Failed w/ err = %d.\n",
		      ret);

	return ret;
} /* end of snic_issue_hba_reset */
/*
 * snic_reset : Issues an HBA/firmware reset after draining in-flight IOs.
 *
 * If another reset is already in progress, waits it out and reports
 * SUCCESS. Otherwise moves the snic to SNIC_FWRESET, waits for in-flight
 * queuecommand activity to drain, and issues the HBA reset. Returns
 * SUCCESS/FAILED for the SCSI error handler.
 */
int
snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct snic *snic = shost_priv(shost);
	enum snic_state sv_state;
	unsigned long flags;
	int ret = FAILED;

	/* Set snic state as SNIC_FWRESET*/
	sv_state = snic_get_state(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic_get_state(snic) == SNIC_FWRESET) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		SNIC_HOST_INFO(shost, "reset:prev reset is in progres\n");

		/* Give the in-progress reset time to finish. */
		msleep(SNIC_HOST_RESET_TIMEOUT);
		ret = SUCCESS;

		goto reset_end;
	}

	snic_set_state(snic, SNIC_FWRESET);
	spin_unlock_irqrestore(&snic->snic_lock, flags);


	/* Wait for all the IOs that are entered in Qcmd */
	while (atomic_read(&snic->ios_inflight))
		/*
		 * Fix: plain schedule_timeout() in TASK_RUNNING state
		 * returns immediately and busy-spins; the uninterruptible
		 * variant sets the task state so we actually sleep ~1ms
		 * between polls.
		 */
		schedule_timeout_uninterruptible(msecs_to_jiffies(1));

	ret = snic_issue_hba_reset(snic, sc);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "reset:Host Reset Failed w/ err %d.\n",
			      ret);
		/* Reset never went out; restore the previous snic state. */
		spin_lock_irqsave(&snic->snic_lock, flags);
		snic_set_state(snic, sv_state);
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		atomic64_inc(&snic->s_stats.reset.hba_reset_fail);
		ret = FAILED;

		goto reset_end;
	}

	ret = SUCCESS;

reset_end:
	return ret;
} /* end of snic_reset */
/*
 * snic_host_reset : SCSI eh_host_reset handler.
 *
 * Invoked when all prior levels of error recovery returned FAILED; host
 * reset is the last resort before SCSI offlines the host. Delegates the
 * actual work to snic_reset() and records a trace entry for the outcome.
 */
int
snic_host_reset(struct scsi_cmnd *sc)
{
	struct Scsi_Host *shost = sc->device->host;
	u32 t_start = jiffies;
	int rc;

	SNIC_SCSI_DBG(shost,
		      "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
		      sc, sc->cmnd[0], sc->request,
		      snic_cmd_tag(sc), CMD_FLAGS(sc));

	rc = snic_reset(shost, sc);

	SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc,
		 jiffies_to_msecs(jiffies - t_start),
		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	return rc;
} /* end of snic_host_reset */
/*
 * snic_cmpl_pending_tmreq : Caller should hold io_lock
 *
 * Wakes up whichever task-management waiter is attached to the command:
 * the device-reset waiter takes precedence over the abort waiter.
 */
static void
snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *req;
	struct completion *done = NULL;

	SNIC_SCSI_DBG(snic->shost,
		      "Completing Pending TM Req sc %p, state %s flags 0x%llx\n",
		      sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc));

	req = (struct snic_req_info *) CMD_SP(sc);
	if (!req)
		return;

	if (req->dr_done)
		done = req->dr_done;
	else if (req->abts_done)
		done = req->abts_done;

	if (done)
		complete(done);
}
/*
 * snic_scsi_cleanup: Walks through tag map and releases the reqs
 *
 * Used when firmware will not deliver completions for outstanding IOs
 * (reset/shutdown). Every command except @ex_tag is failed back to the
 * midlayer with DID_TRANSPORT_DISRUPTED; pending TM waiters are woken.
 */
static void
snic_scsi_cleanup(struct snic *snic, int ex_tag)
{
	struct snic_req_info *rqi = NULL;
	struct scsi_cmnd *sc = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int tag;
	u64 st_time = 0;

	SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n");

	for (tag = 0; tag < snic->max_tag_id; tag++) {
		/* Skip ex_tag */
		if (tag == ex_tag)
			continue;

		io_lock = snic_io_lock_tag(snic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		if (unlikely(snic_tmreq_pending(sc))) {
			/*
			 * When FW Completes reset w/o sending completions
			 * for outstanding ios.
			 */
			snic_cmpl_pending_tmreq(snic, sc);
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (!rqi) {
			spin_unlock_irqrestore(io_lock, flags);

			goto cleanup;
		}

		SNIC_SCSI_DBG(snic->shost,
			      "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n",
			      sc, rqi, tag, CMD_FLAGS(sc));

		CMD_SP(sc) = NULL;
		CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP;
		spin_unlock_irqrestore(io_lock, flags);
		st_time = rqi->start_time;

		SNIC_HOST_INFO(snic->shost,
			       "sc_clean: Releasing rqi %p : flags 0x%llx\n",
			       rqi, CMD_FLAGS(sc));

		snic_release_req_buf(snic, rqi, sc);

cleanup:
		/*
		 * NOTE(review): on the !rqi path, rqi and st_time below are
		 * stale/zero from a previous iteration, so the rqi pointer
		 * and duration logged here are not meaningful then.
		 */
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		SNIC_HOST_INFO(snic->shost,
			       "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. rqi %p duration %llu msecs\n",
			       sc, rqi, (jiffies - st_time));

		/* Update IO stats */
		snic_stats_update_io_cmpl(&snic->s_stats);

		if (sc->scsi_done) {
			SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
				 jiffies_to_msecs(jiffies - st_time), 0,
				 SNIC_TRC_CMD(sc),
				 SNIC_TRC_CMD_STATE_FLAGS(sc));

			sc->scsi_done(sc);
		}
	}
} /* end of snic_scsi_cleanup */
/*
 * snic_shutdown_scsi_cleanup : shutdown-time wrapper around
 * snic_scsi_cleanup; passes SCSI_NO_TAG as the exempt tag so that,
 * presumably, no outstanding command is skipped.
 */
void
snic_shutdown_scsi_cleanup(struct snic *snic)
{
	SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n");

	snic_scsi_cleanup(snic, SCSI_NO_TAG);
} /* end of snic_shutdown_scsi_cleanup */
/*
 * snic_internal_abort_io
 * called by : snic_tgt_scsi_abort_io
 *
 * Issues an abort/terminate (@tmf) for one outstanding command on a
 * target being removed. Returns SUCCESS when the abort was queued, 0 when
 * the command needed no abort, or the (nonzero) error from
 * snic_queue_abort_req on failure.
 */
static int
snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 sv_state = 0;
	int ret = 0;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi)
		goto skip_internal_abts;

	/* Already being aborted elsewhere. */
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
		goto skip_internal_abts;

	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {

		SNIC_SCSI_DBG(snic->shost,
			      "internal_abts: dev rst not pending sc 0x%p\n",
			      sc);

		goto skip_internal_abts;
	}


	if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) {
		SNIC_SCSI_DBG(snic->shost,
			      "internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n",
			      sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc));

		goto skip_internal_abts;
	}

	/* Save state so it can be restored if queuing the abort fails. */
	sv_state = CMD_STATE(sc);
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING;

	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
		/* stats */
		rqi->tm_tag = SNIC_TAG_DEV_RST;
		SNIC_SCSI_DBG(snic->shost, "internal_abts:dev rst sc %p\n", sc);
	}

	SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n",
		      snic_cmd_tag(sc));
	SNIC_BUG_ON(rqi->abts_done);
	spin_unlock_irqrestore(io_lock, flags);

	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "internal_abts: Tag = %x , Failed w/ err = %d\n",
			      snic_cmd_tag(sc), ret);

		spin_lock_irqsave(io_lock, flags);

		/* Restore only if nothing else changed the state meanwhile. */
		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = sv_state;

		goto skip_internal_abts;
	}

	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
	else
		CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;

	ret = SUCCESS;

skip_internal_abts:
	/* Every path into this label holds io_lock. */
	SNIC_BUG_ON(!spin_is_locked(io_lock));
	spin_unlock_irqrestore(io_lock, flags);

	return ret;
} /* end of snic_internal_abort_io */
/*
 * snic_tgt_scsi_abort_io : called by snic_tgt_del
 *
 * Walks the entire tag map and internally aborts every outstanding IO
 * that belongs to @tgt. Individual abort failures are only logged.
 * Returns 0, or -1 when @tgt is NULL.
 */
int
snic_tgt_scsi_abort_io(struct snic_tgt *tgt)
{
	struct snic *snic = NULL;
	struct scsi_cmnd *sc = NULL;
	struct snic_tgt *sc_tgt = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int ret = 0, tag, abt_cnt = 0, tmf = 0;

	if (!tgt)
		return -1;

	snic = shost_priv(snic_tgt_to_shost(tgt));
	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n");

	/* DAS targets get a plain abort, others abort+terminate. */
	if (tgt->tdata.typ == SNIC_TGT_DAS)
		tmf = SNIC_ITMF_ABTS_TASK;
	else
		tmf = SNIC_ITMF_ABTS_TASK_TERM;

	for (tag = 0; tag < snic->max_tag_id; tag++) {
		io_lock = snic_io_lock_tag(snic, tag);

		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		sc_tgt = starget_to_tgt(scsi_target(sc->device));
		if (sc_tgt != tgt) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}
		spin_unlock_irqrestore(io_lock, flags);

		ret = snic_internal_abort_io(snic, sc, tmf);
		if (ret < 0) {
			SNIC_HOST_ERR(snic->shost,
				      "tgt_abt_io: Tag %x, Failed w err = %d\n",
				      tag, ret);

			continue;
		}

		if (ret == SUCCESS)
			abt_cnt++;
	}

	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt);

	return 0;
} /* end of snic_tgt_scsi_abort_io */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_STATS_H
#define __SNIC_STATS_H
/* IO path counters; all fields updated with atomic64 operations. */
struct snic_io_stats {
	atomic64_t active;		/* Active IOs */
	atomic64_t max_active;		/* Max # active IOs */
	atomic64_t max_sgl;		/* Max # SGLs for any IO */
	atomic64_t max_time;		/* Max time to process IO */
	atomic64_t max_qtime;		/* Max time to Queue the IO */
	atomic64_t max_cmpl_time;	/* Max time to complete the IO */
	atomic64_t sgl_cnt[SNIC_MAX_SG_DESC_CNT]; /* SGL Counters */
	atomic64_t max_io_sz;		/* Max IO Size */
	atomic64_t compl;		/* IO Completions */
	atomic64_t fail;		/* IO Failures */
	atomic64_t req_null;		/* req or req info is NULL */
	atomic64_t alloc_fail;		/* Alloc Failures */
	atomic64_t sc_null;		/* presumably: scsi_cmnd was NULL — see users */
	atomic64_t io_not_found;	/* IO Not Found */
	atomic64_t num_ios;		/* Number of IOs */
};
/* Abort/terminate path counters. */
struct snic_abort_stats {
	atomic64_t num;		/* Abort counter */
	atomic64_t fail;	/* Abort Failure Counter */
	atomic64_t drv_tmo;	/* Abort Driver Timeouts */
	atomic64_t fw_tmo;	/* Abort Firmware Timeouts */
	atomic64_t io_not_found;/* Abort IO Not Found */
};
/* Device-, HBA- and snic-level reset counters. */
struct snic_reset_stats {
	atomic64_t dev_resets;		/* Device Reset Counter */
	atomic64_t dev_reset_fail;	/* Device Reset Failures */
	atomic64_t dev_reset_aborts;	/* Device Reset Aborts */
	atomic64_t dev_reset_tmo;	/* Device Reset Timeout */
	atomic64_t dev_reset_terms;	/* Device Reset terminate */
	atomic64_t hba_resets;		/* hba/firmware resets */
	atomic64_t hba_reset_cmpl;	/* hba/firmware reset completions */
	atomic64_t hba_reset_fail;	/* hba/firmware failures */
	atomic64_t snic_resets;		/* snic resets */
	atomic64_t snic_reset_compl;	/* snic reset completions */
	atomic64_t snic_reset_fail;	/* snic reset failures */
};
/* Firmware resource and error counters. */
struct snic_fw_stats {
	atomic64_t actv_reqs;		/* Active Requests */
	atomic64_t max_actv_reqs;	/* Max Active Requests */
	atomic64_t out_of_res;		/* Firmware Out Of Resources */
	atomic64_t io_errs;		/* Firmware IO Firmware Errors */
	atomic64_t scsi_errs;		/* Target hits check condition */
};
/* Miscellaneous event counters and timestamps. */
struct snic_misc_stats {
	u64 last_isr_time;		/* time of last ISR — set by users elsewhere */
	u64 last_ack_time;		/* time of last ack — set by users elsewhere */
	atomic64_t isr_cnt;		/* ISR invocations */
	atomic64_t max_cq_ents;		/* Max CQ Entries */
	atomic64_t data_cnt_mismat;	/* Data Count Mismatch */
	atomic64_t io_tmo;		/* IO timeouts */
	atomic64_t io_aborted;		/* IOs aborted */
	atomic64_t sgl_inval;		/* SGL Invalid */
	atomic64_t abts_wq_alloc_fail;	/* Abort Path WQ desc alloc failure */
	atomic64_t devrst_wq_alloc_fail;/* Device Reset - WQ desc alloc fail */
	atomic64_t wq_alloc_fail;	/* IO WQ desc alloc failure */
	atomic64_t no_icmnd_itmf_cmpls;	/* completions w/o icmnd/itmf match */
	atomic64_t io_under_run;	/* IO underruns */
	atomic64_t qfull;		/* queue-full events */
	atomic64_t tgt_not_rdy;		/* target-not-ready events */
};
/* Top-level per-snic statistics container. */
struct snic_stats {
	struct snic_io_stats io;	/* IO path counters */
	struct snic_abort_stats abts;	/* abort counters */
	struct snic_reset_stats reset;	/* reset counters */
	struct snic_fw_stats fw;	/* firmware counters */
	struct snic_misc_stats misc;	/* misc counters */
	atomic64_t io_cmpl_skip;	/* completions to skip counting; consumed by snic_stats_update_io_cmpl() */
};
int snic_stats_debugfs_init(struct snic *);
void snic_stats_debugfs_remove(struct snic *);
/*
 * snic_stats_update_active_ios : bumps the active-IO and total-IO
 * counters and tracks the high-water mark of concurrently active IOs.
 */
static inline void
snic_stats_update_active_ios(struct snic_stats *s_stats)
{
	struct snic_io_stats *io_stats = &s_stats->io;
	u32 active_cnt;

	active_cnt = atomic64_inc_return(&io_stats->active);
	if (atomic64_read(&io_stats->max_active) < active_cnt)
		atomic64_set(&io_stats->max_active, active_cnt);

	atomic64_inc(&io_stats->num_ios);
}
/*
 * snic_stats_update_io_cmpl : drops the active-IO count on completion.
 * If a completion-skip is pending it is consumed instead of bumping the
 * completion counter.
 */
static inline void
snic_stats_update_io_cmpl(struct snic_stats *s_stats)
{
	atomic64_dec(&s_stats->io.active);

	if (unlikely(atomic64_read(&s_stats->io_cmpl_skip) != 0))
		atomic64_dec(&s_stats->io_cmpl_skip);
	else
		atomic64_inc(&s_stats->io.compl);
}
#endif /* __SNIC_STATS_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include "snic_io.h"
#include "snic.h"
/*
 * snic_get_trc_buf : Allocates a trace record and returns.
 *
 * Hands out the next slot of the global circular trace buffer. If the
 * write index catches up with the read index, the oldest unread record
 * is dropped (rd_idx advanced) and the returned record's timestamp is
 * zeroed as an "incomplete record" marker for readers.
 */
struct snic_trc_data *
snic_get_trc_buf(void)
{
	struct snic_trc *trc = &snic_glob->trc;
	struct snic_trc_data *td = NULL;
	unsigned long flags;

	spin_lock_irqsave(&trc->lock, flags);
	td = &trc->buf[trc->wr_idx];
	trc->wr_idx++;

	if (trc->wr_idx == trc->max_idx)
		trc->wr_idx = 0;

	if (trc->wr_idx != trc->rd_idx) {
		spin_unlock_irqrestore(&trc->lock, flags);

		goto end;
	}

	/* Buffer is full: drop the oldest record to make room. */
	trc->rd_idx++;
	if (trc->rd_idx == trc->max_idx)
		trc->rd_idx = 0;

	td->ts = 0;	/* Marker for checking the record, for complete data*/
	spin_unlock_irqrestore(&trc->lock, flags);

end:

	return td;
} /* end of snic_get_trc_buf */
/*
 * snic_fmt_trc_data : Formats trace data for printing.
 *
 * Renders one trace record into @buf as
 * "sec.nsec function host tag data[0..4]". Returns the length snprintf
 * reports (the would-be length, even if @buf_sz truncated the output).
 */
static int
snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
{
	int len = 0;
	struct timespec tmspec;

	jiffies_to_timespec(td->ts, &tmspec);

	/*
	 * Fix: tv_nsec must be zero-padded to 9 digits ("%09lu"); the
	 * previous "%10lu" space-padding misrendered sub-second values
	 * (e.g. 5000 ns printed as "0.      5000" instead of
	 * "0.000005000").
	 */
	len += snprintf(buf, buf_sz,
			"%lu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
			tmspec.tv_sec,
			tmspec.tv_nsec,
			td->fn,
			td->hno,
			td->tag,
			td->data[0], td->data[1], td->data[2], td->data[3],
			td->data[4]);

	return len;
} /* end of snic_fmt_trc_data */
/*
 * snic_get_trc_data : Returns a formatted trace buffer.
 *
 * Consumes the oldest complete record of the global trace ring and
 * formats it into @buf. Returns the formatted length, or -1 when the
 * ring is empty or the next record is still being written (ts == 0
 * marker set by snic_get_trc_buf).
 */
int
snic_get_trc_data(char *buf, int buf_sz)
{
	struct snic_trc_data *td = NULL;
	struct snic_trc *trc = &snic_glob->trc;
	unsigned long flags;

	spin_lock_irqsave(&trc->lock, flags);
	if (trc->rd_idx == trc->wr_idx) {
		spin_unlock_irqrestore(&trc->lock, flags);

		return -1;
	}
	td = &trc->buf[trc->rd_idx];

	if (td->ts == 0) {
		/* write in progress. */
		spin_unlock_irqrestore(&trc->lock, flags);

		return -1;
	}

	trc->rd_idx++;
	if (trc->rd_idx == trc->max_idx)
		trc->rd_idx = 0;
	spin_unlock_irqrestore(&trc->lock, flags);

	/* Format outside the lock; the record has been consumed above. */
	return snic_fmt_trc_data(td, buf, buf_sz);
} /* end of snic_get_trc_data */
/*
 * snic_trc_init() : Configures Trace Functionality for snic.
 *
 * Allocates the trace ring (snic_trace_max_pages pages), creates the
 * debugfs entries, and enables tracing.  Returns 0 on success or a
 * negative errno.
 */
int
snic_trc_init(void)
{
	struct snic_trc *trc = &snic_glob->trc;
	void *tbuf = NULL;
	int tbuf_sz = 0, ret;

	tbuf_sz = (snic_trace_max_pages * PAGE_SIZE);
	/* vzalloc() = vmalloc() + zeroing; no separate memset() needed. */
	tbuf = vzalloc(tbuf_sz);
	if (!tbuf) {
		SNIC_ERR("Failed to Allocate Trace Buffer Size. %d\n", tbuf_sz);
		SNIC_ERR("Trace Facility not enabled.\n");
		ret = -ENOMEM;

		return ret;
	}

	trc->buf = (struct snic_trc_data *) tbuf;
	spin_lock_init(&trc->lock);

	ret = snic_trc_debugfs_init();
	if (ret) {
		SNIC_ERR("Failed to create Debugfs Files.\n");

		goto error;
	}

	/* Record indices cover the whole buffer in fixed-size entries. */
	trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ);
	trc->rd_idx = trc->wr_idx = 0;
	trc->enable = 1;
	SNIC_INFO("Trace Facility Enabled.\n Trace Buffer SZ %lu Pages.\n",
		  tbuf_sz / PAGE_SIZE);
	ret = 0;

	return ret;

error:
	snic_trc_free();

	return ret;
} /* end of snic_trc_init */
/*
 * snic_trc_free : Releases the trace buffer and disables the tracing.
 */
void
snic_trc_free(void)
{
	struct snic_trc *trc = &snic_glob->trc;

	trc->enable = 0;
	snic_trc_debugfs_term();

	/* vfree(NULL) is a no-op, so no need to test trc->buf first. */
	vfree(trc->buf);
	trc->buf = NULL;

	SNIC_INFO("Trace Facility Disabled.\n");
} /* end of snic_trc_free */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_TRC_H
#define __SNIC_TRC_H
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
extern ssize_t simple_read_from_buffer(void __user *to,
size_t count,
loff_t *ppos,
const void *from,
size_t available);
extern unsigned int snic_trace_max_pages;
/* Global Data structure for trace to manage trace functionality */

/*
 * One trace record.  The producer (snic_trace()) writes ts last, so
 * ts == 0 marks a record that is still being written.
 */
struct snic_trc_data {
	u64	ts;		/* Time Stamp */
	char	*fn;		/* Ptr to Function Name */
	u32	hno;		/* SCSI Host ID */
	u32	tag;		/* Command Tag */
	u64	data[5];	/* caller-supplied payload words */
} __attribute__((__packed__));

/* Size of one record slot in the trace ring, in bytes. */
#define SNIC_TRC_ENTRY_SZ  64	/* in Bytes */
/*
 * Per-driver trace state: a circular buffer of snic_trc_data records
 * guarded by a spinlock, plus the debugfs objects that expose it.
 */
struct snic_trc {
	spinlock_t lock;
	struct snic_trc_data *buf;	/* Trace Buffer */
	u32	max_idx;		/* Max Index into trace buffer */
	u32	rd_idx;			/* next record to read */
	u32	wr_idx;			/* next record to write */
	u32	enable;			/* Control Variable for Tracing */
	struct dentry *trc_enable;	/* debugfs file object */
	struct dentry *trc_file;
};
int snic_trc_init(void);
void snic_trc_free(void);
int snic_trc_debugfs_init(void);
void snic_trc_debugfs_term(void);
struct snic_trc_data *snic_get_trc_buf(void);
int snic_get_trc_data(char *buf, int buf_sz);
int snic_debugfs_init(void);
void snic_debugfs_term(void);
/*
 * snic_trace - record one entry in the global trace ring.
 *
 * The timestamp is stored last on purpose: readers treat ts == 0 as
 * "record still being written" (see snic_get_trc_data()).
 */
static inline void
snic_trace(char *fn, u16 hno, u32 tag, u64 d1, u64 d2, u64 d3, u64 d4, u64 d5)
{
	struct snic_trc_data *rec = snic_get_trc_buf();

	if (!rec)
		return;

	rec->fn = (char *)fn;
	rec->hno = hno;
	rec->tag = tag;
	rec->data[0] = d1;
	rec->data[1] = d2;
	rec->data[2] = d3;
	rec->data[3] = d4;
	rec->data[4] = d5;
	rec->ts = jiffies; /* Update time stamp at last */
}
/*
 * SNIC_TRC - record a trace entry when tracing is enabled.
 * With debugfs support the entry goes into the in-memory ring;
 * otherwise it is emitted through SNIC_DBG when bit 1 of
 * snic_log_level is set.
 */
#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5)		\
	do {							\
		if (unlikely(snic_glob->trc.enable))		\
			snic_trace((char *)__func__,		\
				   (u16)(_hno),			\
				   (u32)(_tag),			\
				   (u64)(d1),			\
				   (u64)(d2),			\
				   (u64)(d3),			\
				   (u64)(d4),			\
				   (u64)(d5));			\
	} while (0)
#else

#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5)		\
	do {							\
		if (unlikely(snic_log_level & 0x2))		\
			SNIC_DBG("SnicTrace: %s %2u %2u %llx %llx %llx %llx %llx", \
				 (char *)__func__,		\
				 (u16)(_hno),			\
				 (u32)(_tag),			\
				 (u64)(d1),			\
				 (u64)(d2),			\
				 (u64)(d3),			\
				 (u64)(d4),			\
				 (u64)(d5));			\
	} while (0)
#endif /* end of CONFIG_SCSI_SNIC_DEBUG_FS */

/* Pack selected CDB bytes of a scsi_cmnd into one u64 for tracing. */
#define SNIC_TRC_CMD(sc)	\
	((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |	\
	 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |	\
	 (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |	\
	 (u64)sc->cmnd[5])

/* Pack a command's flags (high 32 bits) and state (low 32) for tracing. */
#define SNIC_TRC_CMD_STATE_FLAGS(sc)	\
	((u64) CMD_FLAGS(sc) << 32 | CMD_STATE(sc))
#endif /* end of __SNIC_TRC_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
/* svnic_cq_free - release the CQ's descriptor ring and drop its ctrl ref. */
void svnic_cq_free(struct vnic_cq *cq)
{
	cq->ctrl = NULL;

	svnic_dev_free_desc_ring(cq->vdev, &cq->ring);
}
/*
 * svnic_cq_alloc - bind CQ `index` to its control registers and allocate
 * its descriptor ring.  Returns 0 on success or a negative errno.
 */
int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size)
{
	cq->index = index;
	cq->vdev = vdev;

	cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index);
	if (!cq->ctrl) {
		pr_err("Failed to hook CQ[%d] resource\n", index);

		return -EINVAL;
	}

	return svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count,
					 desc_size);
}
/*
 * svnic_cq_init - program the CQ's memory-mapped control registers with
 * its ring base/size and the supplied configuration.  Pure register
 * writes; the write order is preserved as-is.
 */
void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
	unsigned int cq_tail_color, unsigned int interrupt_enable,
	unsigned int cq_entry_enable, unsigned int cq_message_enable,
	unsigned int interrupt_offset, u64 cq_message_addr)
{
	u64 paddr;

	/* Hardware expects the ring's bus address tagged with the target bits. */
	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &cq->ctrl->ring_base);
	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
	iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
	iowrite32(color_enable, &cq->ctrl->color_enable);
	iowrite32(cq_head, &cq->ctrl->cq_head);
	iowrite32(cq_tail, &cq->ctrl->cq_tail);
	iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
	iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
	iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
}
/*
 * svnic_cq_clean - reset the CQ to its initial state: software indices,
 * hardware head/tail registers, and the descriptor ring contents.
 */
void svnic_cq_clean(struct vnic_cq *cq)
{
	cq->to_clean = 0;
	cq->last_color = 0;

	iowrite32(0, &cq->ctrl->cq_head);
	iowrite32(0, &cq->ctrl->cq_tail);
	/* Tail color starts at 1 so the first HW-written color (0->1 flip)
	 * is distinguishable from unwritten descriptors. */
	iowrite32(1, &cq->ctrl->cq_tail_color);

	svnic_dev_clear_desc_ring(&cq->ring);
}
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
#include "cq_desc.h"
#include "vnic_dev.h"
/* Completion queue control */

/*
 * Memory-mapped CQ control register block.  Each 32-bit register
 * occupies an 8-byte slot (hence the pad words); the hex comments give
 * each field's byte offset within the block.  Layout must match the
 * hardware exactly -- do not reorder.
 */
struct vnic_cq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 flow_control_enable;	/* 0x10 */
	u32 pad1;
	u32 color_enable;		/* 0x18 */
	u32 pad2;
	u32 cq_head;			/* 0x20 */
	u32 pad3;
	u32 cq_tail;			/* 0x28 */
	u32 pad4;
	u32 cq_tail_color;		/* 0x30 */
	u32 pad5;
	u32 interrupt_enable;		/* 0x38 */
	u32 pad6;
	u32 cq_entry_enable;		/* 0x40 */
	u32 pad7;
	u32 cq_message_enable;		/* 0x48 */
	u32 pad8;
	u32 interrupt_offset;		/* 0x50 */
	u32 pad9;
	u64 cq_message_addr;		/* 0x58 */
	u32 pad10;
};
/* Software state for one completion queue. */
struct vnic_cq {
	unsigned int index;			/* CQ number on the device */
	struct vnic_dev *vdev;
	struct vnic_cq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;		/* descriptor ring (DMA) */
	unsigned int to_clean;			/* next descriptor to service */
	unsigned int last_color;		/* color of already-serviced descs */
};
/*
 * svnic_cq_service - service up to work_to_do completed descriptors.
 *
 * Hardware flips a per-descriptor color bit each time it wraps the ring,
 * so a descriptor whose color differs from cq->last_color is new.  Each
 * new descriptor is decoded and handed to q_service(); servicing stops
 * early if q_service() returns non-zero or the work budget is exhausted.
 * Returns the number of descriptors serviced.
 */
static inline unsigned int svnic_cq_service(struct vnic_cq *cq,
	unsigned int work_to_do,
	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque),
	void *opaque)
{
	struct cq_desc *cq_desc;
	unsigned int work_done = 0;
	u16 q_number, completed_index;
	u8 type, color;

	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
		cq->ring.desc_size * cq->to_clean);
	cq_desc_dec(cq_desc, &type, &color,
		&q_number, &completed_index);

	while (color != cq->last_color) {

		if ((*q_service)(cq->vdev, cq_desc, type,
			q_number, completed_index, opaque))
			break;

		cq->to_clean++;
		if (cq->to_clean == cq->ring.desc_count) {
			/* wrapped: the "new descriptor" color flips */
			cq->to_clean = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
			cq->ring.desc_size * cq->to_clean);
		cq_desc_dec(cq_desc, &type, &color,
			&q_number, &completed_index);

		work_done++;
		if (work_done >= work_to_do)
			break;
	}

	return work_done;
}
void svnic_cq_free(struct vnic_cq *cq);
int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
unsigned int index, unsigned int desc_count, unsigned int desc_size);
void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int message_enable,
unsigned int interrupt_offset, u64 message_addr);
void svnic_cq_clean(struct vnic_cq *cq);
#endif /* _VNIC_CQ_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_CQ_FW_H_
#define _VNIC_CQ_FW_H_
#include "snic_fwint.h"
/*
 * vnic_cq_fw_service - service firmware-request completions.
 *
 * Same color-bit protocol as svnic_cq_service(), but the descriptors are
 * snic_fw_req entries and the callback gets the CQ index instead of the
 * decoded queue/completion fields.  Returns the number of descriptors
 * serviced (stops early on callback error or exhausted budget).
 */
static inline unsigned int
vnic_cq_fw_service(struct vnic_cq *cq,
		   int (*q_service)(struct vnic_dev *vdev,
				    unsigned int index,
				    struct snic_fw_req *desc),
		   unsigned int work_to_do)

{
	struct snic_fw_req *desc;
	unsigned int work_done = 0;
	u8 color;

	desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
		cq->ring.desc_size * cq->to_clean);
	snic_color_dec(desc, &color);

	while (color != cq->last_color) {
		if ((*q_service)(cq->vdev, cq->index, desc))
			break;

		cq->to_clean++;
		if (cq->to_clean == cq->ring.desc_count) {
			/* wrapped: the "new descriptor" color flips */
			cq->to_clean = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
			cq->ring.desc_size * cq->to_clean);
		snic_color_dec(desc, &color);

		work_done++;
		if (work_done >= work_to_do)
			break;
	}

	return work_done;
}
#endif /* _VNIC_CQ_FW_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"
#define VNIC_DVCMD_TMO 10000 /* Devcmd Timeout value */
#define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL
/*
 * State for the devcmd2 (work-queue based) device command channel:
 * commands are posted on a WQ ring, results arrive in a separate
 * color-stamped results ring.
 */
struct devcmd2_controller {
	struct vnic_wq_ctrl __iomem *wq_ctrl;	/* posting WQ's registers */
	struct vnic_dev_ring results_ring;	/* DMA ring of devcmd2_result */
	struct vnic_wq wq;			/* command posting queue */
	struct vnic_devcmd2 *cmd_ring;		/* WQ ring viewed as commands */
	struct devcmd2_result *result;		/* base of results_ring */
	u16 next_result;			/* next result slot to poll */
	u16 result_size;			/* number of result slots */
	int color;				/* expected color of new results */
};
/* One discovered BAR resource: its mapped address and element count. */
struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};
/*
 * Per-vNIC device state: discovered resources, interrupt mode, and the
 * DMA-coherent buffers shared with firmware (notify, stats, fw_info).
 * devcmd_rtn dispatches device commands (devcmd2 path when available).
 */
struct vnic_dev {
	void *priv;				/* owner's private data */
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];	/* from vnic_dev_discover_res() */
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;	/* FW-written notify area (DMA) */
	struct vnic_devcmd_notify notify_copy;	/* consistent snapshot of *notify */
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;		/* DMA stats buffer, lazily alloc'd */
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;	/* cached FW info, lazily alloc'd */
	dma_addr_t fw_info_pa;
	u64 args[VNIC_DEVCMD_NARGS];		/* devcmd argument marshalling */
	struct devcmd2_controller *devcmd2;
	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			  int wait);
};
#define VNIC_MAX_RES_HDR_SIZE \
(sizeof(struct vnic_resource_header) + \
sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128
/* svnic_dev_priv - return the owner's private pointer stored at alloc time. */
void *svnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
/*
 * vnic_dev_discover_res - parse the resource table that follows the
 * resource header in BAR0 and record each resource type's count and
 * mapped address in vdev->res[].
 *
 * Returns 0 on success, -EINVAL on a missing/malformed header or an
 * out-of-bounds per-queue resource.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");

		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");

		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));

		return -EINVAL;
	}

	/* resource entries start immediately after the header */
	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;
		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			/*
			 * NOTE(review): the bounds check (and the vaddr
			 * computation below) use bar[0] even when
			 * bar_num != 0 -- presumably fine while only a
			 * single BAR is ever passed in; confirm before
			 * supporting multiple BARs.
			 */
			if (len + bar_offset > bar->len) {
				pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar->len);

				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
/* svnic_dev_get_res_count - number of instances discovered for `type`. */
unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
/*
 * svnic_dev_get_res - mapped address of resource `type`, instance `index`.
 *
 * Per-queue resources (WQ/RQ/CQ/INTR_CTRL) are laid out at a fixed
 * stride, so the index selects an instance; all other types return the
 * base address.  Returns NULL when the type was not discovered.
 */
void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	char __iomem *vaddr = vdev->res[type].vaddr;

	if (!vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		vaddr += index * VNIC_RES_STRIDE;
		break;

	default:
		break;
	}

	return vaddr;
}
/*
 * svnic_dev_desc_ring_size - fill in a ring's geometry and return the
 * total (unaligned) byte size that must be allocated for it.
 */
unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
				      unsigned int desc_count,
				      unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */
	ring->base_align = 512;

	ring->desc_count = ALIGN(desc_count ? desc_count : 4096, 32);
	ring->desc_size = ALIGN(desc_size, 16);

	ring->size = ring->desc_count * ring->desc_size;
	/* extra slack so the base can be rounded up to base_align */
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
/* svnic_dev_clear_desc_ring - zero the aligned descriptor area. */
void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
/*
 * svnic_dev_alloc_desc_ring - allocate a DMA-coherent descriptor ring.
 *
 * Over-allocates by base_align bytes so the usable region can be rounded
 * up to the required alignment; both the raw and the aligned addresses
 * are kept (the raw ones are needed to free later).  Returns 0 or -ENOMEM.
 */
int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	svnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);

		return -ENOMEM;
	}

	/* round the bus address up and shift the CPU pointer by the same amount */
	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	svnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
/*
 * svnic_dev_free_desc_ring - free a ring allocated by
 * svnic_dev_alloc_desc_ring(); safe to call on an already-freed ring
 * (descs is NULLed to make the call idempotent).
 */
void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
/*
 * _svnic_dev_cmd2 - issue one device command over the devcmd2 channel.
 *
 * Builds the command in the WQ ring slot at the current posted index,
 * rings the doorbell, then (unless the command is flagged NOWAIT) polls
 * the results ring -- in 100us steps, up to `wait` iterations -- for a
 * result slot whose color matches the expected one.  Command arguments
 * and results travel through vdev->args[].
 *
 * Returns 0 on success, a positive firmware error code, -ENODEV on
 * surprise hardware removal, or -ETIMEDOUT.
 */
static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result = dc2c->result + dc2c->next_result;
	unsigned int i;
	int delay;
	int err;
	u32 posted;
	u32 new_posted;

	posted = ioread32(&dc2c->wq_ctrl->posted_index);

	if (posted == 0xFFFFFFFF) { /* check for hardware gone  */
		/* Hardware surprise removal: return error */
		return -ENODEV;
	}

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
	}
	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;	/* fire-and-forget command: nothing to poll */

	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		if (result->color == dc2c->color) {
			/* consume this result slot; flip color on wrap */
			dc2c->next_result++;
			if (dc2c->next_result == dc2c->result_size) {
				dc2c->next_result = 0;
				dc2c->color = dc2c->color ? 0 : 1;
			}

			if (result->error) {
				err = (int) result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));

				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				/*
				 * Adding the rmb() prevents the compiler
				 * and/or CPU from reordering the reads which
				 * would potentially result in reading stale
				 * values.
				 */
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}
/*
 * svnic_dev_init_devcmd2 - set up the devcmd2 command channel.
 *
 * Allocates the controller state, the command WQ and the results ring,
 * tells firmware where the results ring lives (CMD_INITIALIZE_DEVCMD2),
 * and installs _svnic_dev_cmd2 as the command dispatch routine.
 * Idempotent: returns 0 immediately if already initialized.  On failure
 * the partially-built state is torn down in reverse order.
 */
static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = NULL;
	unsigned int fetch_idx;
	int ret;
	void __iomem *p;

	if (vdev->devcmd2)
		return 0;

	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (!p)
		return -ENODEV;

	dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
	if (!dc2c)
		return -ENOMEM;

	vdev->devcmd2 = dc2c;

	dc2c->color = 1;	/* first firmware results carry color 1 */
	dc2c->result_size = DEVCMD2_RING_SIZE;

	ret  = vnic_wq_devcmd2_alloc(vdev,
				     &dc2c->wq,
				     DEVCMD2_RING_SIZE,
				     DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_devcmd2;

	fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone  */
		/* Hardware surprise removal: reset fetch_index */
		fetch_idx = 0;
	}

	/*
	 * Don't change fetch_index ever and
	 * set posted_index same as fetch_index
	 * when setting up the WQ for devcmd2.
	 */
	vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
	svnic_wq_enable(&dc2c->wq);

	ret = svnic_dev_alloc_desc_ring(vdev,
					&dc2c->results_ring,
					DEVCMD2_RING_SIZE,
					DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_wq;

	dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
	dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
	dc2c->wq_ctrl = dc2c->wq.ctrl;

	/* hand the results ring's bus address and size to firmware */
	vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
	if (ret < 0)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = &_svnic_dev_cmd2;
	pr_info("DEVCMD2 Initialized.\n");

	return ret;

err_free_desc_ring:
	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);

err_free_wq:
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);

err_free_devcmd2:
	kfree(dc2c);
	vdev->devcmd2 = NULL;

	return ret;
} /* end of svnic_dev_init_devcmd2 */
/*
 * vnic_dev_deinit_devcmd2 - tear down the devcmd2 channel: detach it
 * from vdev first, then release ring, WQ (disable before free) and the
 * controller state.
 */
static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;

	vdev->devcmd2 = NULL;
	vdev->devcmd_rtn = NULL;

	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);

	kfree(dc2c);
}
/*
 * svnic_dev_cmd - issue a device command with two in/out arguments.
 *
 * Marshals *a0/*a1 into the devcmd arg array, dispatches through the
 * installed command routine, and copies the (possibly updated) first
 * two args back to the caller.  Returns the dispatch routine's result.
 */
int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	int ret;

	memset(vdev->args, 0, sizeof(vdev->args));
	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	ret = vdev->devcmd_rtn(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return ret;
}
/*
 * svnic_dev_fw_info - return (and cache) the firmware info block.
 *
 * On first call, allocates a DMA-coherent buffer and asks firmware to
 * fill it (CMD_MCPU_FW_INFO); subsequent calls return the cached buffer.
 * NOTE(review): if the devcmd fails on the first call, the buffer stays
 * cached and later calls return it without retrying -- confirm callers
 * check the return code before trusting *fw_info.
 */
int svnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
/*
 * svnic_dev_spec - read `size` bytes at `offset` from the device-specific
 * config block (CMD_DEV_SPEC) into *value.  size must be 1, 2, 4 or 8.
 */
int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
	unsigned int size, void *value)
{
	u64 arg0 = offset, arg1 = size;
	int wait = VNIC_DVCMD_TMO;
	int ret;

	ret = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &arg0, &arg1, wait);

	/* the value comes back in arg0; store it at the caller's width */
	switch (size) {
	case 1:
		*(u8 *)value = (u8)arg0;
		break;
	case 2:
		*(u16 *)value = (u16)arg0;
		break;
	case 4:
		*(u32 *)value = (u32)arg0;
		break;
	case 8:
		*(u64 *)value = arg0;
		break;
	default:
		BUG();
		break;
	}

	return ret;
}
/* svnic_dev_stats_clear - ask firmware to zero the device statistics. */
int svnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}
/*
 * svnic_dev_stats_dump - have firmware write current statistics into a
 * (lazily allocated, reused) DMA buffer and return it via *stats.
 */
int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;	/* bus address firmware writes into */
	a1 = sizeof(struct vnic_stats);

	return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
/* svnic_dev_close - issue CMD_CLOSE to the device. */
int svnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
/*
 * svnic_dev_enable_wait - enable the device, preferring CMD_ENABLE_WAIT
 * and falling back to plain CMD_ENABLE on firmware that does not know
 * the newer command.
 */
int svnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err;

	err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	if (err != ERR_ECMDUNKNOWN)
		return err;

	return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
/* svnic_dev_disable - issue CMD_DISABLE to the device. */
int svnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
/* svnic_dev_open - issue CMD_OPEN with `arg`; completion is polled via
 * svnic_dev_open_done(). */
int svnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
/*
 * svnic_dev_open_done - poll CMD_OPEN completion.
 *
 * *done is set to 1 when the open has finished (status reads back 0),
 * 0 otherwise.  Returns 0 on a successful status query, else the error.
 */
int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err;

	*done = 0;

	err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (!err)
		*done = (a0 == 0);

	return err;
}
/*
 * svnic_dev_notify_set - register the notify buffer with firmware.
 *
 * Lazily allocates the DMA-coherent notify area, then issues CMD_NOTIFY
 * with its bus address (a0) and, packed into a1, the interrupt number
 * (upper 32 bits) plus the buffer size (lower bits).
 */
int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	if (!vdev->notify) {
		vdev->notify = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK;
	a1 += sizeof(struct vnic_devcmd_notify);

	return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
/*
 * svnic_dev_notify_unset - deregister the notify buffer (paddr 0,
 * interrupt field all-ones) so firmware stops writing into it.
 * The devcmd result is intentionally ignored (best effort).
 */
void svnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
/*
 * vnic_dev_notify_ready - capture a consistent snapshot of the
 * firmware-written notify area into vdev->notify_copy.
 *
 * words[0] holds the sum of the remaining 32-bit words, so the copy is
 * retried until the computed sum matches (i.e. firmware was not mid-
 * update).  Returns 1 when a snapshot is available, 0 if no notify
 * buffer is registered.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
/* svnic_dev_init - issue CMD_INIT with `arg` to initialize the device. */
int svnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}
/*
 * svnic_dev_link_status - current link state.
 *
 * A dedicated linkstatus mapping, when present, is authoritative;
 * otherwise the value comes from a fresh notify snapshot (0 if the
 * notify area is not ready).
 */
int svnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	return vnic_dev_notify_ready(vdev) ? vdev->notify_copy.link_state : 0;
}
/* svnic_dev_link_down_cnt - link-down count from the latest notify
 * snapshot, or 0 if the notify area is not ready. */
u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}
/* svnic_dev_set_intr_mode - record the interrupt mode in use (INTx/MSI/MSI-X). */
void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}
/* svnic_dev_get_intr_mode - interrupt mode previously recorded via
 * svnic_dev_set_intr_mode(). */
enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
/*
 * svnic_dev_unregister - free every lazily-allocated DMA buffer
 * (notify, linkstatus, stats, fw_info), tear down the devcmd2 channel
 * and release the vnic_dev itself.  Safe to call with vdev == NULL.
 */
void svnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			pci_free_consistent(vdev->pdev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);
		kfree(vdev);
	}
}
/*
 * svnic_dev_alloc_discover - allocate a vnic_dev (unless one is passed
 * in) and populate its resource table from the device BARs.
 *
 * Returns the initialized vnic_dev, or NULL on allocation/discovery
 * failure (any allocated state is released via svnic_dev_unregister()).
 */
struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
					  void *priv,
					  struct pci_dev *pdev,
					  struct vnic_dev_bar *bar,
					  unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(*vdev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (!vnic_dev_discover_res(vdev, bar, num_bars))
		return vdev;

	/* discovery failed: tear down whatever was built */
	svnic_dev_unregister(vdev);

	return NULL;
} /* end of svnic_dev_alloc_discover */
/*
* fallback option is left to keep the interface common for other vnics.
*/
int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
{
int err = -ENODEV;
void __iomem *p;
p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
if (p)
err = svnic_dev_init_devcmd2(vdev);
else
pr_err("DEVCMD2 resource not found.\n");
return err;
} /* end of svnic_dev_cmd_init */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#ifndef VNIC_PADDR_TARGET
#define VNIC_PADDR_TARGET 0x0000000000000000ULL
#endif
#ifndef readq
/* Fallback 64-bit MMIO accessors for platforms without native readq/writeq:
 * composed from two 32-bit accesses (low word at reg, high word at reg+4).
 * The two halves are not atomic with respect to the device. */
static inline u64 readq(void __iomem *reg)
{
	return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
}

static inline void writeq(u64 val, void __iomem *reg)
{
	writel(lower_32_bits(val), reg);
	writel(upper_32_bits(val), reg + 0x4UL);
}
#endif
/* Interrupt delivery mode negotiated for the device. */
enum vnic_dev_intr_mode {
	VNIC_DEV_INTR_MODE_UNKNOWN,
	VNIC_DEV_INTR_MODE_INTX,
	VNIC_DEV_INTR_MODE_MSI,
	VNIC_DEV_INTR_MODE_MSIX,
};
/* One mapped PCI BAR: CPU mapping, bus address and length. */
struct vnic_dev_bar {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned long len;
};
/*
 * A DMA-coherent descriptor ring.  The *_unaligned fields hold the raw
 * allocation (needed for freeing); descs/base_addr are the base_align-
 * rounded addresses actually used.  See svnic_dev_alloc_desc_ring().
 */
struct vnic_dev_ring {
	void *descs;			/* aligned CPU address */
	size_t size;			/* aligned usable size in bytes */
	dma_addr_t base_addr;		/* aligned bus address */
	size_t base_align;		/* required base alignment */
	void *descs_unaligned;		/* raw allocation (for freeing) */
	size_t size_unaligned;		/* raw allocation size */
	dma_addr_t base_addr_unaligned;
	unsigned int desc_size;		/* bytes per descriptor */
	unsigned int desc_count;	/* descriptors in the ring */
	unsigned int desc_avail;	/* free descriptors */
};
struct vnic_dev;
struct vnic_stats;
void *svnic_dev_priv(struct vnic_dev *vdev);
unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
enum vnic_res_type type);
void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index);
unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count,
unsigned int desc_size);
void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
void svnic_dev_free_desc_ring(struct vnic_dev *vdev,
struct vnic_dev_ring *ring);
int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
int svnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
unsigned int size, void *value);
int svnic_dev_stats_clear(struct vnic_dev *vdev);
int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
void svnic_dev_notify_unset(struct vnic_dev *vdev);
int svnic_dev_link_status(struct vnic_dev *vdev);
u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev);
int svnic_dev_close(struct vnic_dev *vdev);
int svnic_dev_enable_wait(struct vnic_dev *vdev);
int svnic_dev_disable(struct vnic_dev *vdev);
int svnic_dev_open(struct vnic_dev *vdev, int arg);
int svnic_dev_open_done(struct vnic_dev *vdev, int *done);
int svnic_dev_init(struct vnic_dev *vdev, int arg);
struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev,
struct vnic_dev_bar *bar,
unsigned int num_bars);
void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev);
void svnic_dev_unregister(struct vnic_dev *vdev);
int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
#endif /* _VNIC_DEV_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
/*
 * A devcmd opcode is a 32-bit word packed as
 * [dir:2][flags:6][vtype:10][nr:14]; the _CMD* macros below build
 * (_CMDCF/_CMDC/_CMDCNW) and decode (_CMD_DIR/.../_CMD_N) that word.
 */
#define _CMD_NBITS 14
#define _CMD_VTYPEBITS 10
#define _CMD_FLAGSBITS 6
#define _CMD_DIRBITS 2
#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
#define _CMD_NSHIFT 0
#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
/*
 * Direction bits (from host perspective).
 */
#define _CMD_DIR_NONE 0U
#define _CMD_DIR_WRITE 1U
#define _CMD_DIR_READ 2U
#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
/*
 * Flag bits.
 */
#define _CMD_FLAGS_NONE 0U
#define _CMD_FLAGS_NOWAIT 1U
/*
 * vNIC type bits (bitmask; a command may apply to several vNIC types).
 */
#define _CMD_VTYPE_NONE 0U
#define _CMD_VTYPE_ENET 1U
#define _CMD_VTYPE_FC 2U
#define _CMD_VTYPE_SCSI 4U
#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
/*
 * Used to create cmds..
 */
#define _CMDCF(dir, flags, vtype, nr) \
	(((dir) << _CMD_DIRSHIFT) | \
	((flags) << _CMD_FLAGSSHIFT) | \
	((vtype) << _CMD_VTYPESHIFT) | \
	((nr) << _CMD_NSHIFT))
#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
/*
 * Used to decode cmds..
 */
#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
/*
 * Devcmd opcodes; a0/a1 argument usage is documented per command.
 * Note: CMD_ENABLE and CMD_ENABLE_WAIT share opcode 28 and differ only
 * in the NOWAIT flag (_CMDCNW vs _CMDC).
 */
enum vnic_devcmd_cmd {
	CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
	/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
	CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
	/* dev-specific block member:
	 * in: (u16)a0=offset,(u8)a1=size
	 * out: a0=value */
	CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
	/* stats clear */
	CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
	/* stats dump in mem: (u64)a0=paddr to stats area,
	 * (u16)a1=sizeof stats area */
	CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
	/* nic_cfg in (u32)a0 */
	CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
	/* set struct vnic_devcmd_notify buffer in mem:
	 * in:
	 * (u64)a0=paddr to notify (set paddr=0 to unset)
	 * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
	 * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
	 * out:
	 * (u32)a1 = effective size
	 */
	CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
	/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
	CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
	/* open status:
	 * out: a0=0 open complete, a0=1 open in progress */
	CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
	/* close vnic */
	CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
	/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*)
	 * NOTE(review): encoded with _CMD_DIR_READ although the comment
	 * describes flags passed in via a0 - confirm intended direction
	 * against the firmware interface spec. */
	CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
	/* enable virtual link */
	CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
	/* enable virtual link, waiting variant. */
	CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
	/* disable virtual link */
	CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
	/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
	CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
	/* init status:
	 * out: a0=0 init complete, a0=1 init in progress
	 * if a0=0, a1=errno */
	CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
	/* undo initialize of virtual link */
	CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
	/* check fw capability of a cmd:
	 * in: (u32)a0=cmd
	 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
	CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
	/*
	 * Initialization for the devcmd2 interface.
	 * in: (u64) a0=host result buffer physical address
	 * in: (u16) a1=number of entries in result buffer
	 */
	CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57)
};
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
/* flags for CMD_PACKET_FILTER */
#define CMD_PFILTER_DIRECTED 0x01
#define CMD_PFILTER_MULTICAST 0x02
#define CMD_PFILTER_BROADCAST 0x04
#define CMD_PFILTER_PROMISCUOUS 0x08
#define CMD_PFILTER_ALL_MULTICAST 0x10
/* Contents of the devcmd status register. */
enum vnic_devcmd_status {
	STAT_NONE = 0,
	STAT_BUSY = 1 << 0, /* cmd in progress */
	STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
};
/* Error codes reported in a0 when STAT_ERROR is set. */
enum vnic_devcmd_error {
	ERR_SUCCESS = 0,
	ERR_EINVAL = 1,
	ERR_EFAULT = 2,
	ERR_EPERM = 3,
	ERR_EBUSY = 4,
	ERR_ECMDUNKNOWN = 5,
	ERR_EBADSTATE = 6,
	ERR_ENOMEM = 7,
	ERR_ETIMEDOUT = 8,
	ERR_ELINKDOWN = 9,
};
/* Firmware/hardware identification strings (see CMD_MCPU_FW_INFO). */
struct vnic_devcmd_fw_info {
	char fw_version[32];
	char fw_build[32];
	char hw_version[32];
	char hw_serial_number[32];
};
/* Notification area registered with firmware via CMD_NOTIFY. */
struct vnic_devcmd_notify {
	u32 csum; /* checksum over following words */
	u32 link_state; /* link up == 1 */
	u32 port_speed; /* effective port speed (rate limit) */
	u32 mtu; /* MTU */
	u32 msglvl; /* requested driver msg lvl */
	u32 uif; /* uplink interface */
	u32 status; /* status bits (see VNIC_STF_*) */
	u32 error; /* error code (see ERR_*) for first ERR */
	u32 link_down_cnt; /* running count of link down transitions */
};
#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
/*
 * Provisioning info blob, identified by vendor OUI and a type within
 * that OUI's namespace.  'data' is a C99 flexible array member (was the
 * deprecated zero-length array u8 data[0]; layout is unchanged) holding
 * the variable-length payload that follows the fixed header.
 */
struct vnic_devcmd_provinfo {
	u8 oui[3];
	u8 type;
	u8 data[];
};
/*
 * Writing cmd register causes STAT_BUSY to get set in status register.
 * When cmd completes, STAT_BUSY will be cleared.
 *
 * If cmd completed successfully STAT_ERROR will be clear
 * and args registers contain cmd-specific results.
 *
 * If cmd error, STAT_ERROR will be set and args[0] contains error code.
 *
 * status register is read-only. While STAT_BUSY is set,
 * all other register contents are read-only.
 */
/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
#define VNIC_DEVCMD_NARGS 15
/* Version 1 devcmd register layout. */
struct vnic_devcmd {
	u32 status; /* RO */
	u32 cmd; /* RW */
	u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
};
/*
 * Version 2 of the interface.
 *
 * Some things are carried over, notably the vnic_devcmd_cmd enum.
 */
/*
 * Flags for vnic_devcmd2.flags
 */
#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
/* Devcmd2 request descriptor. */
struct vnic_devcmd2 {
	u16 pad;
	u16 flags; /* see DEVCMD2_F* */
	u32 cmd; /* same command #defines as original */
	u64 args[VNIC_DEVCMD2_NARGS];
};
#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
/* Devcmd2 completion written back into the host result ring. */
struct devcmd2_result {
	u64 results[VNIC_DEVCMD2_NRESULTS];
	u32 pad;
	u16 completed_index; /* into copy WQ */
	u8 error; /* same error codes as original */
	u8 color; /* 0 or 1 as with completion queues */
};
#define DEVCMD2_RING_SIZE 32
#define DEVCMD2_DESC_SIZE 128
#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1)
#endif /* _VNIC_DEVCMD_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
/*
 * svnic_intr_free - drop the cached pointer to the interrupt control
 * registers.  No hardware access is performed.
 */
void svnic_intr_free(struct vnic_intr *intr)
{
	intr->ctrl = NULL;
}
/*
 * svnic_intr_alloc - bind an interrupt object to its control resource.
 * @vdev:  owning vnic device
 * @intr:  interrupt object to initialize
 * @index: interrupt number whose INTR_CTRL resource is looked up
 *
 * Returns 0 on success, or -EINVAL if the resource is not present.
 */
int svnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
		     unsigned int index)
{
	intr->index = index;
	intr->vdev = vdev;

	intr->ctrl = svnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
	if (intr->ctrl)
		return 0;

	pr_err("Failed to hook INTR[%d].ctrl resource\n", index);

	return -EINVAL;
}
/*
 * svnic_intr_init - program the interrupt control registers.
 * @coalescing_timer:  initial coalescing timer value
 * @coalescing_type:   coalescing timer type (see VNIC_INTR_TIMER_TYPE_*)
 * @mask_on_assertion: mask-on-assertion flag, written to hw as-is
 *
 * Also clears any accumulated interrupt credits.
 */
void svnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
	unsigned int coalescing_type, unsigned int mask_on_assertion)
{
	iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
	iowrite32(0, &intr->ctrl->int_credits);
}
/* svnic_intr_clean - clear any accumulated interrupt credits. */
void svnic_intr_clean(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->int_credits);
}
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
#include <linux/pci.h>
#include "vnic_dev.h"
/* Coalescing-timer limit and type values used with svnic_intr_init(). */
#define VNIC_INTR_TIMER_MAX 0xffff
#define VNIC_INTR_TIMER_TYPE_ABS 0
#define VNIC_INTR_TIMER_TYPE_QUIET 1
/* Interrupt control (memory-mapped registers; offsets noted per field) */
struct vnic_intr_ctrl {
	u32 coalescing_timer; /* 0x00 */
	u32 pad0;
	u32 coalescing_value; /* 0x08 */
	u32 pad1;
	u32 coalescing_type; /* 0x10 */
	u32 pad2;
	u32 mask_on_assertion; /* 0x18 */
	u32 pad3;
	u32 mask; /* 0x20 */
	u32 pad4;
	u32 int_credits; /* 0x28 */
	u32 pad5;
	u32 int_credit_return; /* 0x30 */
	u32 pad6;
};
/* Per-interrupt software state. */
struct vnic_intr {
	unsigned int index; /* interrupt number */
	struct vnic_dev *vdev;
	struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
};
/* Unmask (allow delivery of) this interrupt. */
static inline void
svnic_intr_unmask(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->mask);
}
/* Mask (suppress delivery of) this interrupt. */
static inline void
svnic_intr_mask(struct vnic_intr *intr)
{
	iowrite32(1, &intr->ctrl->mask);
}
/*
 * svnic_intr_return_credits - acknowledge serviced work to hardware.
 * @credits:     number of credits to return (low 16 bits of the write)
 * @unmask:      nonzero to also unmask the interrupt
 * @reset_timer: nonzero to also restart the coalescing timer
 *
 * All three pieces of state are packed into one int_credit_return write.
 */
static inline void
svnic_intr_return_credits(struct vnic_intr *intr,
			  unsigned int credits,
			  int unmask,
			  int reset_timer)
{
#define VNIC_INTR_UNMASK_SHIFT 16
#define VNIC_INTR_RESET_TIMER_SHIFT 17
	u32 int_credit_return = (credits & 0xffff) |
		(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
		(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
	iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
}
/* Read the current interrupt credit count from hardware. */
static inline unsigned int
svnic_intr_credits(struct vnic_intr *intr)
{
	return ioread32(&intr->ctrl->int_credits);
}
/*
 * svnic_intr_return_all_credits - drain every pending credit.
 *
 * Returns the full hardware credit count while unmasking the interrupt
 * and restarting the coalescing timer in the same doorbell write.
 */
static inline void
svnic_intr_return_all_credits(struct vnic_intr *intr)
{
	svnic_intr_return_credits(intr, svnic_intr_credits(intr),
				  1 /* unmask */, 1 /* reset timer */);
}
/* Non-inline interrupt helpers, implemented in vnic_intr.c. */
void svnic_intr_free(struct vnic_intr *);
int svnic_intr_alloc(struct vnic_dev *, struct vnic_intr *, unsigned int);
void svnic_intr_init(struct vnic_intr *intr,
	unsigned int coalescing_timer,
	unsigned int coalescing_type,
	unsigned int mask_on_assertion);
void svnic_intr_clean(struct vnic_intr *);
#endif /* _VNIC_INTR_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
/* Magic/version stamped at the start of the vNIC resource table. */
#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
#define VNIC_RES_VERSION 0x00000000L
/* vNIC resource types */
enum vnic_res_type {
	RES_TYPE_EOL, /* End-of-list */
	RES_TYPE_WQ, /* Work queues */
	RES_TYPE_RQ, /* Receive queues */
	RES_TYPE_CQ, /* Completion queues */
	RES_TYPE_RSVD1,
	RES_TYPE_NIC_CFG, /* Enet NIC config registers */
	RES_TYPE_RSVD2,
	RES_TYPE_RSVD3,
	RES_TYPE_RSVD4,
	RES_TYPE_RSVD5,
	RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
	RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
	RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
	RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
	RES_TYPE_RSVD6,
	RES_TYPE_RSVD7,
	RES_TYPE_DEVCMD, /* Device command region */
	RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
	RES_TYPE_SUBVNIC, /* subvnic resource type */
	RES_TYPE_MQ_WQ, /* MQ Work queues */
	RES_TYPE_MQ_RQ, /* MQ Receive queues */
	RES_TYPE_MQ_CQ, /* MQ Completion queues */
	RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
	RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */
	RES_TYPE_DEVCMD2, /* Device control region */
	RES_TYPE_MAX, /* Count of resource types */
};
/* Header at the start of the resource table. */
struct vnic_resource_header {
	u32 magic;
	u32 version;
};
/* One resource-table entry locating a resource within a BAR. */
struct vnic_resource {
	u8 type; /* enum vnic_res_type */
	u8 bar; /* BAR number */
	u8 pad[2];
	u32 bar_offset; /* offset within the BAR */
	u32 count; /* number of contiguous instances */
};
#endif /* _VNIC_RESOURCE_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_SNIC_H_
#define _VNIC_SNIC_H_
/* Valid ranges for the sNIC configuration fields defined below. */
#define VNIC_SNIC_WQ_DESCS_MIN 64
#define VNIC_SNIC_WQ_DESCS_MAX 1024
#define VNIC_SNIC_MAXDATAFIELDSIZE_MIN 256
#define VNIC_SNIC_MAXDATAFIELDSIZE_MAX 2112
#define VNIC_SNIC_IO_THROTTLE_COUNT_MIN 1
#define VNIC_SNIC_IO_THROTTLE_COUNT_MAX 1024
#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MIN 0
#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX 240000
#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MIN 0
#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX 255
#define VNIC_SNIC_LUNS_PER_TARGET_MIN 1
#define VNIC_SNIC_LUNS_PER_TARGET_MAX 1024
/* Device-specific region: scsi configuration */
struct vnic_snic_config {
	u32 flags;
	u32 wq_enet_desc_count;
	u32 io_throttle_count;
	u32 port_down_timeout;
	u32 port_down_io_retries;
	u32 luns_per_tgt;
	u16 maxdatafieldsize;
	u16 intr_timer;
	u8 intr_timer_type;
	u8 _resvd2;
	u8 xpt_type;
	u8 hid;
};
#endif /* _VNIC_SNIC_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
/* Tx statistics */
struct vnic_tx_stats {
	u64 tx_frames_ok;
	u64 tx_unicast_frames_ok;
	u64 tx_multicast_frames_ok;
	u64 tx_broadcast_frames_ok;
	u64 tx_bytes_ok;
	u64 tx_unicast_bytes_ok;
	u64 tx_multicast_bytes_ok;
	u64 tx_broadcast_bytes_ok;
	u64 tx_drops;
	u64 tx_errors;
	u64 tx_tso;
	u64 rsvd[16];
};
/* Rx statistics */
struct vnic_rx_stats {
	u64 rx_frames_ok;
	u64 rx_frames_total;
	u64 rx_unicast_frames_ok;
	u64 rx_multicast_frames_ok;
	u64 rx_broadcast_frames_ok;
	u64 rx_bytes_ok;
	u64 rx_unicast_bytes_ok;
	u64 rx_multicast_bytes_ok;
	u64 rx_broadcast_bytes_ok;
	u64 rx_drop;
	u64 rx_no_bufs;
	u64 rx_errors;
	u64 rx_rss;
	u64 rx_crc_errors;
	u64 rx_frames_64; /* rx_frames_* are size-histogram buckets */
	u64 rx_frames_127;
	u64 rx_frames_255;
	u64 rx_frames_511;
	u64 rx_frames_1023;
	u64 rx_frames_1518;
	u64 rx_frames_to_max;
	u64 rsvd[16];
};
/* Layout of the stats area used by svnic_dev_stats_dump(). */
struct vnic_stats {
	struct vnic_tx_stats tx;
	struct vnic_rx_stats rx;
};
#endif /* _VNIC_STATS_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
/*
 * vnic_wq_get_ctrl - look up and cache the WQ's control-register block.
 * Returns 0 on success, -EINVAL if the resource does not exist.
 */
static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int index, enum vnic_res_type res_type)
{
	wq->ctrl = svnic_dev_get_res(vdev, res_type, index);

	return wq->ctrl ? 0 : -EINVAL;
}
/*
 * vnic_wq_alloc_ring - allocate the descriptor ring backing this WQ.
 * The 'index' argument is accepted but unused here.
 * Returns 0 or a negative errno from the ring allocator.
 */
static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size)
{
	return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
		desc_size);
}
/*
 * vnic_wq_alloc_bufs - build the software bookkeeping ring for a WQ.
 *
 * Buffers are allocated in fixed-size blocks and chained through
 * buf->next into one circular list mirroring the descriptor ring:
 * entry (count - 1) links back to the first entry of block 0.
 * Returns 0 or -ENOMEM; on failure already-allocated blocks are left
 * for the caller to release via svnic_wq_free().
 */
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf;
	unsigned int i, j, count = wq->ring.desc_count;
	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
	/* Allocate every block up front so the chaining pass below can
	 * safely reference wq->bufs[i + 1]. */
	for (i = 0; i < blks; i++) {
		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!wq->bufs[i]) {
			pr_err("Failed to alloc wq_bufs\n");
			return -ENOMEM;
		}
	}
	for (i = 0; i < blks; i++) {
		buf = wq->bufs[i];
		for (j = 0; j < VNIC_WQ_BUF_DFLT_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_WQ_BUF_DFLT_BLK_ENTRIES + j;
			/* Point each buffer at its slot in the descriptor ring. */
			buf->desc = (u8 *)wq->ring.descs +
				wq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				/* Last entry: close the circular list. */
				buf->next = wq->bufs[0];
				break;
			} else if (j + 1 == VNIC_WQ_BUF_DFLT_BLK_ENTRIES) {
				/* End of this block: cross into the next one. */
				buf->next = wq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}
	wq->to_use = wq->to_clean = wq->bufs[0];
	return 0;
}
/*
 * svnic_wq_free - release a WQ's descriptor ring and buffer blocks.
 *
 * Frees every bookkeeping block (kfree(NULL) is a no-op for slots that
 * were never allocated) and drops the cached control-register pointer.
 */
void svnic_wq_free(struct vnic_wq *wq)
{
	unsigned int blk;

	svnic_dev_free_desc_ring(wq->vdev, &wq->ring);

	for (blk = 0; blk < VNIC_WQ_BUF_BLKS_MAX; blk++) {
		kfree(wq->bufs[blk]);
		wq->bufs[blk] = NULL;
	}

	wq->ctrl = NULL;
}
/*
 * vnic_wq_devcmd2_alloc - set up the devcmd2 posting work queue.
 *
 * Binds WQ 0 to the RES_TYPE_DEVCMD2 control region, quiesces the
 * queue, then allocates its descriptor ring.  No bookkeeping buffers
 * are allocated for this queue.  Returns 0 or a negative errno.
 */
int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int desc_count, unsigned int desc_size)
{
	int rc;

	wq->index = 0;
	wq->vdev = vdev;

	rc = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
	if (rc) {
		pr_err("Failed to get devcmd2 resource\n");

		return rc;
	}

	svnic_wq_disable(wq);

	return vnic_wq_alloc_ring(vdev, wq, 0, desc_count, desc_size);
}
/*
 * svnic_wq_alloc - fully initialize a work queue.
 * @index: WQ number; selects the RES_TYPE_WQ control resource
 *
 * Hooks the control registers, quiesces the queue, then allocates the
 * descriptor ring and the software buffer chain.  On buffer-allocation
 * failure everything acquired so far is released via svnic_wq_free().
 * Returns 0 or a negative errno.
 */
int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size)
{
	int rc;

	wq->index = index;
	wq->vdev = vdev;

	rc = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ);
	if (rc) {
		pr_err("Failed to hook WQ[%d] resource\n", index);

		return rc;
	}

	svnic_wq_disable(wq);

	rc = vnic_wq_alloc_ring(vdev, wq, index, desc_count, desc_size);
	if (rc)
		return rc;

	rc = vnic_wq_alloc_bufs(wq);
	if (rc)
		svnic_wq_free(wq);

	return rc;
}
/*
 * vnic_wq_init_start - program the WQ control registers and position the
 * software ring pointers.
 * @cq_index:     completion queue paired with this WQ
 * @fetch_index:  initial hardware fetch position
 * @posted_index: initial posted position
 *
 * Writes ring base/size and index registers, clears error status, and
 * points to_use/to_clean at the buffer corresponding to fetch_index.
 */
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	unsigned int count = wq->ring.desc_count;
	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(count, &wq->ctrl->ring_size);
	iowrite32(fetch_index, &wq->ctrl->fetch_index);
	iowrite32(posted_index, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);
	/* Translate fetch_index into its block and slot within the block. */
	wq->to_use = wq->to_clean =
		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
		[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
}
/* svnic_wq_init - initialize a WQ with fetch/posted indexes at zero. */
void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable,
		error_interrupt_offset);
}
/* Read and return the WQ's error status register. */
unsigned int svnic_wq_error_status(struct vnic_wq *wq)
{
	return ioread32(&wq->ctrl->error_status);
}
/* Set the WQ enable register. */
void svnic_wq_enable(struct vnic_wq *wq)
{
	iowrite32(1, &wq->ctrl->enable);
}
/*
 * svnic_wq_disable - clear the enable register and wait for the queue
 * to stop running.
 *
 * Polls the 'running' register for up to 100us.  Returns 0 once the
 * hardware acknowledges, or -ETIMEDOUT.
 */
int svnic_wq_disable(struct vnic_wq *wq)
{
	unsigned int attempt;

	iowrite32(0, &wq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (attempt = 0; attempt < 100; attempt++) {
		if (ioread32(&wq->ctrl->running) == 0)
			return 0;

		udelay(1);
	}

	pr_err("Failed to disable WQ[%d]\n", wq->index);

	return -ETIMEDOUT;
}
/*
 * svnic_wq_clean - reclaim every outstanding buffer from a disabled WQ.
 * @buf_clean: callback invoked on each in-flight buffer
 *
 * Must only be called with the queue disabled (BUG_ON otherwise).
 * Resets the software pointers, hardware indexes, error status, and
 * clears the descriptor ring contents.
 */
void svnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;
	BUG_ON(ioread32(&wq->ctrl->enable));
	buf = wq->to_clean;
	while (svnic_wq_desc_used(wq) > 0) {
		(*buf_clean)(wq, buf);
		buf = wq->to_clean = buf->next;
		wq->ring.desc_avail++;
	}
	wq->to_use = wq->to_clean = wq->bufs[0];
	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);
	svnic_dev_clear_desc_ring(&wq->ring);
}
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
/* Work queue control (memory-mapped registers; offsets noted per field) */
struct vnic_wq_ctrl {
	u64 ring_base; /* 0x00 */
	u32 ring_size; /* 0x08 */
	u32 pad0;
	u32 posted_index; /* 0x10 */
	u32 pad1;
	u32 cq_index; /* 0x18 */
	u32 pad2;
	u32 enable; /* 0x20 */
	u32 pad3;
	u32 running; /* 0x28 */
	u32 pad4;
	u32 fetch_index; /* 0x30 */
	u32 pad5;
	u32 dca_value; /* 0x38 */
	u32 pad6;
	u32 error_interrupt_enable; /* 0x40 */
	u32 pad7;
	u32 error_interrupt_offset; /* 0x48 */
	u32 pad8;
	u32 error_status; /* 0x50 */
	u32 pad9;
};
/* Software bookkeeping for one WQ descriptor slot. */
struct vnic_wq_buf {
	struct vnic_wq_buf *next; /* circular chain; see vnic_wq_alloc_bufs() */
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int len;
	unsigned int index; /* descriptor index within the ring */
	int sop; /* start-of-packet flag */
	void *desc; /* points into the descriptor ring */
};
/* Break the vnic_wq_buf allocations into blocks of 64 entries */
#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
/* Entries per block: rings smaller than 64 descriptors use the 32-entry
 * block size.  (The cast applies to the comparison result and is
 * value-preserving.) */
#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)(entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
/* Bytes per bookkeeping allocation block. */
#define VNIC_WQ_BUF_BLK_SZ \
	(VNIC_WQ_BUF_DFLT_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
/* Blocks needed to hold 'entries' buffers.  (This macro was previously
 * defined twice with identical bodies; the redundant duplicate has been
 * removed.) */
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
/* Per-queue software state paired with one hardware work queue. */
struct vnic_wq {
	unsigned int index; /* WQ number */
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
	struct vnic_dev_ring ring; /* descriptor ring */
	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; /* bookkeeping blocks */
	struct vnic_wq_buf *to_use; /* next slot svnic_wq_post() fills */
	struct vnic_wq_buf *to_clean; /* oldest outstanding slot */
	unsigned int pkts_outstanding;
};
/* Number of descriptors software may still post. */
static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq)
{
	/* how many does SW own? */
	return wq->ring.desc_avail;
}
/* Number of descriptors outstanding to hardware (one slot is reserved). */
static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq)
{
	/* how many does HW own? */
	return wq->ring.desc_count - wq->ring.desc_avail - 1;
}
/* Descriptor that the next svnic_wq_post() will fill. */
static inline void *svnic_wq_next_desc(struct vnic_wq *wq)
{
	return wq->to_use->desc;
}
/*
 * svnic_wq_post - stage one fragment on the WQ, ringing the doorbell on eop.
 * @os_buf:   caller cookie; retained only on the end-of-packet fragment
 * @dma_addr: DMA address of the fragment data
 * @len:      fragment length in bytes
 * @sop:      start-of-packet flag
 * @eop:      end-of-packet flag; triggers the posted_index doorbell write
 *
 * Consumes one descriptor from the available count.
 */
static inline void svnic_wq_post(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr,
	unsigned int len, int sop, int eop)
{
	struct vnic_wq_buf *buf = wq->to_use;
	buf->sop = sop;
	buf->os_buf = eop ? os_buf : NULL;
	buf->dma_addr = dma_addr;
	buf->len = len;
	buf = buf->next;
	if (eop) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &wq->ctrl->posted_index);
	}
	wq->to_use = buf;
	wq->ring.desc_avail--;
}
/*
 * svnic_wq_service - reclaim descriptors completed by hardware.
 * @completed_index: index of the last completed buffer
 * @buf_service:     per-buffer completion callback
 * @opaque:          passed through to the callback
 *
 * Walks from to_clean through (and including) completed_index, invoking
 * the callback on each buffer and returning its descriptor to the pool.
 */
static inline void svnic_wq_service(struct vnic_wq *wq,
	struct cq_desc *cq_desc, u16 completed_index,
	void (*buf_service)(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
	void *opaque)
{
	struct vnic_wq_buf *buf;
	buf = wq->to_clean;
	while (1) {
		(*buf_service)(wq, cq_desc, buf, opaque);
		wq->ring.desc_avail++;
		wq->to_clean = buf->next;
		if (buf->index == completed_index)
			break;
		buf = wq->to_clean;
	}
}
/* Non-inline WQ operations, implemented in vnic_wq.c. */
void svnic_wq_free(struct vnic_wq *wq);
int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size);
int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int post_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int svnic_wq_error_status(struct vnic_wq *wq);
void svnic_wq_enable(struct vnic_wq *wq);
int svnic_wq_disable(struct vnic_wq *wq);
void svnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
#endif /* _VNIC_WQ_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
/* Ethernet work queue descriptor: 16B, all fields little-endian */
struct wq_enet_desc {
	__le64 address;
	__le16 length;
	__le16 mss_loopback;
	__le16 header_length_flags;
	__le16 vlan_tag;
};
/* Field widths, masks and shifts used to pack the descriptor words. */
#define WQ_ENET_ADDR_BITS 64
#define WQ_ENET_LEN_BITS 14
#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
#define WQ_ENET_MSS_BITS 14
#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
#define WQ_ENET_MSS_SHIFT 2
#define WQ_ENET_LOOPBACK_SHIFT 1
#define WQ_ENET_HDRLEN_BITS 10
#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
#define WQ_ENET_FLAGS_OM_BITS 2
#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
#define WQ_ENET_FLAGS_EOP_SHIFT 12
#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
/* Values for the 2-bit offload-mode field. */
#define WQ_ENET_OFFLOAD_MODE_CSUM 0
#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
#define WQ_ENET_OFFLOAD_MODE_TSO 3
/*
 * wq_enet_desc_enc - pack the given fields into a wq_enet_desc.
 *
 * Each word is assembled in CPU order, masked/shifted per the WQ_ENET_*
 * layout macros, then stored little-endian.
 */
static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
	u64 address, u16 length, u16 mss, u16 header_length,
	u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
	u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
{
	u16 mss_lb;
	u16 hdr_flags;

	mss_lb = ((mss & WQ_ENET_MSS_MASK) << WQ_ENET_MSS_SHIFT) |
		((loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
	hdr_flags = (header_length & WQ_ENET_HDRLEN_MASK) |
		((offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS) |
		((eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT) |
		((cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) |
		((fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) |
		((vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);

	desc->address = cpu_to_le64(address);
	desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
	desc->mss_loopback = cpu_to_le16(mss_lb);
	desc->header_length_flags = cpu_to_le16(hdr_flags);
	desc->vlan_tag = cpu_to_le16(vlan_tag);
}
/*
 * wq_enet_desc_dec - unpack a wq_enet_desc into its component fields.
 *
 * Inverse of wq_enet_desc_enc(): converts each little-endian word to
 * CPU order and extracts the packed bitfields through the callers'
 * output pointers.
 */
static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
	u64 *address, u16 *length, u16 *mss, u16 *header_length,
	u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
	u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
{
	*address = le64_to_cpu(desc->address);
	*length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
	*mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
		WQ_ENET_MSS_MASK;
	*loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
		WQ_ENET_LOOPBACK_SHIFT) & 1);
	*header_length = le16_to_cpu(desc->header_length_flags) &
		WQ_ENET_HDRLEN_MASK;
	*offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
	*eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_FLAGS_EOP_SHIFT) & 1);
	*cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
	*fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
	*vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
	*vlan_tag = le16_to_cpu(desc->vlan_tag);
}
#endif /* _WQ_ENET_DESC_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment