Commit c8806b6c authored by Narsimhulu Musini's avatar Narsimhulu Musini Committed by James Bottomley

snic: driver for Cisco SCSI HBA

Cisco has developed a new PCI HBA interface called sNIC, which stands for
SCSI NIC. This is a new storage feature supported on a specialized network
adapter. The new PCI function provides a uniform host interface and abstracts
the backend storage.

[jejb: fix up checkpatch errors]
Signed-off-by: default avatarNarsimhulu Musini <nmusini@cisco.com>
Signed-off-by: default avatarSesidhar Baddela <sebaddel@cisco.com>
Reviewed-by: default avatarHannes Reinecke <hare@suse.de>
Signed-off-by: default avatarJames Bottomley <JBottomley@Odin.com>
parent 8d2b21db
......@@ -2590,6 +2590,13 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/fnic/
CISCO SCSI HBA DRIVER
M: Narsimhulu Musini <nmusini@cisco.com>
M: Sesidhar Baddela <sebaddel@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/snic/
CMPC ACPI DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
M: Daniel Oliveira Nascimento <don@syst.com.br>
......
......@@ -634,6 +634,23 @@ config FCOE_FNIC
<file:Documentation/scsi/scsi.txt>.
The module will be called fnic.
config SCSI_SNIC
tristate "Cisco SNIC Driver"
depends on PCI && SCSI
help
This is support for the Cisco PCI-Express SCSI HBA.
To compile this driver as a module, choose M here and read
<file:Documentation/scsi/scsi.txt>.
The module will be called snic.
config SCSI_SNIC_DEBUG_FS
bool "Cisco SNIC Driver Debugfs Support"
depends on SCSI_SNIC && DEBUG_FS
help
This enables listing of debugging information from the SNIC driver,
available via the debugfs file system.
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI
......
......@@ -39,6 +39,7 @@ obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_SCSI_SNIC) += snic/
obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
......
obj-$(CONFIG_SCSI_SNIC) += snic.o
snic-y := \
snic_attrs.o \
snic_main.o \
snic_res.o \
snic_isr.o \
snic_ctl.o \
snic_io.o \
snic_scsi.o \
snic_disc.o \
vnic_cq.o \
vnic_intr.o \
vnic_dev.o \
vnic_wq.o
snic-$(CONFIG_SCSI_SNIC_DEBUG_FS) += snic_debugfs.o snic_trc.o
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
/*
 * Completion queue descriptor types.
 * The type is carried in the low bits of cq_desc.type_color.
 */
enum cq_desc_types {
	CQ_DESC_TYPE_WQ_ENET = 0,	/* Ethernet work-queue completion */
	CQ_DESC_TYPE_DESC_COPY = 1,	/* descriptor-copy completion */
	CQ_DESC_TYPE_WQ_EXCH = 2,	/* exchange work-queue completion */
	CQ_DESC_TYPE_RQ_ENET = 3,	/* Ethernet receive-queue completion */
	CQ_DESC_TYPE_RQ_FCP = 4,	/* FCP receive-queue completion */
};
/* Completion queue descriptor: 16B
 *
 * All completion queues have this basic layout. The
 * type_specific area is unique for each completion
 * queue type.
 */
struct cq_desc {
	__le16 completed_index;	/* index of the last completed descriptor */
	__le16 q_number;	/* queue this completion refers to */
	u8 type_specific[11];	/* payload, interpreted per descriptor type */
	u8 type_color;		/* low 4 bits: type; bit 7: color phase */
};
/*
 * Field extraction constants for struct cq_desc: the type occupies the
 * low 4 bits of type_color and the color (phase) bit is bit 7; q_number
 * and completed_index are valid in their low 10 and 12 bits respectively.
 */
#define CQ_DESC_TYPE_BITS        4
#define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK       1
#define CQ_DESC_COLOR_SHIFT      7
#define CQ_DESC_Q_NUM_BITS       10
#define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS    12
#define CQ_DESC_COMP_NDX_MASK    ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
/*
 * cq_desc_dec : decode the type, color, queue number and completed index
 * out of a raw completion descriptor.
 */
static inline void cq_desc_dec(const struct cq_desc *desc_arg,
	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
	const u8 tc = desc_arg->type_color;

	*color = (tc >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;

	/*
	 * Make sure color bit is read from desc *before* other fields
	 * are read from desc. Hardware guarantees color bit is last
	 * bit (byte) written. Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */
	rmb();

	*type = tc & CQ_DESC_TYPE_MASK;
	*q_number = le16_to_cpu(desc_arg->q_number) & CQ_DESC_Q_NUM_MASK;
	*completed_index = le16_to_cpu(desc_arg->completed_index) &
		CQ_DESC_COMP_NDX_MASK;
}
#endif /* _CQ_DESC_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
#include "cq_desc.h"
/* Ethernet completion queue descriptor: 16B
 * Same layout as the generic struct cq_desc, with the type-specific
 * area unused (reserved). */
struct cq_enet_wq_desc {
	__le16 completed_index;	/* index of the last completed descriptor */
	__le16 q_number;	/* queue this completion refers to */
	u8 reserved[11];
	u8 type_color;		/* low 4 bits: type; bit 7: color phase */
};
/*
 * cq_enet_wq_desc_dec : decode an Ethernet WQ completion descriptor by
 * delegating to the generic CQ descriptor decoder.
 */
static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
	const struct cq_desc *base = (struct cq_desc *) desc;

	cq_desc_dec(base, type, color, q_number, completed_index);
}
#endif /* _CQ_ENET_DESC_H_ */
This diff is collapsed.
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/device.h>
#include "snic.h"
/*
 * snic_show_sym_name : sysfs read handler, reports the adapter name.
 */
static ssize_t
snic_show_sym_name(struct device *dev,
		   struct device_attribute *attr,
		   char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct snic *snic = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%s\n", snic->name);
}
/*
 * snic_show_state : sysfs read handler, reports the adapter state as a
 * human-readable string.
 */
static ssize_t
snic_show_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct snic *snic = shost_priv(class_to_shost(dev));
	const char *state = snic_state_str[snic_get_state(snic)];

	return snprintf(buf, PAGE_SIZE, "%s\n", state);
}
/*
 * snic_show_drv_version : sysfs read handler, reports the driver version
 * string; reads no per-adapter state.
 */
static ssize_t
snic_show_drv_version(struct device *dev,
		      struct device_attribute *attr,
		      char *buf)
{
	ssize_t len;

	len = snprintf(buf, PAGE_SIZE, "%s\n", SNIC_DRV_VERSION);

	return len;
}
/*
 * snic_show_link_state : sysfs read handler, reports link up/down.
 * For DAS backends the cached status is refreshed from the vNIC device
 * before reporting.
 */
static ssize_t
snic_show_link_state(struct device *dev,
		     struct device_attribute *attr,
		     char *buf)
{
	struct snic *snic = shost_priv(class_to_shost(dev));
	const char *link;

	if (snic->config.xpt_type == SNIC_DAS)
		snic->link_status = svnic_dev_link_status(snic->vdev);

	link = snic->link_status ? "Link Up" : "Link Down";

	return snprintf(buf, PAGE_SIZE, "%s\n", link);
}
/* Read-only sysfs attributes exposed for each snic SCSI host */
static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL);
static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL);

/* NULL-terminated attribute table; presumably referenced from the SCSI
 * host template elsewhere in the driver — confirm against snic_main.c */
struct device_attribute *snic_attrs[] = {
	&dev_attr_snic_sym_name,
	&dev_attr_snic_state,
	&dev_attr_drv_version,
	&dev_attr_link_state,
	NULL,
};
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include <linux/ctype.h>
#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"
/*
 * snic_handle_link : work-queue handler for link flaps.
 *
 * Refreshes the cached link status and link-down count from the vNIC
 * device. Non-DAS transports are not handled: the handler only logs the
 * event and asserts not-implemented.
 */
void
snic_handle_link(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, link_work);

	if (snic->config.xpt_type != SNIC_DAS) {
		SNIC_HOST_INFO(snic->shost, "Link Event Received.\n");
		SNIC_ASSERT_NOT_IMPL(1);

		return;
	}

	/* Cache current link state and cumulative down-count */
	snic->link_status = svnic_dev_link_status(snic->vdev);
	snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev);
	SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n",
		       ((snic->link_status) ? "Up" : "Down"));
}
/*
 * snic_ver_enc : Encodes a dotted version string ("a.b.c.d", similar to
 * a netmask string) into a single int, one byte per component
 * (v[0] in the most significant byte).
 *
 * Returns the packed version, or -1 if the string is malformed or
 * encodes to 0 (e.g. "0.0.0.0" is treated as invalid).
 */
static int
snic_ver_enc(const char *s)
{
	int v[4] = {0};
	int i = 0, x = 0;
	char c;
	const char *p = s;

	/* validate version string length: "a.b.c.d" needs at least 7
	 * characters, "aaa.bbb.ccc.ddd" at most 15 */
	if ((strlen(s) > 15) || (strlen(s) < 7))
		goto end;

	while ((c = *p++)) {
		if (c == '.') {
			i++;
			continue;
		}

		/*
		 * i indexes v[0..3]; reject a fifth component or any
		 * non-digit. (The bound must be i > 3: the previous
		 * i > 4 check permitted an out-of-bounds write to v[4]
		 * for inputs with more than three dots.)
		 */
		if (i > 3 || !isdigit(c))
			goto end;

		v[i] = v[i] * 10 + (c - '0');
	}

	/* validate sub version numbers: each component must fit a byte */
	for (i = 3; i >= 0; i--)
		if (v[i] > 0xff)
			goto end;

	x |= (v[0] << 24) | v[1] << 16 | v[2] << 8 | v[3];

end:
	if (x == 0) {
		SNIC_ERR("Invalid version string [%s].\n", s);

		return -1;
	}

	return x;
} /* end of snic_ver_enc */
/*
 * snic_queue_exch_ver_req :
 *
 * Queues an Exchange Version request to communicate host information to
 * the firmware; the firmware replies with its version details, handled
 * in snic_io_exch_ver_cmpl_handler().
 *
 * Returns 0 on success, -ENOMEM if no request could be allocated, or
 * the snic_queue_wq_desc() error.
 */
int
snic_queue_exch_ver_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	struct snic_host_req *req = NULL;
	u32 ver = 0;
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "Exch Ver Req Preparing...\n");

	rqi = snic_req_init(snic, 0);
	if (!rqi) {
		/* Assign the error code before logging it, so the message
		 * reports -ENOMEM instead of the stale initial 0. */
		ret = -ENOMEM;
		SNIC_HOST_ERR(snic->shost,
			      "Queuing Exch Ver Req failed, err = %d\n",
			      ret);

		goto error;
	}

	req = rqi_to_req(rqi);

	/* Initialize snic_host_req */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_EXCH_VER, 0, SCSI_NO_TAG,
			snic->config.hid, 0, (ulong)rqi);
	ver = snic_ver_enc(SNIC_DRV_VERSION);
	req->u.exch_ver.drvr_ver = cpu_to_le32(ver);
	req->u.exch_ver.os_type = cpu_to_le32(SNIC_OS_LINUX);

	snic_handle_untagged_req(snic, rqi);

	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
	if (ret) {
		/* Undo the untagged-request bookkeeping before bailing */
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost,
			      "Queuing Exch Ver Req failed, err = %d\n",
			      ret);

		goto error;
	}

	SNIC_HOST_INFO(snic->shost, "Exch Ver Req is issued. ret = %d\n", ret);

error:
	return ret;
} /* end of snic_queue_exch_ver_req */
/*
 * snic_io_exch_ver_cmpl_handler : processes the firmware's Exchange
 * Version completion.
 *
 * Caches the firmware-reported limits in snic->fwinfo (under snic_lock,
 * which snic_get_conf() also takes to read them), adjusts the SCSI host
 * limits accordingly, and wakes any waiter. Always releases the untagged
 * request and returns 0.
 */
int
snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	struct snic_req_info *rqi = NULL;
	struct snic_exch_ver_rsp *exv_cmpl = &fwreq->u.exch_ver_cmpl;
	u8 typ, hdr_stat;
	u32 cmnd_id, hid, max_sgs;
	ulong ctx = 0;
	unsigned long flags;
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_BUG_ON(snic->config.hid != hid);
	/* ctx carries the request-info pointer encoded at issue time */
	rqi = (struct snic_req_info *) ctx;

	if (hdr_stat) {
		/* Firmware reported failure: skip the fwinfo update, but
		 * still release the request below. */
		SNIC_HOST_ERR(snic->shost,
			      "Exch Ver Completed w/ err status %d\n",
			      hdr_stat);

		goto exch_cmpl_end;
	}

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->fwinfo.fw_ver = le32_to_cpu(exv_cmpl->version);
	snic->fwinfo.hid = le32_to_cpu(exv_cmpl->hid);
	snic->fwinfo.max_concur_ios = le32_to_cpu(exv_cmpl->max_concur_ios);
	snic->fwinfo.max_sgs_per_cmd = le32_to_cpu(exv_cmpl->max_sgs_per_cmd);
	snic->fwinfo.max_io_sz = le32_to_cpu(exv_cmpl->max_io_sz);
	snic->fwinfo.max_tgts = le32_to_cpu(exv_cmpl->max_tgts);
	snic->fwinfo.io_tmo = le16_to_cpu(exv_cmpl->io_timeout);

	SNIC_HOST_INFO(snic->shost,
		       "vers %u hid %u max_concur_ios %u max_sgs_per_cmd %u max_io_sz %u max_tgts %u fw tmo %u\n",
		       snic->fwinfo.fw_ver,
		       snic->fwinfo.hid,
		       snic->fwinfo.max_concur_ios,
		       snic->fwinfo.max_sgs_per_cmd,
		       snic->fwinfo.max_io_sz,
		       snic->fwinfo.max_tgts,
		       snic->fwinfo.io_tmo);

	SNIC_HOST_INFO(snic->shost,
		       "HBA Capabilities = 0x%x\n",
		       le32_to_cpu(exv_cmpl->hba_cap));

	/* Updating SGList size: shrink to the firmware limit, or just
	 * report when the firmware supports more than the driver does. */
	max_sgs = snic->fwinfo.max_sgs_per_cmd;
	if (max_sgs && max_sgs < SNIC_MAX_SG_DESC_CNT) {
		snic->shost->sg_tablesize = max_sgs;
		SNIC_HOST_INFO(snic->shost, "Max SGs set to %d\n",
			       snic->shost->sg_tablesize);
	} else if (max_sgs > snic->shost->sg_tablesize) {
		SNIC_HOST_INFO(snic->shost,
			       "Target type %d Supports Larger Max SGList %d than driver's Max SG List %d.\n",
			       snic->config.xpt_type, max_sgs,
			       snic->shost->sg_tablesize);
	}

	/* Never queue more commands than the firmware can take */
	if (snic->shost->can_queue > snic->fwinfo.max_concur_ios)
		snic->shost->can_queue = snic->fwinfo.max_concur_ios;

	/* max_io_sz is in bytes; max_sectors is in 512-byte units */
	snic->shost->max_sectors = snic->fwinfo.max_io_sz >> 9;

	/* Wake the synchronous waiter in snic_get_conf(), if present */
	if (snic->fwinfo.wait)
		complete(snic->fwinfo.wait);

	spin_unlock_irqrestore(&snic->snic_lock, flags);

exch_cmpl_end:
	snic_release_untagged_req(snic, rqi);

	SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);

	return ret;
} /* end of snic_io_exch_ver_cmpl_handler */
/*
 * snic_get_conf
 *
 * Synchronous call: issues an Exchange Version request and waits (up to
 * 2s, retried up to 3 times) for snic_io_exch_ver_cmpl_handler() to fill
 * in snic->fwinfo and signal the on-stack completion.
 *
 * Returns 0 on success, the queueing error, or -ETIMEDOUT if the
 * firmware never answered.
 */
int
snic_get_conf(struct snic *snic)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	int ret;
	int nr_retries = 3;

	SNIC_HOST_INFO(snic->shost, "Retrieving snic params.\n");

	/* Publish the wait object under the lock so the completion
	 * handler can find (and signal) it. */
	spin_lock_irqsave(&snic->snic_lock, flags);
	memset(&snic->fwinfo, 0, sizeof(snic->fwinfo));
	snic->fwinfo.wait = &wait;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Additional delay to handle HW Resource initialization. */
	msleep(50);

	/*
	 * Exch ver req can be ignored by FW, if HW Resource initialization
	 * is in progress, Hence retry.
	 */
	do {
		ret = snic_queue_exch_ver_req(snic);
		if (ret)
			return ret;

		wait_for_completion_timeout(&wait, msecs_to_jiffies(2000));

		spin_lock_irqsave(&snic->snic_lock, flags);
		/* A non-zero fw_ver means the completion handler ran */
		ret = (snic->fwinfo.fw_ver != 0) ? 0 : -ETIMEDOUT;
		if (ret)
			SNIC_HOST_ERR(snic->shost,
				      "Failed to retrieve snic params,\n");

		/* Unset fwinfo.wait on success or on the last retry, so a
		 * late completion cannot signal a dead stack completion. */
		if (ret == 0 || nr_retries == 1)
			snic->fwinfo.wait = NULL;

		spin_unlock_irqrestore(&snic->snic_lock, flags);
	} while (ret && --nr_retries);

	return ret;
} /* end of snic_get_conf */
This diff is collapsed.
This diff is collapsed.
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_DISC_H
#define __SNIC_DISC_H
#include "snic_fwint.h"
/* Adapter-wide target discovery state machine */
enum snic_disc_state {
	SNIC_DISC_NONE,		/* no discovery performed yet */
	SNIC_DISC_INIT,		/* discovery initialized */
	SNIC_DISC_PENDING,	/* discovery in progress */
	SNIC_DISC_DONE		/* discovery completed */
};

struct snic;

/* Per-adapter discovery context */
struct snic_disc {
	struct list_head tgt_list;	/* discovered targets */
	enum snic_disc_state state;
	struct mutex mutex;		/* protects discovery context —
					   TODO confirm exact scope */
	u16	disc_id;
	u8	req_cnt;
	u32	nxt_tgt_id;
	u32	rtgt_cnt;		/* count of reported targets */
	u8	*rtgt_info;		/* reported-target info buffer */
	struct delayed_work disc_timeout;	/* discovery timeout work */
	void (*cb)(struct snic *);	/* called when discovery finishes —
					   presumably; verify callers */
};

#define SNIC_TGT_NAM_LEN	16

/* Per-target state machine */
enum snic_tgt_state {
	SNIC_TGT_STAT_NONE,
	SNIC_TGT_STAT_INIT,
	SNIC_TGT_STAT_ONLINE,	/* Target is Online */
	SNIC_TGT_STAT_OFFLINE,	/* Target is Offline */
	SNIC_TGT_STAT_DEL,
};
/* Driver-private target data, embedded in struct snic_tgt as tdata */
struct snic_tgt_priv {
	struct list_head list;
	enum snic_tgt_type typ;
	u16 disc_id;
	/*
	 * NOTE(review): this declares an array of 16 *pointers* to char,
	 * not a 16-byte character buffer. If a fixed-size name string is
	 * intended, "char name[SNIC_TGT_NAM_LEN]" is probably what was
	 * meant — confirm against the code that fills/reads this field
	 * before changing the layout.
	 */
	char *name[SNIC_TGT_NAM_LEN];

	union {
		/* DAS Target specific info */
		/* SAN Target specific info */
		u8 dummmy;
	} u;
};
/* snic tgt flags */
#define SNIC_TGT_SCAN_PENDING	0x01	/* a SCSI scan is queued */

/* One discovered backend target */
struct snic_tgt {
	struct list_head list;		/* link in snic_disc.tgt_list */
	u16	id;
	u16	channel;
	u32	flags;			/* SNIC_TGT_* flags */
	u32	scsi_tgt_id;
	enum snic_tgt_state state;
	struct device dev;		/* embedded device; lifetime ends
					   via snic_tgt_dev_release() */
	struct work_struct scan_work;	/* deferred SCSI target scan */
	struct work_struct del_work;	/* deferred target removal */
	struct snic_tgt_priv tdata;	/* driver-private target data */
};

struct snic_fw_req;

/* Target discovery API */
void snic_disc_init(struct snic_disc *);
int snic_disc_start(struct snic *);
void snic_disc_term(struct snic *);
int snic_report_tgt_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_tgtinfo_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq);
void snic_process_report_tgts_rsp(struct work_struct *);
void snic_handle_tgt_disc(struct work_struct *);
void snic_handle_disc(struct work_struct *);
void snic_tgt_dev_release(struct device *);
void snic_tgt_del_all(struct snic *);
/* Convert an embedded struct device back to its containing snic_tgt */
#define dev_to_tgt(d) \
	container_of(d, struct snic_tgt, dev)
/* True iff @dev was created by the snic target code, identified by its
 * release callback. */
static inline int
is_snic_target(struct device *dev)
{
	return (dev->release == snic_tgt_dev_release) ? 1 : 0;
}
/* Map a scsi_target to its containing snic_tgt, or NULL when the parent
 * device was not created by the snic target code. */
#define starget_to_tgt(st)	\
	(is_snic_target(((struct scsi_target *) st)->dev.parent) ? \
		dev_to_tgt(st->dev.parent) : NULL)

/* SCSI host that owns the given snic_tgt */
#define snic_tgt_to_shost(t)	\
	dev_to_shost(t->dev.parent)
/*
 * snic_tgt_chkready : returns 0 when the target is online, otherwise a
 * DID_NO_CONNECT host-byte SCSI result.
 */
static inline int
snic_tgt_chkready(struct snic_tgt *tgt)
{
	return (tgt->state == SNIC_TGT_STAT_ONLINE) ?
		0 : (DID_NO_CONNECT << 16);
}
/* Human-readable name of an enum snic_tgt_state value */
const char *snic_tgt_state_to_str(int);
/* Abort outstanding IO on the given target */
int snic_tgt_scsi_abort_io(struct snic_tgt *);
#endif /* end of __SNIC_DISC_H */
This diff is collapsed.
This diff is collapsed.
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _SNIC_IO_H
#define _SNIC_IO_H
#define SNIC_DFLT_SG_DESC_CNT	32	/* Default descriptors for sgl */
#define SNIC_MAX_SG_DESC_CNT	60	/* Max descriptor for sgl */
#define SNIC_SG_DESC_ALIGN	16	/* Descriptor address alignment */

/* SG descriptor for snic */
struct snic_sg_desc {
	__le64 addr;	/* buffer address, little-endian on the wire */
	__le32 len;	/* buffer length in bytes */
	u32 _resvd;
};

/* Inline SG list sized for the common case */
struct snic_dflt_sgl {
	struct snic_sg_desc sg_desc[SNIC_DFLT_SG_DESC_CNT];
};

/* Inline SG list sized for the largest supported command */
struct snic_max_sgl {
	struct snic_sg_desc sg_desc[SNIC_MAX_SG_DESC_CNT];
};

enum snic_req_cache_type {
	SNIC_REQ_CACHE_DFLT_SGL = 0,	/* cache with default size sgl */
	SNIC_REQ_CACHE_MAX_SGL,		/* cache with max size sgl */
	SNIC_REQ_TM_CACHE,		/* cache for task mgmt reqs, contains
					   snic_host_req objects only */
	SNIC_REQ_MAX_CACHES		/* number of sgl caches */
};

/* Per IO internal state */
struct snic_internal_io_state {
	char	*rqi;		/* presumably the owning snic_req_info —
				   TODO confirm against snic_scsi.c */
	u64	flags;
	u32	state;		/* presumably an enum snic_ioreq_state value */
	u32	abts_status;	/* Abort completion status */
	u32	lr_status;	/* device reset completion status */
};

/* IO state machine */
enum snic_ioreq_state {
	SNIC_IOREQ_NOT_INITED = 0,
	SNIC_IOREQ_PENDING,
	SNIC_IOREQ_ABTS_PENDING,
	SNIC_IOREQ_ABTS_COMPLETE,
	SNIC_IOREQ_LR_PENDING,
	SNIC_IOREQ_LR_COMPLETE,
	SNIC_IOREQ_COMPLETE,
};
struct snic;
struct snic_host_req;

/*
 * snic_req_info : Contains info about IO, one per scsi command.
 * Notes: Make sure that the structure is aligned to 16 B
 * this helps in easy access to snic_req_info from snic_host_req
 */
struct snic_req_info {
	struct list_head list;
	struct snic_host_req *req;	/* request buffer sent to firmware */
	u64	start_time;		/* start time in jiffies */
	u16	rq_pool_type;		/* notion of request pool type */
	u16	req_len;		/* buf len passing to fw (req + sgl) */
	u32	tgt_id;
	u32	tm_tag;
	u8	io_cmpl:1;		/* sets to 1 when fw completes IO */
	u8	resvd[3];
	struct scsi_cmnd *sc;		/* Associated scsi cmd */
	struct snic	*snic;		/* Associated snic */
	ulong	sge_va;			/* Pointer to Resp Buffer */
	u64	snsbuf_va;		/* sense buffer address */
	struct snic_host_req *abort_req;	/* outstanding abort, if any */
	struct completion *abts_done;	/* signaled on abort completion */
	struct snic_host_req *dr_req;	/* outstanding device reset, if any */
	struct completion *dr_done;	/* signaled on reset completion */
};

/* Firmware request held by a snic_req_info */
#define rqi_to_req(rqi) \
	((struct snic_host_req *) (((struct snic_req_info *)rqi)->req))

/* Recover the snic_req_info stashed in the request header's init_ctx */
#define req_to_rqi(req) \
	((struct snic_req_info *) (((struct snic_host_req *)req)->hdr.init_ctx))

/* SG descriptors are laid out immediately after the host request */
#define req_to_sgl(req) \
	((struct snic_sg_desc *) (((struct snic_host_req *)req)+1))

/* Request allocation / teardown helpers (snic_io.c) */
struct snic_req_info *
snic_req_init(struct snic *, int sg_cnt);
void snic_req_free(struct snic *, struct snic_req_info *);
void snic_calc_io_process_time(struct snic *, struct snic_req_info *);
void snic_pci_unmap_rsp_buf(struct snic *, struct snic_req_info *);
struct snic_host_req *
snic_abort_req_init(struct snic *, struct snic_req_info *);
struct snic_host_req *
snic_dr_req_init(struct snic *, struct snic_req_info *);
#endif /* _SNIC_IO_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "snic_io.h"
#include "snic.h"
/*
 * snic_isr_msix_wq : MSI-X ISR for the work queue vector. Drains WQ
 * completions and returns the consumed credits to the interrupt block.
 */
static irqreturn_t
snic_isr_msix_wq(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long nr_cmpl;

	/* ISR bookkeeping for stats */
	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	nr_cmpl = snic_wq_cmpl_handler(snic, -1);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
				  nr_cmpl,
				  1,	/* unmask intr */
				  1);	/* reset intr timer */

	return IRQ_HANDLED;
} /* end of snic_isr_msix_wq */
/*
 * snic_isr_msix_io_cmpl : MSI-X ISR for the IO-completion vector.
 * Drains firmware CQ completions and returns the consumed credits.
 */
static irqreturn_t
snic_isr_msix_io_cmpl(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long nr_cmpl;

	/* ISR bookkeeping for stats */
	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	nr_cmpl = snic_fwcq_cmpl_handler(snic, -1);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL],
				  nr_cmpl,
				  1,	/* unmask intr */
				  1);	/* reset intr timer */

	return IRQ_HANDLED;
} /* end of snic_isr_msix_io_cmpl */
/*
 * snic_isr_msix_err_notify : MSI-X ISR for the error/notify vector.
 * Logs queue errors and processes link-event notifications.
 */
static irqreturn_t
snic_isr_msix_err_notify(int irq, void *data)
{
	struct snic *snic = data;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);
	svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]);
	snic_log_q_error(snic);

	/* Handling link events */
	snic_handle_link_event(snic);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_err_notify */
/*
 * snic_free_intr : releases every MSI-X vector that was successfully
 * requested by snic_request_intr().
 */
void
snic_free_intr(struct snic *snic)
{
	int i;

	/* ONLY interrupt mode MSIX is supported */
	for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
		if (!snic->msix[i].requested)
			continue;

		free_irq(snic->msix_entry[i].vector,
			 snic->msix[i].devid);
	}
} /* end of snic_free_intr */
/*
 * snic_request_intr : names and registers the MSI-X ISRs for the WQ,
 * IO-completion and error/notify vectors.
 *
 * Returns 0 on success, or the request_irq() error after freeing any
 * vectors already requested.
 */
int
snic_request_intr(struct snic *snic)
{
	int ret = 0, i;
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);

	/*
	 * Currently HW supports single WQ and CQ. So passing devid as snic.
	 * When hardware supports multiple WQs and CQs, one idea is
	 * to pass devid as corresponding WQ or CQ ptr and retrieve snic
	 * from queue ptr.
	 * Except for err_notify, which is always one.
	 *
	 * The "%.11s" precision bounds the adapter name so the fixed-size
	 * devname buffers cannot overflow.
	 */
	sprintf(snic->msix[SNIC_MSIX_WQ].devname,
		"%.11s-scsi-wq",
		snic->name);
	snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq;
	snic->msix[SNIC_MSIX_WQ].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname,
		"%.11s-io-cmpl",
		snic->name);
	snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl;
	snic->msix[SNIC_MSIX_IO_CMPL].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname,
		"%.11s-err-notify",
		snic->name);
	snic->msix[SNIC_MSIX_ERR_NOTIFY].isr = snic_isr_msix_err_notify;
	snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;

	for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
		ret = request_irq(snic->msix_entry[i].vector,
				  snic->msix[i].isr,
				  0,
				  snic->msix[i].devname,
				  snic->msix[i].devid);
		if (ret) {
			/* message typo fixed: "requrest_irq" */
			SNIC_HOST_ERR(snic->shost,
				      "MSI-X: request_irq(%d) failed %d\n",
				      i,
				      ret);

			/* Roll back the vectors requested so far */
			snic_free_intr(snic);
			break;
		}
		snic->msix[i].requested = 1;
	}

	return ret;
} /* end of snic_request_intr */
/*
 * snic_set_intr_mode : enables MSI-X (the only supported mode) and
 * fixes wq/cq/intr counts to the n WQ + m CQ + 1 error/notify layout.
 *
 * Returns 0 on success, -EINVAL if the device lacks resources or MSI-X
 * could not be enabled (intr mode is then marked UNKNOWN).
 */
int
snic_set_intr_mode(struct snic *snic)
{
	unsigned int n = ARRAY_SIZE(snic->wq);
	unsigned int m = SNIC_CQ_IO_CMPL_MAX;
	unsigned int i;

	/*
	 * We need n WQs, m CQs, and n+m+1 INTRs
	 * (last INTR is used for WQ/CQ errors and notification area
	 */
	BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
		     ARRAY_SIZE(snic->intr));
	SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));

	for (i = 0; i < (n + m + 1); i++)
		snic->msix_entry[i].entry = i;

	/* Only proceed when the device advertises enough WQs and CQs */
	if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
		if (!pci_enable_msix(snic->pdev,
				     snic->msix_entry,
				     (n + m + 1))) {
			/* Clamp counts to exactly what we will use */
			snic->wq_count = n;
			snic->cq_count = n + m;
			snic->intr_count = n + m + 1;
			snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;

			SNIC_ISR_DBG(snic->shost,
				     "Using MSI-X Interrupts\n");
			svnic_dev_set_intr_mode(snic->vdev,
						VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
} /* end of snic_set_intr_mode */
/*
 * snic_clear_intr_mode : disables MSI-X and records legacy INTx as the
 * device's interrupt-mode bookkeeping state.
 */
void
snic_clear_intr_mode(struct snic *snic)
{
	pci_disable_msix(snic->pdev);

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
This diff is collapsed.
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "wq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "snic.h"
/*
 * snic_get_vnic_config : reads each vNIC configuration field from device
 * space, clamps the tunables into their supported ranges, and logs the
 * resulting configuration.
 *
 * Returns 0 on success or the first svnic_dev_spec() error.
 */
int
snic_get_vnic_config(struct snic *snic)
{
	struct vnic_snic_config *c = &snic->config;
	int ret;

/* Fetch one field of struct vnic_snic_config from the device; on error,
 * logs and returns from the enclosing function. */
#define GET_CONFIG(m) \
	do { \
		ret = svnic_dev_spec(snic->vdev, \
				     offsetof(struct vnic_snic_config, m), \
				     sizeof(c->m), \
				     &c->m); \
		if (ret) { \
			SNIC_HOST_ERR(snic->shost, \
				      "Error getting %s, %d\n", #m, ret); \
			return ret; \
		} \
	} while (0)

	GET_CONFIG(wq_enet_desc_count);
	GET_CONFIG(maxdatafieldsize);
	GET_CONFIG(intr_timer);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(flags);
	GET_CONFIG(io_throttle_count);
	GET_CONFIG(port_down_timeout);
	GET_CONFIG(port_down_io_retries);
	GET_CONFIG(luns_per_tgt);
	GET_CONFIG(xpt_type);
	GET_CONFIG(hid);

	/* Clamp the WQ descriptor count into [MIN, MAX] and round it up
	 * to a multiple of 16 (presumably a ring-size requirement). */
	c->wq_enet_desc_count = min_t(u32,
				      VNIC_SNIC_WQ_DESCS_MAX,
				      max_t(u32,
					    VNIC_SNIC_WQ_DESCS_MIN,
					    c->wq_enet_desc_count));
	c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);

	c->maxdatafieldsize = min_t(u32,
				    VNIC_SNIC_MAXDATAFIELDSIZE_MAX,
				    max_t(u32,
					  VNIC_SNIC_MAXDATAFIELDSIZE_MIN,
					  c->maxdatafieldsize));

	c->io_throttle_count = min_t(u32,
				     VNIC_SNIC_IO_THROTTLE_COUNT_MAX,
				     max_t(u32,
					   VNIC_SNIC_IO_THROTTLE_COUNT_MIN,
					   c->io_throttle_count));

	c->port_down_timeout = min_t(u32,
				     VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX,
				     c->port_down_timeout);

	c->port_down_io_retries = min_t(u32,
					VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX,
					c->port_down_io_retries);

	c->luns_per_tgt = min_t(u32,
				VNIC_SNIC_LUNS_PER_TARGET_MAX,
				max_t(u32,
				      VNIC_SNIC_LUNS_PER_TARGET_MIN,
				      c->luns_per_tgt));

	c->intr_timer = min_t(u32, VNIC_INTR_TIMER_MAX, c->intr_timer);

	/* Log the final (clamped) configuration */
	SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count);
	SNIC_INFO("vNIC mtu %d intr timer %d\n",
		  c->maxdatafieldsize,
		  c->intr_timer);
	SNIC_INFO("vNIC flags 0x%x luns per tgt %d\n",
		  c->flags,
		  c->luns_per_tgt);
	SNIC_INFO("vNIC io throttle count %d\n", c->io_throttle_count);
	SNIC_INFO("vNIC port down timeout %d port down io retries %d\n",
		  c->port_down_timeout,
		  c->port_down_io_retries);
	SNIC_INFO("vNIC back end type = %d\n", c->xpt_type);
	SNIC_INFO("vNIC hid = %d\n", c->hid);

	return 0;
}
/*
 * snic_get_res_counts : queries the device for the number of WQ, CQ and
 * interrupt-control resources; a zero count for any of them is a bug.
 */
void
snic_get_res_counts(struct snic *snic)
{
	struct vnic_dev *vdev = snic->vdev;

	snic->wq_count = svnic_dev_get_res_count(vdev, RES_TYPE_WQ);
	SNIC_BUG_ON(snic->wq_count == 0);

	snic->cq_count = svnic_dev_get_res_count(vdev, RES_TYPE_CQ);
	SNIC_BUG_ON(snic->cq_count == 0);

	snic->intr_count = svnic_dev_get_res_count(vdev,
						   RES_TYPE_INTR_CTRL);
	SNIC_BUG_ON(snic->intr_count == 0);
}
/*
 * snic_free_vnic_res : releases every allocated WQ, CQ and interrupt
 * resource; safe against partial allocation (svnic_*_free of an
 * unallocated entry is the allocator's concern, counts bound the loops).
 */
void
snic_free_vnic_res(struct snic *snic)
{
	unsigned int idx;

	for (idx = 0; idx < snic->wq_count; idx++)
		svnic_wq_free(&snic->wq[idx]);

	for (idx = 0; idx < snic->cq_count; idx++)
		svnic_cq_free(&snic->cq[idx]);

	for (idx = 0; idx < snic->intr_count; idx++)
		svnic_intr_free(&snic->intr[idx]);
}
/*
 * snic_alloc_vnic_res : allocates and initializes the vNIC WQ, CQ and
 * interrupt resources, then primes the stats area.
 *
 * Layout: one WQ per SCSI queue, a same-sized CQ per WQ, and additional
 * (larger) CQs for firmware-to-host messages; one interrupt per CQ plus
 * the error/notify vector. Returns 0 on success; on any failure all
 * resources allocated so far are freed and the error is returned.
 */
int
snic_alloc_vnic_res(struct snic *snic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int intr_offset;
	unsigned int err_intr_enable;
	unsigned int err_intr_offset;
	unsigned int i;
	int ret;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);

	SNIC_INFO("vNIC interrupt mode: %s\n",
		  ((intr_mode == VNIC_DEV_INTR_MODE_INTX) ?
		   "Legacy PCI INTx" :
		   ((intr_mode == VNIC_DEV_INTR_MODE_MSI) ?
		    "MSI" :
		    ((intr_mode == VNIC_DEV_INTR_MODE_MSIX) ?
		     "MSI-X" : "Unknown"))));

	/* only MSI-X is supported */
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);

	SNIC_INFO("wq %d cq %d intr %d\n", snic->wq_count,
		  snic->cq_count,
		  snic->intr_count);

	/* Allocate WQs used for SCSI IOs */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_alloc(snic->vdev,
				     &snic->wq[i],
				     i,
				     snic->config.wq_enet_desc_count,
				     sizeof(struct wq_enet_desc));
		if (ret)
			goto error_cleanup;
	}

	/* CQ for each WQ */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_cq_alloc(snic->vdev,
				     &snic->cq[i],
				     i,
				     snic->config.wq_enet_desc_count,
				     sizeof(struct cq_enet_wq_desc));
		if (ret)
			goto error_cleanup;
	}

	/* snic_set_intr_mode() fixed cq_count to exactly 2 * wq_count */
	SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count);
	/* CQ for FW TO host; sized 3x the WQ ring for firmware messages */
	for (i = snic->wq_count; i < snic->cq_count; i++) {
		ret = svnic_cq_alloc(snic->vdev,
				     &snic->cq[i],
				     i,
				     (snic->config.wq_enet_desc_count * 3),
				     sizeof(struct snic_fw_req));
		if (ret)
			goto error_cleanup;
	}

	for (i = 0; i < snic->intr_count; i++) {
		ret = svnic_intr_alloc(snic->vdev, &snic->intr[i], i);
		if (ret)
			goto error_cleanup;
	}

	/*
	 * Init WQ Resources.
	 * WQ[0 to n] points to CQ[0 to n-1]
	 * firmware to host comm points to CQ[n to m+1]
	 */
	err_intr_enable = 1;
	err_intr_offset = snic->err_intr_offset;

	for (i = 0; i < snic->wq_count; i++) {
		svnic_wq_init(&snic->wq[i],
			      i,
			      err_intr_enable,
			      err_intr_offset);
	}

	/* Each CQ uses the same-numbered interrupt vector */
	for (i = 0; i < snic->cq_count; i++) {
		intr_offset = i;

		svnic_cq_init(&snic->cq[i],
			      0 /* flow_control_enable */,
			      1 /* color_enable */,
			      0 /* cq_head */,
			      0 /* cq_tail */,
			      1 /* cq_tail_color */,
			      1 /* interrupt_enable */,
			      1 /* cq_entry_enable */,
			      0 /* cq_message_enable */,
			      intr_offset,
			      0 /* cq_message_addr */);
	}

	/*
	 * Init INTR resources
	 * Assumption : snic is always in MSI-X mode
	 */
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
	mask_on_assertion = 1;

	for (i = 0; i < snic->intr_count; i++) {
		svnic_intr_init(&snic->intr[i],
				snic->config.intr_timer,
				snic->config.intr_timer_type,
				mask_on_assertion);
	}

	/* init the stats memory by making the first call here */
	ret = svnic_dev_stats_dump(snic->vdev, &snic->stats);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "svnic_dev_stats_dump failed - x%x\n",
			      ret);
		goto error_cleanup;
	}

	/* Clear LIF stats */
	svnic_dev_stats_clear(snic->vdev);
	ret = 0;

	return ret;

error_cleanup:
	snic_free_vnic_res(snic);

	return ret;
}
/*
 * snic_log_q_error : reads the error-status register of every work queue
 * and logs the value for each queue that reports a non-zero status.
 */
void
snic_log_q_error(struct snic *snic)
{
	unsigned int qi;
	u32 err;

	for (qi = 0; qi < snic->wq_count; qi++) {
		err = ioread32(&snic->wq[qi].ctrl->error_status);
		if (!err)
			continue;

		SNIC_HOST_ERR(snic->shost,
			      "WQ[%d] error status %d\n",
			      qi,
			      err);
	}
} /* end of snic_log_q_error */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_RES_H
#define __SNIC_RES_H
#include "snic_io.h"
#include "wq_enet_desc.h"
#include "vnic_wq.h"
#include "snic_fwint.h"
#include "vnic_cq_fw.h"
/*
 * snic_icmnd_init - build an ICMND (initiator SCSI command) host request.
 *
 * Encodes the common request header, then fills in target addressing,
 * the (zero-padded) CDB, and the data/sense buffer descriptors.  All
 * multi-byte firmware fields are stored little-endian.
 */
static inline void
snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx,
		u16 flags, u64 tgt_id, u8 *lun, u8 *scsi_cdb, u8 cdb_len,
		u32 data_len, u16 sg_cnt, ulong sgl_addr,
		dma_addr_t sns_addr_pa, u32 sense_len)
{
	__typeof__(req->u.icmnd) *icmnd = &req->u.icmnd;

	/* Common header: request type, command id, owner, sg count, context */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt,
			ctx);

	/* Target addressing */
	icmnd->flags = cpu_to_le16(flags);
	icmnd->tgt_id = cpu_to_le64(tgt_id);
	memcpy(&icmnd->lun_id, lun, LUN_ADDR_LEN);

	/* CDB: clear the full field, then copy the caller's cdb_len bytes */
	icmnd->cdb_len = cdb_len;
	memset(icmnd->cdb, 0, SNIC_CDB_LEN);
	memcpy(icmnd->cdb, scsi_cdb, cdb_len);

	/* Data SGL and sense buffer descriptors */
	icmnd->data_len = cpu_to_le32(data_len);
	icmnd->sg_addr = cpu_to_le64(sgl_addr);
	icmnd->sense_len = cpu_to_le32(sense_len);
	icmnd->sense_addr = cpu_to_le64(sns_addr_pa);
}
/*
 * snic_itmf_init - build an ITMF (task management) host request.
 *
 * Encodes the common request header and the task-management payload.
 * req_id is meaningful only for abort-task / clear-task operations.
 */
static inline void
snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx,
	       u16 flags, u32 req_id, u64 tgt_id, u8 *lun, u8 tm_type)
{
	__typeof__(req->u.itmf) *itmf = &req->u.itmf;

	/* Common header: request type, command id, owner, context */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx);

	itmf->tm_type = tm_type;
	itmf->flags = cpu_to_le16(flags);
	/* req_id valid only in abort, clear task */
	itmf->req_id = cpu_to_le32(req_id);
	itmf->tgt_id = cpu_to_le64(tgt_id);
	memcpy(&itmf->lun_id, lun, LUN_ADDR_LEN);
}
/*
 * snic_queue_wq_eth_desc - encode and post one ethernet-style WQ descriptor.
 *
 * Grabs the next free descriptor slot, encodes the buffer address/length
 * plus the VLAN and completion-queue options, and posts the buffer on the
 * work queue (single fragment, eop set).
 */
static inline void
snic_queue_wq_eth_desc(struct vnic_wq *wq,
		       void *os_buf,
		       dma_addr_t dma_addr,
		       unsigned int len,
		       int vlan_tag_insert,
		       unsigned int vlan_tag,
		       int cq_entry)
{
	struct wq_enet_desc *desc = svnic_wq_next_desc(wq);
	u64 bus_addr = (u64)dma_addr | VNIC_PADDR_TARGET;

	wq_enet_desc_enc(desc,
			 bus_addr,
			 (u16)len,
			 0, /* mss_or_csum_offset */
			 0, /* fc_eof */
			 0, /* offload mode */
			 1, /* eop */
			 (u8)cq_entry,
			 0, /* fcoe_encap */
			 (u8)vlan_tag_insert,
			 (u16)vlan_tag,
			 0 /* loopback */);

	svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
}
struct snic;
int snic_get_vnic_config(struct snic *);
int snic_alloc_vnic_res(struct snic *);
void snic_free_vnic_res(struct snic *);
void snic_get_res_counts(struct snic *);
void snic_log_q_error(struct snic *);
int snic_get_vnic_resources_size(struct snic *);
#endif /* __SNIC_RES_H */
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment