Commit 6411280a authored by Ariel Elior, committed by David S. Miller

bnx2x: Segregate SR-IOV code

This patch segregates the SR-IOV code from the main bulk of the
bnx2x code. The CONFIG_BNX2X_SRIOV option is added to Broadcom's
Kconfig; when it is not set, none of the driver's SR-IOV support
code is built.
The option depends on the kernel's CONFIG_PCI_IOV configuration
option.
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5ebae489
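The segregation follows the standard kernel pattern: the SR-IOV objects are listed in the Makefile under bnx2x-$(CONFIG_BNX2X_SRIOV), so they are built only when the option is enabled, while the header supplies empty static-inline stubs otherwise so the common code can keep calling the SR-IOV entry points unconditionally. A minimal sketch of that header-side pattern, using hypothetical names (example_sriov.h, struct example_dev, example_vf_headroom) rather than the driver's real symbols:

/* example_sriov.h - illustrative sketch of the #ifdef/stub pattern this
 * patch applies; all names here are hypothetical, not the driver's API.
 */
#ifndef EXAMPLE_SRIOV_H
#define EXAMPLE_SRIOV_H

struct example_dev;		/* stand-in for struct bnx2x */

#ifdef CONFIG_BNX2X_SRIOV
/* real implementations live in objects built only when the option is set */
int example_vf_headroom(struct example_dev *dev);
void example_enable_sriov(struct example_dev *dev);
#else
/* empty stubs: callers compile unchanged and the calls optimize away */
static inline int example_vf_headroom(struct example_dev *dev) { return 0; }
static inline void example_enable_sriov(struct example_dev *dev) { }
#endif /* CONFIG_BNX2X_SRIOV */

#endif /* EXAMPLE_SRIOV_H */

On the Makefile side, bnx2x-$(CONFIG_BNX2X_SRIOV) expands to bnx2x-y when the option is enabled and to the unused variable bnx2x- when it is not, which is how bnx2x_vfpf.o and bnx2x_sriov.o drop out of the build.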
@@ -121,4 +121,13 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
config BNX2X_SRIOV
bool "Broadcom 578xx and 57712 SR-IOV support"
depends on BNX2X && PCI_IOV
default y
---help---
This configuration parameter enables Single Root Input Output
Virtualization support in the 578xx and 57712 products. This
allows for virtual function acceleration in virtual environments.
endif # NET_VENDOR_BROADCOM
@@ -4,4 +4,5 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o bnx2x_vfpf.o bnx2x_sriov.o
bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o
@@ -1266,6 +1266,7 @@ struct bnx2x {
(vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
#ifdef CONFIG_BNX2X_SRIOV
/* vf pf channel mailbox contains request and response buffers */
struct bnx2x_vf_mbx_msg *vf2pf_mbox;
dma_addr_t vf2pf_mbox_mapping;
@@ -1278,6 +1279,7 @@ struct bnx2x {
dma_addr_t pf2vf_bulletin_mapping;
struct pf_vf_bulletin_content old_bulletin;
#endif /* CONFIG_BNX2X_SRIOV */
struct net_device *dev;
struct pci_dev *pdev;
@@ -1379,8 +1381,14 @@ struct bnx2x {
#define IS_VF_FLAG (1 << 22)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
#ifdef CONFIG_BNX2X_SRIOV
#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG)
#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG))
#else
#define IS_VF(bp) false
#define IS_PF(bp) true
#endif
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -2275,18 +2283,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id);
int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping);
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
int bnx2x_vfpf_set_mac(struct bnx2x *bp);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
@@ -27,7 +27,6 @@
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
#include "bnx2x_sriov.h"
/**
* bnx2x_move_fp - move content of the fastpath structure.
@@ -109,7 +108,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
(bp->common.bc_ver & 0xff),
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
} else {
strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
bnx2x_vf_fill_fw_str(bp, buf, buf_len);
}
}
@@ -2048,7 +2047,7 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
* request struct
*/
if (IS_SRIOV(bp))
vf_headroom = bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
vf_headroom = bnx2x_vf_headroom(bp);
/* Request is built from stats_query_header and an array of
* stats_query_cmd_group each of which contains
@@ -3793,93 +3792,6 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
return 0;
}
/* New mac for VF. Consider these cases:
* 1. VF hasn't been acquired yet - save the mac in local bulletin board and
* supply at acquire.
* 2. VF has already been acquired but has not yet initialized - store in local
* bulletin board. mac will be posted on VF bulletin board after VF init. VF
* will configure this mac when it is ready.
* 3. VF has already initialized but has not yet setup a queue - post the new
* mac on VF's bulletin board right now. VF will configure this mac when it
* is ready.
* 4. VF has already set a queue - delete any macs already configured for this
* queue and manually config the new mac.
* In any event, once this function has been called refuse any attempts by the
* VF to configure any mac for itself except for this mac. In case of a race
* where the VF fails to see the new post on its bulletin board before sending a
* mac configuration request, the PF will simply fail the request and VF can try
* again after consulting its bulletin board
*/
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state, vfidx = queue;
struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
/* if SRIOV is disabled there is nothing to do (and somewhere, someone
* has erred).
*/
if (!IS_SRIOV(bp)) {
BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
return -EINVAL;
}
if (!is_valid_ether_addr(mac)) {
BNX2X_ERR("mac address invalid\n");
return -EINVAL;
}
/* update PF's copy of the VF's bulletin. will no longer accept mac
* configuration requests from vf unless match this mac
*/
bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
memcpy(bulletin->mac, mac, ETH_ALEN);
/* Post update on VF's bulletin board */
rc = bnx2x_post_vf_bulletin(bp, vfidx);
if (rc) {
BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
return rc;
}
/* is vf initialized and queue set up? */
q_logical_state =
bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long flags = 0;
struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
/* remove existing eth macs */
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete eth macs\n");
return -EINVAL;
}
/* remove existing uc list macs */
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete uc_list macs\n");
return -EINVAL;
}
/* configure the new mac to device */
__set_bit(RAMROD_COMP_WAIT, &flags);
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
BNX2X_ETH_MAC, &flags);
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
return rc;
}
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
@@ -24,6 +24,7 @@
#include "bnx2x.h"
#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -1128,22 +1129,7 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
struct bnx2x *bp = fp->bp;
u32 offset = BAR_USTRORM_INTMEM;
if (IS_VF(bp))
return PXP_VF_ADDR_USDM_QUEUES_START +
bp->acquire_resp.resc.hw_qid[fp->index] *
sizeof(struct ustorm_queue_zone_data);
else if (!CHIP_IS_E1x(bp))
offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
else
offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
return offset;
}
u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
@@ -20,7 +20,7 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"
#include <linux/crc32.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
@@ -1334,25 +1334,6 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
* for calling pretend prior to calling these routines
*/
/* called only on E1H or E2.
* When pretending to be PF, the pretend value is the function number 0...7
* When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
* combination
*/
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
u32 pretend_reg;
if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
return -1;
/* get my own pretend register */
pretend_reg = bnx2x_get_pretend_reg(bp);
REG_WR(bp, pretend_reg, pretend_func_val);
REG_RD(bp, pretend_reg);
return 0;
}
/* internal vf enable - until vf is enabled internally all transactions
* are blocked. this routine should always be called last with pretend.
*/
@@ -1800,7 +1781,7 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
do_div(size, iov->total);
size /= iov->total;
vf->bars[n].bar = start + size * vf->abs_vfid;
vf->bars[n].size = size;
}
@@ -3031,3 +3012,188 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* record the locking op */
vf->op_current = CHANNEL_TLV_NONE;
}
void bnx2x_enable_sriov(struct bnx2x *bp)
{
int rc = 0;
/* disable sriov in case it is still enabled */
pci_disable_sriov(bp->pdev);
DP(BNX2X_MSG_IOV, "sriov disabled\n");
/* enable sriov */
DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
if (rc)
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
else
DP(BNX2X_MSG_IOV, "sriov enabled\n");
}
/* New mac for VF. Consider these cases:
* 1. VF hasn't been acquired yet - save the mac in local bulletin board and
* supply at acquire.
* 2. VF has already been acquired but has not yet initialized - store in local
* bulletin board. mac will be posted on VF bulletin board after VF init. VF
* will configure this mac when it is ready.
* 3. VF has already initialized but has not yet setup a queue - post the new
* mac on VF's bulletin board right now. VF will configure this mac when it
* is ready.
* 4. VF has already set a queue - delete any macs already configured for this
* queue and manually config the new mac.
* In any event, once this function has been called refuse any attempts by the
* VF to configure any mac for itself except for this mac. In case of a race
* where the VF fails to see the new post on its bulletin board before sending a
* mac configuration request, the PF will simply fail the request and VF can try
* again after consulting its bulletin board
*/
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state, vfidx = queue;
struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
/* if SRIOV is disabled there is nothing to do (and somewhere, someone
* has erred).
*/
if (!IS_SRIOV(bp)) {
BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
return -EINVAL;
}
if (!is_valid_ether_addr(mac)) {
BNX2X_ERR("mac address invalid\n");
return -EINVAL;
}
/* update PF's copy of the VF's bulletin. will no longer accept mac
* configuration requests from vf unless match this mac
*/
bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
memcpy(bulletin->mac, mac, ETH_ALEN);
/* Post update on VF's bulletin board */
rc = bnx2x_post_vf_bulletin(bp, vfidx);
if (rc) {
BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
return rc;
}
/* is vf initialized and queue set up? */
q_logical_state =
bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long flags = 0;
struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
/* remove existing eth macs */
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete eth macs\n");
return -EINVAL;
}
/* remove existing uc list macs */
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete uc_list macs\n");
return -EINVAL;
}
/* configure the new mac to device */
__set_bit(RAMROD_COMP_WAIT, &flags);
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
BNX2X_ETH_MAC, &flags);
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
return rc;
}
/* crc is the first field in the bulletin board. compute the crc over the
* entire bulletin board excluding the crc field itself
*/
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
struct pf_vf_bulletin_content *bulletin)
{
return crc32(BULLETIN_CRC_SEED,
((u8 *)bulletin) + sizeof(bulletin->crc),
BULLETIN_CONTENT_SIZE - sizeof(bulletin->crc));
}
/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
int attempts;
/* bulletin board hasn't changed since last sample */
if (bp->old_bulletin.version == bulletin.version)
return PFVF_BULLETIN_UNCHANGED;
/* validate crc of new bulletin board */
if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
/* sampling the structure mid-post may result in corrupted data.
* validate crc to ensure coherency.
*/
for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
bulletin = bp->pf2vf_bulletin->content;
if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
&bulletin))
break;
BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n",
bulletin.crc,
bnx2x_crc_vf_bulletin(bp, &bulletin));
}
if (attempts >= BULLETIN_ATTEMPTS) {
BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
attempts);
return PFVF_BULLETIN_CRC_ERR;
}
}
/* the mac address in bulletin board is valid and is new */
if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
/* update new mac to net device */
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
/* copy new bulletin board to bp */
bp->old_bulletin = bulletin;
return PFVF_BULLETIN_UPDATED;
}
void bnx2x_vf_map_doorbells(struct bnx2x *bp)
{
/* vf doorbells are embedded within the regview */
bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
}
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
/* allocate vf2pf mailbox for vf to pf channel */
BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
/* allocate pf 2 vf bulletin board */
BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
sizeof(union pf_vf_bulletin));
return 0;
alloc_mem_err:
BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
@@ -20,7 +20,15 @@
#define BNX2X_SRIOV_H
#include "bnx2x_vfpf.h"
#include "bnx2x_cmn.h"
#include "bnx2x.h"
enum sample_bulletin_result {
PFVF_BULLETIN_UNCHANGED,
PFVF_BULLETIN_UPDATED,
PFVF_BULLETIN_CRC_ERR
};
#ifdef CONFIG_BNX2X_SRIOV
/* The bnx2x device structure holds vfdb structure described below.
* The VF array is indexed by the relative vfid.
@@ -712,12 +720,89 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
struct pf_vf_bulletin_content *bulletin);
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
enum sample_bulletin_result {
PFVF_BULLETIN_UNCHANGED,
PFVF_BULLETIN_UPDATED,
PFVF_BULLETIN_CRC_ERR
};
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
/* VF side vfpf channel functions */
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
int bnx2x_vfpf_set_mac(struct bnx2x *bp);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
size_t buf_len)
{
strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
}
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
struct bnx2x_fastpath *fp)
{
return PXP_VF_ADDR_USDM_QUEUES_START +
bp->acquire_resp.resc.hw_qid[fp->index] *
sizeof(struct ustorm_queue_zone_data);
}
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
void bnx2x_vf_map_doorbells(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
void bnx2x_enable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
}
#else /* CONFIG_BNX2X_SRIOV */
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj) {}
static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
bool queue_work) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
union event_ring_elem *elem) {return 1; }
static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
static inline void bnx2x_vf_mbx(struct bnx2x *bp,
struct vf_pf_event_data *vfpf_event) {}
static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
int num_vfs_param) {return 0; }
static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
u8 tx_count, u8 rx_count) {return 0; }
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
static inline int bnx2x_vfpf_set_mac(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
size_t buf_len) {}
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
struct bnx2x_fastpath *fp) {return 0; }
static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
return PFVF_BULLETIN_UNCHANGED;
}
static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
@@ -18,6 +18,8 @@
#ifndef VF_PF_IF_H
#define VF_PF_IF_H
#ifdef CONFIG_BNX2X_SRIOV
/* Common definitions for all HVs */
struct vf_pf_resc_request {
u8 num_rxqs;
@@ -353,4 +355,5 @@ enum channel_tlvs {
CHANNEL_TLV_MAX
};
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* VF_PF_IF_H */