Commit 7282ec8c authored by David S. Miller

Merge branch 'bnx2x'

Yuval Mintz says:

====================
bnx2x: Enhancements & semantic changes series

This patch series contains several semantic (or mostly semantic) patches,
as well as adding support for packet aggregation on the receive path of
Windows VMs and updating bnx2x to the new FW recently accepted upstream.

Please consider applying these patches to `net-next'.

(This is a repost as net-next was still closed when this was previously
sent)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8bed1285 3156b8eb
@@ -26,8 +26,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.78.17-0"
-#define DRV_MODULE_RELDATE      "2013/04/11"
+#define DRV_MODULE_VERSION      "1.78.19-0"
+#define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
 #define BNX2X_MSG_DCB                   0x8000000
 
 /* regular debug print */
+#define DP_INNER(fmt, ...)                                      \
+        pr_notice("[%s:%d(%s)]" fmt,                            \
+                  __func__, __LINE__,                           \
+                  bp->dev ? (bp->dev->name) : "?",              \
+                  ##__VA_ARGS__);
+
 #define DP(__mask, fmt, ...)                                    \
 do {                                                            \
         if (unlikely(bp->msg_enable & (__mask)))                \
-                pr_notice("[%s:%d(%s)]" fmt,                    \
-                          __func__, __LINE__,                   \
-                          bp->dev ? (bp->dev->name) : "?",      \
-                          ##__VA_ARGS__);                       \
+                DP_INNER(fmt, ##__VA_ARGS__);                   \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...)                                \
+do {                                                            \
+        if (unlikely((bp->msg_enable & (__mask)) == __mask))    \
+                DP_INNER(fmt, ##__VA_ARGS__);                   \
 } while (0)
 
 #define DP_CONT(__mask, fmt, ...)                               \
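The semantic difference between the two macros is the mask test: DP() fires when any bit of __mask is enabled in bp->msg_enable, while the new DP_AND() fires only when all bits of __mask are enabled. A minimal userspace sketch of the two predicates (the mask values here are made up for illustration):

        #include <stdio.h>

        #define MSG_SP    0x1
        #define MSG_STATS 0x2

        int main(void)
        {
                unsigned int msg_enable = MSG_SP;        /* only SP prints enabled */
                unsigned int mask = MSG_SP | MSG_STATS;

                /* DP() semantics: any common bit is enough */
                printf("DP fires:     %d\n", (msg_enable & mask) != 0);    /* 1 */
                /* DP_AND() semantics: every mask bit must be set */
                printf("DP_AND fires: %d\n", (msg_enable & mask) == mask); /* 0 */
                return 0;
        }

This is why several noisy prints below are converted from DP() to DP_AND(): they now require both message groups to be enabled instead of either one.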
@@ -1261,6 +1270,7 @@ struct bnx2x_slowpath {
 	union {
 		struct client_init_ramrod_data   init_data;
 		struct client_update_ramrod_data update_data;
+		struct tpa_update_ramrod_data    tpa_data;
 	} q_rdata;
 
 	union {
@@ -1392,7 +1402,7 @@ struct bnx2x_fw_stats_data {
 };
 
 /* Public slow path states */
-enum {
+enum sp_rtnl_flag {
 	BNX2X_SP_RTNL_SETUP_TC,
 	BNX2X_SP_RTNL_TX_TIMEOUT,
 	BNX2X_SP_RTNL_FAN_FAILURE,
......
@@ -4773,12 +4773,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
 	bnx2x_panic();
 #endif
 
-	smp_mb__before_clear_bit();
-	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
-	smp_mb__after_clear_bit();
-
 	/* This allows the netif to be shutdown gracefully before resetting */
-	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 }
 
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4906,3 +4902,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
 	disable = disable ? 1 : (usec ? 0 : 1);
 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
 }
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+			    u32 verbose)
+{
+	smp_mb__before_clear_bit();
+	set_bit(flag, &bp->sp_rtnl_state);
+	smp_mb__after_clear_bit();
+	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+	   flag);
+	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
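This helper folds the recurring set_bit-plus-barriers-plus-schedule sequence into one place; the verbose argument is simply OR'd into BNX2X_MSG_SP, so a call site can keep its original log mask. For example, the rx-mode path further down in this series becomes a single call:

        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, NETIF_MSG_IFUP);

which replaces the explicit DP(NETIF_MSG_IFUP, ...), smp_mb__before_clear_bit(), set_bit(), smp_mb__after_clear_bit() and schedule_delayed_work() sequence at that site.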
@@ -1324,4 +1324,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
 int bnx2x_drain_tx_queues(struct bnx2x *bp);
 void bnx2x_squeeze_objects(struct bnx2x *bp);
 
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+			    u32 verbose);
+
 #endif /* BNX2X_CMN_H */
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 	 * as we are handling an attention on a work queue which must be
 	 * flushed at some rtnl-locked contexts (e.g. if down)
 	 */
-	if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
 }
 
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 			if (IS_MF(bp))
 				bnx2x_link_sync_notify(bp);
 
-			set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
-
-			schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+			bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
 			return;
 		}
 	case BNX2X_DCBX_STATE_TX_PAUSED:
......
@@ -87,7 +87,6 @@
 	(IRO[156].base + ((vfId) * IRO[156].m1))
 #define CSTORM_VF_TO_PF_OFFSET(funcId) \
 	(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
 	(IRO[203].base + ((pfId) * IRO[203].m1))
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
......
@@ -2848,7 +2848,7 @@ struct afex_stats {
 
 #define BCM_5710_FW_MAJOR_VERSION        7
 #define BCM_5710_FW_MINOR_VERSION        8
-#define BCM_5710_FW_REVISION_VERSION     17
+#define BCM_5710_FW_REVISION_VERSION     19
 #define BCM_5710_FW_ENGINEERING_VERSION  0
 #define BCM_5710_FW_COMPILE_FLAGS        1
......
@@ -918,7 +918,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 	u16 start = 0, end = 0;
 	u8 cos;
 #endif
-	if (disable_int)
+	if (IS_PF(bp) && disable_int)
 		bnx2x_int_disable(bp);
 
 	bp->stats_state = STATS_STATE_DISABLED;
@@ -929,33 +929,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 	/* Indices */
 	/* Common */
-	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
-		  bp->def_idx, bp->def_att_idx, bp->attn_state,
-		  bp->spq_prod_idx, bp->stats_counter);
-	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
-		  bp->def_status_blk->atten_status_block.attn_bits,
-		  bp->def_status_blk->atten_status_block.attn_bits_ack,
-		  bp->def_status_blk->atten_status_block.status_block_id,
-		  bp->def_status_blk->atten_status_block.attn_bits_index);
-	BNX2X_ERR("     def (");
-	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
-		pr_cont("0x%x%s",
-			bp->def_status_blk->sp_sb.index_values[i],
-			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
-
-	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
-		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
-			i*sizeof(u32));
-
-	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
-		sp_sb_data.igu_sb_id,
-		sp_sb_data.igu_seg_id,
-		sp_sb_data.p_func.pf_id,
-		sp_sb_data.p_func.vnic_id,
-		sp_sb_data.p_func.vf_id,
-		sp_sb_data.p_func.vf_valid,
-		sp_sb_data.state);
+	if (IS_PF(bp)) {
+		struct host_sp_status_block *def_sb = bp->def_status_blk;
+		int data_size, cstorm_offset;
+
+		BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+			  bp->def_idx, bp->def_att_idx, bp->attn_state,
+			  bp->spq_prod_idx, bp->stats_counter);
+		BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+			  def_sb->atten_status_block.attn_bits,
+			  def_sb->atten_status_block.attn_bits_ack,
+			  def_sb->atten_status_block.status_block_id,
+			  def_sb->atten_status_block.attn_bits_index);
+		BNX2X_ERR("     def (");
+		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+			pr_cont("0x%x%s",
+				def_sb->sp_sb.index_values[i],
+				(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+		data_size = sizeof(struct hc_sp_status_block_data) /
+			    sizeof(u32);
+		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+		for (i = 0; i < data_size; i++)
+			*((u32 *)&sp_sb_data + i) =
+				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+				       i * sizeof(u32));
+
+		pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+			sp_sb_data.igu_sb_id,
+			sp_sb_data.igu_seg_id,
+			sp_sb_data.p_func.pf_id,
+			sp_sb_data.p_func.vnic_id,
+			sp_sb_data.p_func.vf_id,
+			sp_sb_data.p_func.vf_valid,
+			sp_sb_data.state);
+	}
 
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1021,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 			pr_cont("0x%x%s",
 				fp->sb_index_values[j],
 				(j == loop - 1) ? ")" : " ");
+
+		/* VF cannot access FW reflection for status block */
+		if (IS_VF(bp))
+			continue;
+
 		/* fw sb data */
 		data_size = CHIP_IS_E1x(bp) ?
 			sizeof(struct hc_status_block_data_e1x) :
@@ -1064,16 +1077,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 	}
 
 #ifdef BNX2X_STOP_ON_ERROR
+	if (IS_PF(bp)) {
+		/* event queue */
+		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+		for (i = 0; i < NUM_EQ_DESC; i++) {
+			u32 *data = (u32 *)&bp->eq_ring[i].message.data;
 
-	/* event queue */
-	BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
-	for (i = 0; i < NUM_EQ_DESC; i++) {
-		u32 *data = (u32 *)&bp->eq_ring[i].message.data;
-
-		BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
-			  i, bp->eq_ring[i].message.opcode,
-			  bp->eq_ring[i].message.error);
-		BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+				  i, bp->eq_ring[i].message.opcode,
+				  bp->eq_ring[i].message.error);
+			BNX2X_ERR("data: %x %x %x\n",
+				  data[0], data[1], data[2]);
+		}
 	}
 
 	/* Rings */
@@ -1140,8 +1155,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 		}
 	}
 #endif
-	bnx2x_fw_dump(bp);
-	bnx2x_mc_assert(bp);
+	if (IS_PF(bp)) {
+		bnx2x_fw_dump(bp);
+		bnx2x_mc_assert(bp);
+	}
 	BNX2X_ERR("end crash dump -----------------\n");
 }
@@ -1814,6 +1831,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 		drv_cmd = BNX2X_Q_CMD_EMPTY;
 		break;
 
+	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+		break;
+
 	default:
 		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
 			  command, fp->index);
@@ -3644,10 +3666,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
 				    HW_CID(bp, cid));
 
-	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
-	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
-		 SPE_HDR_FUNCTION_ID);
+	/* In some cases, type may already contain the func-id
+	 * mainly in SRIOV related use cases, so we add it here only
+	 * if it's not already set.
+	 */
+	if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+		type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+			SPE_HDR_CONN_TYPE;
+		type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+			 SPE_HDR_FUNCTION_ID);
+	} else {
+		type = cmd_type;
+	}
 
 	spe->hdr.type = cpu_to_le16(type);
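The SPQ element header packs the connection type and a function id into the same 16-bit type field. Before this change bnx2x_sp_post() always stamped the PF's own function id; now a caller acting on behalf of a VF can pre-encode the VF's function id, and the helper leaves it alone. The TPA-update ramrod later in this series uses exactly that, roughly:

        /* caller pre-encodes the func-id so bnx2x_sp_post() won't
         * overwrite it with the PF's (see bnx2x_q_send_update_tpa()) */
        u16 type = ETH_CONNECTION_TYPE |
                   (o->func_id << SPE_HDR_FUNCTION_ID_SHIFT);

        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE, cid,
                      U64_HI(data_mapping), U64_LO(data_mapping), type);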
@@ -3878,10 +3908,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
 	 * This is due to some boards consuming sufficient power when driver is
 	 * up to overheat if fan fails.
 	 */
-	smp_mb__before_clear_bit();
-	set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
-	smp_mb__after_clear_bit();
-	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
 }
 
 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -5221,9 +5248,9 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 			continue;
 
 		case EVENT_RING_OPCODE_STAT_QUERY:
-			DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
+			DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
 			   "got statistics comp event %d\n",
 			   bp->stats_comp++);
 			/* nothing to do with stats comp */
 			goto next_spqe;
@@ -5273,6 +5300,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 				break;
 
 			} else {
+				int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
 				DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
 				   "AFEX: ramrod completed FUNCTION_UPDATE\n");
 				f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5311,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 				 * sp_rtnl task as all Queue SP operations
 				 * should run under rtnl_lock.
 				 */
-				smp_mb__before_clear_bit();
-				set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
-					&bp->sp_rtnl_state);
-				smp_mb__after_clear_bit();
-
-				schedule_delayed_work(&bp->sp_rtnl_task, 0);
+				bnx2x_schedule_sp_rtnl(bp, cmd, 0);
 			}
 
 			goto next_spqe;
@@ -6005,18 +6029,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;
 
-	if (IS_MF_SI(bp))
-		/*
-		 * In switch independent mode, the TSTORM needs to accept
-		 * packets that failed classification, since approximate match
-		 * mac addresses aren't written to NIG LLH
-		 */
-		REG_WR8(bp, BAR_TSTRORM_INTMEM +
-			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
-	else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
-		REG_WR8(bp, BAR_TSTRORM_INTMEM +
-			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -12064,11 +12076,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 		return;
 	} else {
 		/* Schedule an SP task to handle rest of change */
-		DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
-		smp_mb__before_clear_bit();
-		set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
-		smp_mb__after_clear_bit();
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+				       NETIF_MSG_IFUP);
 	}
 }
@@ -12101,11 +12110,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
 			/* configuring mcast to a vf involves sleeping (when we
 			 * wait for the pf's response).
 			 */
-			smp_mb__before_clear_bit();
-			set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
-				&bp->sp_rtnl_state);
-			smp_mb__after_clear_bit();
-			schedule_delayed_work(&bp->sp_rtnl_task, 0);
+			bnx2x_schedule_sp_rtnl(bp,
+					       BNX2X_SP_RTNL_VFPF_MCAST, 0);
 		}
 	}
......
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
 	   data->header.rule_cnt, p->rx_accept_flags,
 	   p->tx_accept_flags);
 
-	/* No need for an explicit memory barrier here as long we would
-	 * need to ensure the ordering of writing to the SPQ element
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
-	 * read and we will have to put a full memory barrier there
-	 * (inside bnx2x_sp_post()).
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
 	 */
 
 	/* Send a ramrod */
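The reworded comment (repeated at every ramrod-posting site below) captures the ordering argument more precisely: the ramrod payload written into the SPQ element must be globally visible before the SPQ producer is advanced, and bnx2x_sp_post() happens to perform a memory read on the producer-update path that already enforces this; only if that read were ever removed would call sites need an explicit barrier. The publish pattern itself is the classic release-style store; a minimal, self-contained C11 model of it (hypothetical names, not the driver's literal code):

        #include <stdatomic.h>

        static unsigned long spq_element;          /* ramrod payload slot */
        static _Atomic unsigned int spq_producer;  /* read by the "hardware" */

        void post(unsigned long payload)
        {
                spq_element = payload;             /* 1: fill the element */
                /* 2: publish; release ordering keeps step 1 visible first,
                 * playing the role of the read inside bnx2x_sp_post() */
                atomic_fetch_add_explicit(&spq_producer, 1,
                                          memory_order_release);
        }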
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
 		raw->clear_pending(raw);
 		return 0;
 	} else {
-		/* No need for an explicit memory barrier here as long we would
-		 * need to ensure the ordering of writing to the SPQ element
+		/* No need for an explicit memory barrier here as long as we
+		 * ensure the ordering of writing to the SPQ element
 		 * and updating of the SPQ producer which involves a memory
-		 * read and we will have to put a full memory barrier there
-		 * (inside bnx2x_sp_post()).
+		 * read. If the memory read is removed we will have to put a
+		 * full memory barrier there (inside bnx2x_sp_post()).
 		 */
 
 		/* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
 		raw->clear_pending(raw);
 		return 0;
 	} else {
-		/* No need for an explicit memory barrier here as long we would
-		 * need to ensure the ordering of writing to the SPQ element
+		/* No need for an explicit memory barrier here as long as we
+		 * ensure the ordering of writing to the SPQ element
 		 * and updating of the SPQ producer which involves a memory
-		 * read and we will have to put a full memory barrier there
-		 * (inside bnx2x_sp_post()).
+		 * read. If the memory read is removed we will have to put a
+		 * full memory barrier there (inside bnx2x_sp_post()).
 		 */
 
 		/* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
 		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
 	}
 
-	/* No need for an explicit memory barrier here as long we would
-	 * need to ensure the ordering of writing to the SPQ element
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
-	 * read and we will have to put a full memory barrier there
-	 * (inside bnx2x_sp_post()).
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
 	 */
 
 	/* Send a ramrod */
@@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
 	rss_obj->config_rss = bnx2x_setup_rss;
 }
 
-int validate_vlan_mac(struct bnx2x *bp,
-		      struct bnx2x_vlan_mac_obj *vlan_mac)
-{
-	if (!vlan_mac->get_n_elements) {
-		BNX2X_ERR("vlan mac object was not intialized\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
 /********************** Queue state object ***********************************/
 
 /**
@@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
 	/* Fill the ramrod data */
 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
 
-	/* No need for an explicit memory barrier here as long we would
-	 * need to ensure the ordering of writing to the SPQ element
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
-	 * read and we will have to put a full memory barrier there
-	 * (inside bnx2x_sp_post()).
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
 	 */
-
 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
 			     U64_HI(data_mapping),
 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
 	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
 
-	/* No need for an explicit memory barrier here as long we would
-	 * need to ensure the ordering of writing to the SPQ element
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
-	 * read and we will have to put a full memory barrier there
-	 * (inside bnx2x_sp_post()).
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
 	 */
-
 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
 			     U64_HI(data_mapping),
 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
 		 o->cids[cid_index], rdata->general.client_id,
 		 rdata->general.sp_client_id, rdata->general.cos);
 
-	/* No need for an explicit memory barrier here as long we would
-	 * need to ensure the ordering of writing to the SPQ element
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
-	 * read and we will have to put a full memory barrier there
-	 * (inside bnx2x_sp_post()).
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
 	 */
-
 	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
 			     U64_HI(data_mapping),
 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
 	/* Fill the ramrod data */
 	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
 
-	/* No need for an explicit memory barrier here as long we would
-	 * need to ensure the ordering of writing to the SPQ element
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
-	 * read and we will have to put a full memory barrier there
-	 * (inside bnx2x_sp_post()).
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
 	 */
-
 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
 			     o->cids[cid_index], U64_HI(data_mapping),
 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
 	return bnx2x_q_send_update(bp, params);
 }
 
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *obj,
+				struct bnx2x_queue_update_tpa_params *params,
+				struct tpa_update_ramrod_data *data)
+{
+	data->client_id = obj->cl_id;
+	data->complete_on_both_clients = params->complete_on_both_clients;
+	data->dont_verify_rings_pause_thr_flg =
+		params->dont_verify_thr;
+	data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+	data->max_sges_for_packet = params->max_sges_pkt;
+	data->max_tpa_queues = params->max_tpa_queues;
+	data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+	data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+	data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+	data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+	data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+	data->tpa_mode = params->tpa_mode;
+	data->update_ipv4 = params->update_ipv4;
+	data->update_ipv6 = params->update_ipv6;
+}
+
 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
 					struct bnx2x_queue_state_params *params)
 {
-	/* TODO: Not implemented yet. */
-	return -1;
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct tpa_update_ramrod_data *rdata =
+		(struct tpa_update_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_queue_update_tpa_params *update_tpa_params =
+		&params->params.update_tpa;
+	u16 type;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+	/* Add the function id inside the type, so that sp post function
+	 * doesn't automatically add the PF func-id, this is required
+	 * for operations done by PFs on behalf of their VFs
+	 */
+	type = ETH_CONNECTION_TYPE |
+	       ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+			     o->cids[BNX2X_PRIMARY_CID_INDEX],
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), type);
 }
 
 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
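Taken together with the RAMROD_CMD_ID_ETH_TPA_UPDATE completion case added in bnx2x_sp_event() above, this gives the full path for reconfiguring aggregation on a live queue. A caller drives it through the generic queue state machine, roughly as follows (a sketch modeled on bnx2x_vfop_tpa() below; setup of the remaining update_tpa fields is omitted):

        struct bnx2x_queue_state_params qstate = {0};

        qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);      /* queue to update */
        qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
        qstate.params.update_tpa.sge_map = sge_dma_addr; /* per-queue SGE ring */
        rc = bnx2x_queue_state_change(bp, &qstate);      /* posts the ramrod */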
@@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
 	rdata->tx_switch_suspend = switch_update_params->suspend;
 	rdata->echo = SWITCH_UPDATE;
 
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
 			     U64_HI(data_mapping),
 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
 	rdata->echo = AFEX_UPDATE;
 
-	/* No need for an explicit memory barrier here as long we would
-	 * need to ensure the ordering of writing to the SPQ element
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
-	 * read and we will have to put a full memory barrier there
-	 * (inside bnx2x_sp_post()).
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
 	 */
 	DP(BNX2X_MSG_SP,
 	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
 		rdata->traffic_type_to_priority_cos[i] =
 			tx_start_params->traffic_type_to_priority_cos[i];
 
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
 			     U64_HI(data_mapping),
 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
......
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
 	u8		cid_index;
 };
 
+struct bnx2x_queue_update_tpa_params {
+	dma_addr_t sge_map;
+	u8 update_ipv4;
+	u8 update_ipv6;
+	u8 max_tpa_queues;
+	u8 max_sges_pkt;
+	u8 complete_on_both_clients;
+	u8 dont_verify_thr;
+	u8 tpa_mode;
+	u8 _pad;
+
+	u16 sge_buff_sz;
+	u16 max_agg_sz;
+	u16 sge_pause_thr_low;
+	u16 sge_pause_thr_high;
+};
+
 struct rxq_pause_params {
 	u16		bd_th_lo;
 	u16		bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
 	/* Params according to the current command */
 	union {
 		struct bnx2x_queue_update_params	update;
+		struct bnx2x_queue_update_tpa_params	update_tpa;
 		struct bnx2x_queue_setup_params		setup;
 		struct bnx2x_queue_init_params		init;
 		struct bnx2x_queue_setup_tx_only_params	tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
 			     u8 *ind_table);
 
-int validate_vlan_mac(struct bnx2x *bp,
-		      struct bnx2x_vlan_mac_obj *vlan_mac);
 #endif /* BNX2X_SP_VERBS */
@@ -102,6 +102,21 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	mmiowb();
 	barrier();
 }
+
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+				      struct bnx2x_virtf *vf,
+				      bool print_err)
+{
+	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+		if (print_err)
+			BNX2X_ERR("Slowpath objects not yet initialized!\n");
+		else
+			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+		return false;
+	}
+	return true;
+}
+
 /* VFOP - VF slow-path operation support */
 #define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
@@ -176,6 +191,11 @@ enum bnx2x_vfop_rss_state {
 	   BNX2X_VFOP_RSS_DONE
 };
 
+enum bnx2x_vfop_tpa_state {
+	   BNX2X_VFOP_TPA_CONFIG,
+	   BNX2X_VFOP_TPA_DONE
+};
+
 #define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
 
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -716,7 +736,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
 				     int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -736,9 +755,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
-		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
-		if (rc)
-			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
 		/* set extra args */
@@ -758,9 +774,12 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 			    struct bnx2x_vfop_filters *macs,
 			    int qid, bool drv_only)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-	int rc;
+	struct bnx2x_vfop *vfop;
 
+	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+		return -EINVAL;
+
+	vfop = bnx2x_vfop_add(bp, vf);
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
 			.multi_filter = macs,
@@ -782,9 +801,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
-		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
-		if (rc)
-			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
 		/* set extra args */
@@ -804,9 +820,12 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
 				   struct bnx2x_vfop_cmd *cmd,
 				   int qid, u16 vid, bool add)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-	int rc;
+	struct bnx2x_vfop *vfop;
 
+	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+		return -EINVAL;
+
+	vfop = bnx2x_vfop_add(bp, vf);
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
 			.multi_filter = NULL, /* single command */
@@ -826,9 +845,6 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
 		ramrod->user_req.u.vlan.vlan = vid;
 
 		/* set object */
-		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-		if (rc)
-			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -848,7 +864,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
 				      int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -868,9 +883,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
-		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-		if (rc)
-			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -890,9 +902,12 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
 			     struct bnx2x_vfop_filters *vlans,
 			     int qid, bool drv_only)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-	int rc;
+	struct bnx2x_vfop *vfop;
 
+	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+		return -EINVAL;
+
+	vfop = bnx2x_vfop_add(bp, vf);
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
 			.multi_filter = vlans,
@@ -911,9 +926,6 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
-		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-		if (rc)
-			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -971,11 +983,8 @@ static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
 op_done:
 	case BNX2X_VFOP_QSETUP_DONE:
 		vf->cfg_flags |= VF_CFG_VLAN;
-		smp_mb__before_clear_bit();
-		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
-			&bp->sp_rtnl_state);
-		smp_mb__after_clear_bit();
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+				       BNX2X_MSG_IOV);
 		bnx2x_vfop_end(bp, vf, vfop);
 		return;
 	default:
@@ -1025,34 +1034,20 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 		/* vlan-clear-all: driver-only, don't consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
-		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
-			/* the vlan_mac vfop will re-schedule us */
-			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
-							      qid, true);
-			if (vfop->rc)
-				goto op_err;
-			return;
-		} else {
-			/* need to reschedule ourselves */
-			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-		}
+		/* the vlan_mac vfop will re-schedule us */
+		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+		if (vfop->rc)
+			goto op_err;
+		return;
 
 	case BNX2X_VFOP_QFLR_CLR_MAC:
 		/* mac-clear-all: driver only consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
-		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
-			/* the vlan_mac vfop will re-schedule us */
-			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
-							     qid, true);
-			if (vfop->rc)
-				goto op_err;
-			return;
-		} else {
-			/* need to reschedule ourselves */
-			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-		}
+		/* the vlan_mac vfop will re-schedule us */
+		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+		if (vfop->rc)
+			goto op_err;
+		return;
 
 	case BNX2X_VFOP_QFLR_TERMINATE:
 		qstate = &vfop->op_p->qctor.qstate;
@@ -1095,8 +1090,13 @@ static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
 	if (vfop) {
 		vfop->args.qx.qid = qid;
-		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
-				 bnx2x_vfop_qflr, cmd->done);
+		if ((qid == LEADING_IDX) &&
+		    bnx2x_validate_vf_sp_objs(bp, vf, false))
+			bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
+					 bnx2x_vfop_qflr, cmd->done);
+		else
+			bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
+					 bnx2x_vfop_qflr, cmd->done);
 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
 					     cmd->block);
 	}
@@ -1310,7 +1310,10 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	switch (state) {
 	case BNX2X_VFOP_QTEARDOWN_RXMODE:
 		/* Drop all */
-		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+		if (bnx2x_validate_vf_sp_objs(bp, vf, true))
+			vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+		else
+			vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
 		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
 		if (vfop->rc)
 			goto op_err;
@@ -2166,6 +2169,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			     bnx2x_vf_sp_map(bp, vf, q_data),
 			     q_type);
 
+	/* sp indication is set only when vlan/mac/etc. are initialized */
+	q->sp_initialized = false;
+
 	DP(BNX2X_MSG_IOV,
 	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
 	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
@@ -2527,10 +2533,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
 		(is_fcoe ? 0 : 1);
 
-	DP(BNX2X_MSG_IOV,
+	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
 	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
 	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
 	   first_queue_query_index + num_queues_req);
 
 	cur_data_offset = bp->fw_stats_data_mapping +
 		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2544,9 +2550,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 		struct bnx2x_virtf *vf = BP_VF(bp, i);
 
 		if (vf->state != VF_ENABLED) {
-			DP(BNX2X_MSG_IOV,
+			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
 			   "vf %d not enabled so no stats for it\n",
 			   vf->abs_vfid);
 			continue;
 		}
@@ -2597,7 +2603,8 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
 	/* Iterate over all VFs and invoke state transition for VFs with
 	 * 'in-progress' slow-path operations
 	 */
-	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
+	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
+	       "searching for pending vf operations\n");
 	for_each_vf(bp, i) {
 		struct bnx2x_virtf *vf = BP_VF(bp, i);
@@ -3046,6 +3053,83 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
 	return -ENOMEM;
 }
 
+/* VFOP tpa update, send update on all queues */
+static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+	struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
+	enum bnx2x_vfop_tpa_state state = vfop->state;
+
+	bnx2x_vfop_reset_wq(vf);
+
+	if (vfop->rc < 0)
+		goto op_err;
+
+	DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
+	   vf->abs_vfid, tpa_args->qid,
+	   state);
+
+	switch (state) {
+	case BNX2X_VFOP_TPA_CONFIG:
+
+		if (tpa_args->qid < vf_rxq_count(vf)) {
+			struct bnx2x_queue_state_params *qstate =
+				&vf->op_params.qstate;
+
+			qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
+
+			/* The only thing that changes for the ramrod params
+			 * between calls is the sge_map
+			 */
+			qstate->params.update_tpa.sge_map =
+				tpa_args->sge_map[tpa_args->qid];
+
+			DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
+			   tpa_args->qid,
+			   U64_HI(qstate->params.update_tpa.sge_map),
+			   U64_LO(qstate->params.update_tpa.sge_map));
+			qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
+			vfop->rc = bnx2x_queue_state_change(bp, qstate);
+
+			tpa_args->qid++;
+			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+		}
+		vfop->state = BNX2X_VFOP_TPA_DONE;
+		vfop->rc = 0;
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+		BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
+op_done:
+	case BNX2X_VFOP_TPA_DONE:
+		bnx2x_vfop_end(bp, vf, vfop);
+		return;
+	default:
+		bnx2x_vfop_default(state);
+	}
+op_pending:
+	return;
+}
+
+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+		       struct bnx2x_virtf *vf,
+		       struct bnx2x_vfop_cmd *cmd,
+		       struct vfpf_tpa_tlv *tpa_tlv)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+	if (vfop) {
+		vfop->args.qx.qid = 0; /* loop */
+		memcpy(&vfop->args.tpa.sge_map,
+		       tpa_tlv->tpa_client_info.sge_addr,
+		       sizeof(vfop->args.tpa.sge_map));
+		bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
+				 bnx2x_vfop_tpa, cmd->done);
+		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
+					     cmd->block);
+	}
+	return -ENOMEM;
+}
+
 /* VF release ~ VF close + VF release-resources
  * Release is the ultimate SW shutdown and is called whenever an
  * irrecoverable error is encountered.
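The BNX2X_VFOP_TPA_CONFIG state is re-entered once per VF rx queue: each pass posts one TPA-update ramrod through the queue state machine, bumps tpa_args->qid, and bnx2x_vfop_finalize(..., VFOP_CONT) reschedules the vfop when the completion arrives. Flattened out, the state machine behaves like this loop (a simplification, not the literal code):

        /* conceptual shape of BNX2X_VFOP_TPA_CONFIG */
        for (qid = 0; qid < vf_rxq_count(vf); qid++) {
                qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
                qstate->params.update_tpa.sge_map = tpa_args->sge_map[qid];
                qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
                bnx2x_queue_state_change(bp, qstate); /* one ramrod per queue */
                /* ...wait for the ramrod completion before the next pass... */
        }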
@@ -3074,16 +3158,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
 	*sbdf = vf->devfn | (vf->bus << 8);
 }
 
-static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
-				     struct bnx2x_vf_bar_info *bar_info)
-{
-	int n;
-
-	bar_info->nr_bars = bp->vfdb->sriov.nres;
-	for (n = 0; n < bar_info->nr_bars; n++)
-		bar_info->bars[n] = vf->bars[n];
-}
-
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			      enum channel_tlvs tlv)
 {
@@ -3405,13 +3479,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
 	ivi->spoofchk = 1; /*always enabled */
 	if (vf->state == VF_ENABLED) {
 		/* mac and vlan are in vlan_mac objects */
-		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
 			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
 						0, ETH_ALEN);
-		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
 			vlan_obj->get_n_elements(bp, vlan_obj, 1,
 						 (u8 *)&ivi->vlan, 0,
 						 VLAN_HLEN);
+		}
 	} else {
 		/* mac */
 		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3485,17 +3559,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
 		/* configure the mac in device on this vf's queue */
 		unsigned long ramrod_flags = 0;
-		struct bnx2x_vlan_mac_obj *mac_obj =
-			&bnx2x_leading_vfq(vf, mac_obj);
+		struct bnx2x_vlan_mac_obj *mac_obj;
 
-		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-		if (rc)
-			return rc;
+		/* User should be able to see failure reason in system logs */
+		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+			return -EINVAL;
 
 		/* must lock vfpf channel to protect against vf flows */
 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 
 		/* remove existing eth macs */
+		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
 		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
 		if (rc) {
 			BNX2X_ERR("failed to delete eth macs\n");
@@ -3569,17 +3643,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
 		return rc;
 
-	/* configure the vlan in device on this vf's queue */
-	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
-	rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-	if (rc)
-		return rc;
+	/* User should be able to see error in system logs */
+	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+		return -EINVAL;
 
 	/* must lock vfpf channel to protect against vf flows */
 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
 	/* remove existing vlans */
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
 	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
 				  &ramrod_flags);
 	if (rc) {
@@ -3736,13 +3809,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
 	bnx2x_sample_bulletin(bp);
 
 	/* if channel is down we need to self destruct */
-	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
-		smp_mb__before_clear_bit();
-		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
-			&bp->sp_rtnl_state);
-		smp_mb__after_clear_bit();
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
-	}
+	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+				       BNX2X_MSG_IOV);
 }
 
 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
......
@@ -83,6 +83,7 @@ struct bnx2x_vf_queue {
 	u16 index;
 	u16 sb_idx;
 	bool is_leading;
+	bool sp_initialized;
 };
 
 /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -100,6 +101,7 @@ union bnx2x_vfop_params {
 	struct bnx2x_mcast_ramrod_params	mcast;
 	struct bnx2x_config_rss_params		rss;
 	struct bnx2x_vfop_qctor_params		qctor;
+	struct bnx2x_queue_state_params		qstate;
 };
 
 /* forward */
@@ -166,6 +168,11 @@ struct bnx2x_vfop_args_filters {
 	atomic_t *credit;	/* non NULL means 'don't consume credit' */
 };
 
+struct bnx2x_vfop_args_tpa {
+	int	   qid;
+	dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
+};
+
 union bnx2x_vfop_args {
 	struct bnx2x_vfop_args_mcast	mc_list;
 	struct bnx2x_vfop_args_qctor	qctor;
@@ -173,6 +180,7 @@ union bnx2x_vfop_args {
 	struct bnx2x_vfop_args_defvlan	defvlan;
 	struct bnx2x_vfop_args_qx	qx;
 	struct bnx2x_vfop_args_filters	filters;
+	struct bnx2x_vfop_args_tpa	tpa;
 };
 
 struct bnx2x_vfop {
@@ -704,6 +712,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
 		       struct bnx2x_virtf *vf,
 		       struct bnx2x_vfop_cmd *cmd);
 
+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+		       struct bnx2x_virtf *vf,
+		       struct bnx2x_vfop_cmd *cmd,
+		       struct vfpf_tpa_tlv *tpa_tlv);
+
 /* VF release ~ VF close + VF release-resources
  *
  * Release is the ultimate SW shutdown and is called whenever an
......
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	vf->leading_rss = cl_id;
 	q->is_leading = true;
+	q->sp_initialized = true;
 }
 
 /* ask the pf to open a queue for the vf */
@@ -1159,7 +1160,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	resp->pfdev_info.db_size = bp->db_size;
 	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
 	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
-				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+				   PFVF_CAP_TPA |
+				   PFVF_CAP_TPA_UPDATE);
 	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
 			  sizeof(resp->pfdev_info.fw_ver));
...@@ -1694,16 +1696,12 @@ static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
	return -ENOMEM;
}

-static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
-				       struct bnx2x_virtf *vf,
-				       struct bnx2x_vf_mbx *mbx)
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+				      struct bnx2x_virtf *vf,
+				      struct vfpf_set_q_filters_tlv *filters)
{
-	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vf_mbx_resp,
-		.block = false,
-	};
+	int rc = 0;

	/* if a mac was already set for this VF via the set vf mac ndo, we only
	 * accept mac configurations of that mac. Why accept them at all?
...@@ -1716,6 +1714,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
+			rc = -EPERM;
			goto response;
		}
...@@ -1726,9 +1725,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
+			rc = -EPERM;
			goto response;
		}
	}

+response:
+	return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf,
+				       struct vfpf_set_q_filters_tlv *filters)
+{
+	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+	int rc = 0;
+
	/* if vlan was set by hypervisor we don't allow guest to config vlan */
	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
		int i;
...@@ -1740,13 +1752,36 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
+			rc = -EPERM;
			goto response;
		}
		}
	}

	/* verify vf_qid */
-	if (filters->vf_qid > vf_rxq_count(vf))
+	if (filters->vf_qid > vf_rxq_count(vf)) {
+		rc = -EPERM;
+		goto response;
+	}
+
+response:
+	return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf,
+				       struct bnx2x_vf_mbx *mbx)
+{
+	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+	struct bnx2x_vfop_cmd cmd = {
+		.done = bnx2x_vf_mbx_resp,
+		.block = false,
+	};
+
+	if (bnx2x_filters_validate_mac(bp, vf, filters))
+		goto response;
+
+	if (bnx2x_filters_validate_vlan(bp, vf, filters))
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
...@@ -1877,6 +1912,75 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
	bnx2x_vf_mbx_resp(bp, vf);
}
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+				     struct vfpf_tpa_tlv *tpa_tlv)
+{
+	int rc = 0;
+
+	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+	    U_ETH_MAX_SGES_FOR_PACKET) {
+		rc = -EINVAL;
+		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+			  tpa_tlv->tpa_client_info.max_sges_for_packet,
+			  U_ETH_MAX_SGES_FOR_PACKET);
+	}
+
+	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+		rc = -EINVAL;
+		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+			  tpa_tlv->tpa_client_info.max_tpa_queues,
+			  MAX_AGG_QS(bp));
+	}
+
+	return rc;
+}
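The validator checks the two limits independently, logs each violation through BNX2X_ERR, and returns 0 or -EINVAL, so callers need no extra reporting. A trivial usage sketch (the wrapper name is hypothetical):

static bool bnx2x_tpa_request_ok(struct bnx2x *bp, struct vfpf_tpa_tlv *tlv)
{
	/* any out-of-range bound was already logged by the validator */
	return bnx2x_validate_tpa_params(bp, tlv) == 0;
}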
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    struct bnx2x_vf_mbx *mbx)
+{
+	struct bnx2x_vfop_cmd cmd = {
+		.done = bnx2x_vf_mbx_resp,
+		.block = false,
+	};
+	struct bnx2x_queue_update_tpa_params *vf_op_params =
+		&vf->op_params.qstate.params.update_tpa;
+	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+
+	memset(vf_op_params, 0, sizeof(*vf_op_params));
+
+	if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+		goto mbx_resp;
+
+	vf_op_params->complete_on_both_clients =
+		tpa_tlv->tpa_client_info.complete_on_both_clients;
+	vf_op_params->dont_verify_thr =
+		tpa_tlv->tpa_client_info.dont_verify_thr;
+	vf_op_params->max_agg_sz =
+		tpa_tlv->tpa_client_info.max_agg_size;
+	vf_op_params->max_sges_pkt =
+		tpa_tlv->tpa_client_info.max_sges_for_packet;
+	vf_op_params->max_tpa_queues =
+		tpa_tlv->tpa_client_info.max_tpa_queues;
+	vf_op_params->sge_buff_sz =
+		tpa_tlv->tpa_client_info.sge_buff_size;
+	vf_op_params->sge_pause_thr_high =
+		tpa_tlv->tpa_client_info.sge_pause_thr_high;
+	vf_op_params->sge_pause_thr_low =
+		tpa_tlv->tpa_client_info.sge_pause_thr_low;
+	vf_op_params->tpa_mode =
+		tpa_tlv->tpa_client_info.tpa_mode;
+	vf_op_params->update_ipv4 =
+		tpa_tlv->tpa_client_info.update_ipv4;
+	vf_op_params->update_ipv6 =
+		tpa_tlv->tpa_client_info.update_ipv6;
+
+	vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
+
+mbx_resp:
+	if (vf->op_rc)
+		bnx2x_vf_mbx_resp(bp, vf);
+}
/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
...@@ -1916,6 +2020,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			return;
+		case CHANNEL_TLV_UPDATE_TPA:
+			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+			return;
		}
	} else {
...
...@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
#define PFVF_CAP_RSS		0x00000001
#define PFVF_CAP_DHC		0x00000002
#define PFVF_CAP_TPA		0x00000004
+#define PFVF_CAP_TPA_UPDATE	0x00000008
	char fw_ver[32];
	u16 db_size;
	u8  indices_per_sb;
...@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
	u32 rx_mask;	/* see mask constants at the top of the file */
};

+struct vfpf_tpa_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	struct vf_pf_tpa_client_info {
+		aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+		u8 update_ipv4;
+		u8 update_ipv6;
+		u8 max_tpa_queues;
+		u8 max_sges_for_packet;
+		u8 complete_on_both_clients;
+		u8 dont_verify_thr;
+		u8 tpa_mode;
+		u16 sge_buff_size;
+		u16 max_agg_size;
+		u16 sge_pause_thr_low;
+		u16 sge_pause_thr_high;
+	} tpa_client_info;
+};
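On the PF side these fields map one-to-one onto bnx2x_queue_update_tpa_params, as bnx2x_vf_mbx_update_tpa() above shows. The Linux VF driver does not send this TLV in this series (the consumer is a Windows VM), but a hypothetical sender would fill it through the usual VF-PF channel helpers; everything here outside the TLV layout itself (bnx2x_vfpf_prep, bnx2x_add_tlv, bnx2x_send_msg2pf, bnx2x_vfpf_finalize, bp->vf2pf_mbox) is an assumption about the VF channel code:

static int bnx2x_vfpf_update_tpa_sketch(struct bnx2x *bp,
					dma_addr_t *sge_addrs, u8 num_queues)
{
	struct vfpf_tpa_tlv *req = &bp->vf2pf_mbox->req.update_tpa;
	struct pfvf_general_resp_tlv *resp =
		&bp->vf2pf_mbox->resp.general_resp;
	int i, rc;

	/* request header plus list-terminating TLV */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_TPA,
			sizeof(*req));
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	for (i = 0; i < num_queues; i++)
		req->tpa_client_info.sge_addr[i] = sge_addrs[i];
	req->tpa_client_info.max_tpa_queues = num_queues;
	req->tpa_client_info.update_ipv4 = 1;
	req->tpa_client_info.update_ipv6 = 1;

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EINVAL;

	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}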
/* close VF (disable VF) */
struct vfpf_close_tlv {
	struct vfpf_first_tlv first_tlv;
...@@ -331,6 +351,7 @@ union vfpf_tlvs {
	struct vfpf_set_q_filters_tlv set_q_filters;
	struct vfpf_release_tlv release;
	struct vfpf_rss_tlv update_rss;
+	struct vfpf_tpa_tlv update_tpa;
	struct channel_list_end_tlv list_end;
	struct tlv_buffer_size tlv_buf_size;
};
...@@ -405,6 +426,7 @@ enum channel_tlvs {
	CHANNEL_TLV_PF_SET_VLAN,
	CHANNEL_TLV_UPDATE_RSS,
	CHANNEL_TLV_PHYS_PORT_ID,
+	CHANNEL_TLV_UPDATE_TPA,
	CHANNEL_TLV_MAX
};
...