Commit 7282ec8c authored by David S. Miller

Merge branch 'bnx2x'

Yuval Mintz says:

====================
bnx2x: Enhancements & semantic changes series

This patch series contains several semantic (or mostly semantic) patches,
as well as support for packet aggregation (TPA) on the receive path of
Windows VMs and an update of bnx2x to the new FW recently accepted upstream.

Please consider applying these patches to `net-next'.

(This is a repost as net-next was still closed when this was previously
sent)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8bed1285 3156b8eb
@@ -26,8 +26,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */

-#define DRV_MODULE_VERSION      "1.78.17-0"
-#define DRV_MODULE_RELDATE      "2013/04/11"
+#define DRV_MODULE_VERSION      "1.78.19-0"
+#define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200

 #if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
 #define BNX2X_MSG_DCB			0x8000000

 /* regular debug print */
-#define DP(__mask, fmt, ...) \
-do { \
-	if (unlikely(bp->msg_enable & (__mask))) \
-		pr_notice("[%s:%d(%s)]" fmt, \
-			  __func__, __LINE__, \
-			  bp->dev ? (bp->dev->name) : "?", \
-			  ##__VA_ARGS__); \
+#define DP_INNER(fmt, ...) \
+	pr_notice("[%s:%d(%s)]" fmt, \
+		  __func__, __LINE__, \
+		  bp->dev ? (bp->dev->name) : "?", \
+		  ##__VA_ARGS__);
+
+#define DP(__mask, fmt, ...) \
+do { \
+	if (unlikely(bp->msg_enable & (__mask))) \
+		DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...) \
+do { \
+	if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
+		DP_INNER(fmt, ##__VA_ARGS__); \
 } while (0)

 #define DP_CONT(__mask, fmt, ...) \
@@ -1261,6 +1270,7 @@ struct bnx2x_slowpath {
 	union {
 		struct client_init_ramrod_data	init_data;
 		struct client_update_ramrod_data update_data;
+		struct tpa_update_ramrod_data	tpa_data;
 	} q_rdata;

 	union {
@@ -1392,7 +1402,7 @@ struct bnx2x_fw_stats_data {
 };

 /* Public slow path states */
-enum {
+enum sp_rtnl_flag {
 	BNX2X_SP_RTNL_SETUP_TC,
 	BNX2X_SP_RTNL_TX_TIMEOUT,
 	BNX2X_SP_RTNL_FAN_FAILURE,
...
@@ -4773,12 +4773,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
 	bnx2x_panic();
 #endif

-	smp_mb__before_clear_bit();
-	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
-	smp_mb__after_clear_bit();
-
 	/* This allows the netif to be shutdown gracefully before resetting */
-	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 }

 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4906,3 +4902,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
 	disable = disable ? 1 : (usec ? 0 : 1);
 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
 }
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+			    u32 verbose)
+{
+	smp_mb__before_clear_bit();
+	set_bit(flag, &bp->sp_rtnl_state);
+	smp_mb__after_clear_bit();
+	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+	   flag);
+	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
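
The new helper consolidates the recurring set_bit / barrier /
schedule_delayed_work sequence that the rest of this series removes from
individual call sites. The barriers pair with the worker that consumes the
flags via test_and_clear_bit() (the sp_rtnl task). A simplified sketch of the
producer/consumer pairing; the handler name below is hypothetical:

/* Producer: essentially what bnx2x_schedule_sp_rtnl() does. */
static void schedule_flag(struct bnx2x *bp, enum sp_rtnl_flag flag)
{
	smp_mb__before_clear_bit();	/* order earlier stores before set_bit */
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();	/* flag visible before the work runs */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

/* Consumer (the sp_rtnl worker): each pending flag is handled once. */
static void sp_rtnl_worker(struct bnx2x *bp)
{
	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state))
		handle_tx_timeout(bp);	/* hypothetical handler */
}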
@@ -1324,4 +1324,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
 int bnx2x_drain_tx_queues(struct bnx2x *bp);
 void bnx2x_squeeze_objects(struct bnx2x *bp);

+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+			    u32 verbose);
+
 #endif /* BNX2X_CMN_H */
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 	 * as we are handling an attention on a work queue which must be
 	 * flushed at some rtnl-locked contexts (e.g. if down)
 	 */
-	if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
 }

 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 		if (IS_MF(bp))
 			bnx2x_link_sync_notify(bp);

-		set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
-
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
 		return;
 	}
 	case BNX2X_DCBX_STATE_TX_PAUSED:
...
@@ -87,7 +87,6 @@
 	(IRO[156].base + ((vfId) * IRO[156].m1))
 #define CSTORM_VF_TO_PF_OFFSET(funcId) \
 	(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
 	(IRO[203].base + ((pfId) * IRO[203].m1))
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
...
@@ -2848,7 +2848,7 @@ struct afex_stats {
 #define BCM_5710_FW_MAJOR_VERSION		7
 #define BCM_5710_FW_MINOR_VERSION		8
-#define BCM_5710_FW_REVISION_VERSION		17
+#define BCM_5710_FW_REVISION_VERSION		19
 #define BCM_5710_FW_ENGINEERING_VERSION		0
 #define BCM_5710_FW_COMPILE_FLAGS		1
...
@@ -918,7 +918,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 	u16 start = 0, end = 0;
 	u8 cos;
 #endif

-	if (disable_int)
+	if (IS_PF(bp) && disable_int)
 		bnx2x_int_disable(bp);

 	bp->stats_state = STATS_STATE_DISABLED;
@@ -929,24 +929,31 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 	/* Indices */
 	/* Common */
-	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
-		  bp->def_idx, bp->def_att_idx, bp->attn_state,
-		  bp->spq_prod_idx, bp->stats_counter);
-	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
-		  bp->def_status_blk->atten_status_block.attn_bits,
-		  bp->def_status_blk->atten_status_block.attn_bits_ack,
-		  bp->def_status_blk->atten_status_block.status_block_id,
-		  bp->def_status_blk->atten_status_block.attn_bits_index);
-	BNX2X_ERR("     def (");
-	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
-		pr_cont("0x%x%s",
-			bp->def_status_blk->sp_sb.index_values[i],
-			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
-	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
-		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
-			i*sizeof(u32));
+	if (IS_PF(bp)) {
+		struct host_sp_status_block *def_sb = bp->def_status_blk;
+		int data_size, cstorm_offset;
+
+		BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+			  bp->def_idx, bp->def_att_idx, bp->attn_state,
+			  bp->spq_prod_idx, bp->stats_counter);
+		BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+			  def_sb->atten_status_block.attn_bits,
+			  def_sb->atten_status_block.attn_bits_ack,
+			  def_sb->atten_status_block.status_block_id,
+			  def_sb->atten_status_block.attn_bits_index);
+		BNX2X_ERR("     def (");
+		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+			pr_cont("0x%x%s",
+				def_sb->sp_sb.index_values[i],
+				(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+		data_size = sizeof(struct hc_sp_status_block_data) /
+			    sizeof(u32);
+		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+		for (i = 0; i < data_size; i++)
+			*((u32 *)&sp_sb_data + i) =
+				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+				       i * sizeof(u32));

 	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
 		sp_sb_data.igu_sb_id,
@@ -956,6 +963,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 		sp_sb_data.p_func.vf_id,
 		sp_sb_data.p_func.vf_valid,
 		sp_sb_data.state);
+	}

 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1021,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 			pr_cont("0x%x%s",
 				fp->sb_index_values[j],
 				(j == loop - 1) ? ")" : " ");
+
+		/* VF cannot access FW reflection for status block */
+		if (IS_VF(bp))
+			continue;
+
 		/* fw sb data */
 		data_size = CHIP_IS_E1x(bp) ?
 			sizeof(struct hc_status_block_data_e1x) :
@@ -1064,7 +1077,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 	}

 #ifdef BNX2X_STOP_ON_ERROR
+	if (IS_PF(bp)) {
 	/* event queue */
 	BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
 	for (i = 0; i < NUM_EQ_DESC; i++) {
@@ -1073,7 +1086,9 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 		BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
 			  i, bp->eq_ring[i].message.opcode,
 			  bp->eq_ring[i].message.error);
-		BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+		BNX2X_ERR("data: %x %x %x\n",
+			  data[0], data[1], data[2]);
+	}
 	}
 	/* Rings */
@@ -1140,8 +1155,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 		}
 	}
 #endif
-	bnx2x_fw_dump(bp);
-	bnx2x_mc_assert(bp);
+	if (IS_PF(bp)) {
+		bnx2x_fw_dump(bp);
+		bnx2x_mc_assert(bp);
+	}
 	BNX2X_ERR("end crash dump -----------------\n");
 }
@@ -1814,6 +1831,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 		drv_cmd = BNX2X_Q_CMD_EMPTY;
 		break;

+	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+		break;
+
 	default:
 		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
 			  command, fp->index);
@@ -3644,10 +3666,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
 				    HW_CID(bp, cid));

-	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
-	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
-		 SPE_HDR_FUNCTION_ID);
+	/* In some cases, type may already contain the func-id
+	 * mainly in SRIOV related use cases, so we add it here only
+	 * if it's not already set.
+	 */
+	if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+		type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+			SPE_HDR_CONN_TYPE;
+		type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+			 SPE_HDR_FUNCTION_ID);
+	} else {
+		type = cmd_type;
+	}

 	spe->hdr.type = cpu_to_le16(type);
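
With this change an SR-IOV path can hand bnx2x_sp_post() a fully formed type
word and have it used verbatim. A hedged sketch of such a caller; vf_func_id
and the cid/mapping arguments are illustrative placeholders, not taken from
this series:

/* Caller that pre-encodes the VF's function id; bnx2x_sp_post() now
 * detects SPE_HDR_FUNCTION_ID being set and keeps the type as-is.
 */
int post_tpa_update(struct bnx2x *bp, int cid, dma_addr_t map, u8 vf_func_id)
{
	int type = ((ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) &
		    SPE_HDR_CONN_TYPE) |
		   ((vf_func_id << SPE_HDR_FUNCTION_ID_SHIFT) &
		    SPE_HDR_FUNCTION_ID);

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE, cid,
			     U64_HI(map), U64_LO(map), type);
}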
@@ -3878,10 +3908,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
 	 * This is due to some boards consuming sufficient power when driver is
 	 * up to overheat if fan fails.
 	 */
-	smp_mb__before_clear_bit();
-	set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
-	smp_mb__after_clear_bit();
-	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
 }

 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -5221,7 +5248,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 			continue;

 		case EVENT_RING_OPCODE_STAT_QUERY:
-			DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
+			DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
 			   "got statistics comp event %d\n",
 			   bp->stats_comp++);
 			/* nothing to do with stats comp */
@@ -5273,6 +5300,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 				break;

 			} else {
+				int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
 				DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
 				   "AFEX: ramrod completed FUNCTION_UPDATE\n");
 				f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5311,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 				 * sp_rtnl task as all Queue SP operations
 				 * should run under rtnl_lock.
 				 */
-				smp_mb__before_clear_bit();
-				set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
-					&bp->sp_rtnl_state);
-				smp_mb__after_clear_bit();
-
-				schedule_delayed_work(&bp->sp_rtnl_task, 0);
+				bnx2x_schedule_sp_rtnl(bp, cmd, 0);
 			}

 			goto next_spqe;
@@ -6005,18 +6029,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;

-	if (IS_MF_SI(bp))
-		/*
-		 * In switch independent mode, the TSTORM needs to accept
-		 * packets that failed classification, since approximate match
-		 * mac addresses aren't written to NIG LLH
-		 */
-		REG_WR8(bp, BAR_TSTRORM_INTMEM +
-			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
-	else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
-		REG_WR8(bp, BAR_TSTRORM_INTMEM +
-			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -12064,11 +12076,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 		return;
 	} else {
 		/* Schedule an SP task to handle rest of change */
-		DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
-		smp_mb__before_clear_bit();
-		set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
-		smp_mb__after_clear_bit();
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+				       NETIF_MSG_IFUP);
 	}
 }
@@ -12101,11 +12110,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
 			/* configuring mcast to a vf involves sleeping (when we
 			 * wait for the pf's response).
 			 */
-			smp_mb__before_clear_bit();
-			set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
-				&bp->sp_rtnl_state);
-			smp_mb__after_clear_bit();
-			schedule_delayed_work(&bp->sp_rtnl_task, 0);
+			bnx2x_schedule_sp_rtnl(bp,
+					       BNX2X_SP_RTNL_VFPF_MCAST, 0);
 		}
 	}
...
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
 	u8		cid_index;
 };

+struct bnx2x_queue_update_tpa_params {
+	dma_addr_t sge_map;
+	u8 update_ipv4;
+	u8 update_ipv6;
+	u8 max_tpa_queues;
+	u8 max_sges_pkt;
+	u8 complete_on_both_clients;
+	u8 dont_verify_thr;
+	u8 tpa_mode;
+	u8 _pad;
+
+	u16 sge_buff_sz;
+	u16 max_agg_sz;
+
+	u16 sge_pause_thr_low;
+	u16 sge_pause_thr_high;
+};
+
 struct rxq_pause_params {
 	u16		bd_th_lo;
 	u16		bd_th_hi;
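
These driver-level parameters mirror what the firmware expects in the new
tpa_update_ramrod_data (added to the slowpath union earlier in this diff). A
hedged sketch of the copy step the queue-state code would perform; the FW-side
struct below is a hypothetical stand-in, since the real tpa_update_ramrod_data
layout comes from the 7.8.19 FW headers:

/* Hypothetical mirror of the FW ramrod layout, for illustration only. */
struct fw_tpa_update_data_sketch {
	u8 update_ipv4, update_ipv6, max_tpa_queues, max_sges_for_packet;
	__le16 sge_buff_size, max_agg_size;
	__le16 sge_pause_thr_low, sge_pause_thr_high;
	__le32 sge_page_base_lo, sge_page_base_hi;
};

static void fill_tpa_ramrod_sketch(struct fw_tpa_update_data_sketch *data,
				   struct bnx2x_queue_update_tpa_params *p)
{
	/* plain bytes copy straight through */
	data->update_ipv4 = p->update_ipv4;
	data->update_ipv6 = p->update_ipv6;
	data->max_tpa_queues = p->max_tpa_queues;
	data->max_sges_for_packet = p->max_sges_pkt;
	/* multi-byte fields are little-endian on the FW side */
	data->sge_buff_size = cpu_to_le16(p->sge_buff_sz);
	data->max_agg_size = cpu_to_le16(p->max_agg_sz);
	data->sge_pause_thr_low = cpu_to_le16(p->sge_pause_thr_low);
	data->sge_pause_thr_high = cpu_to_le16(p->sge_pause_thr_high);
	/* DMA address of the SGE page, split into hi/lo halves */
	data->sge_page_base_lo = cpu_to_le32(U64_LO(p->sge_map));
	data->sge_page_base_hi = cpu_to_le32(U64_HI(p->sge_map));
}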
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
 	/* Params according to the current command */
 	union {
 		struct bnx2x_queue_update_params	update;
+		struct bnx2x_queue_update_tpa_params	update_tpa;
 		struct bnx2x_queue_setup_params		setup;
 		struct bnx2x_queue_init_params		init;
 		struct bnx2x_queue_setup_tx_only_params	tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
 			     u8 *ind_table);

-int validate_vlan_mac(struct bnx2x *bp,
-		      struct bnx2x_vlan_mac_obj *vlan_mac);
 #endif /* BNX2X_SP_VERBS */
@@ -83,6 +83,7 @@ struct bnx2x_vf_queue {
 	u16 index;
 	u16 sb_idx;
 	bool is_leading;
+	bool sp_initialized;
 };

 /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -100,6 +101,7 @@ union bnx2x_vfop_params {
 	struct bnx2x_mcast_ramrod_params	mcast;
 	struct bnx2x_config_rss_params		rss;
 	struct bnx2x_vfop_qctor_params		qctor;
+	struct bnx2x_queue_state_params		qstate;
 };

 /* forward */
@@ -166,6 +168,11 @@ struct bnx2x_vfop_args_filters {
 	atomic_t *credit;	/* non NULL means 'don't consume credit' */
 };

+struct bnx2x_vfop_args_tpa {
+	int	   qid;
+	dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
+};
+
 union bnx2x_vfop_args {
 	struct bnx2x_vfop_args_mcast	mc_list;
 	struct bnx2x_vfop_args_qctor	qctor;
@@ -173,6 +180,7 @@ union bnx2x_vfop_args {
 	struct bnx2x_vfop_args_defvlan	defvlan;
 	struct bnx2x_vfop_args_qx	qx;
 	struct bnx2x_vfop_args_filters	filters;
+	struct bnx2x_vfop_args_tpa	tpa;
 };

 struct bnx2x_vfop {
@@ -704,6 +712,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
 		       struct bnx2x_virtf *vf,
 		       struct bnx2x_vfop_cmd *cmd);

+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+		       struct bnx2x_virtf *vf,
+		       struct bnx2x_vfop_cmd *cmd,
+		       struct vfpf_tpa_tlv *tpa_tlv);
+
 /* VF release ~ VF close + VF release-resources
  *
  * Release is the ultimate SW shutdown and is called whenever an
...
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	vf->leading_rss = cl_id;
 	q->is_leading = true;
+	q->sp_initialized = true;
 }

 /* ask the pf to open a queue for the vf */
@@ -1159,7 +1160,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	resp->pfdev_info.db_size = bp->db_size;
 	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
 	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
-				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+				   PFVF_CAP_TPA |
+				   PFVF_CAP_TPA_UPDATE);
 	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
 			  sizeof(resp->pfdev_info.fw_ver));
@@ -1694,16 +1696,12 @@ static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
 	return -ENOMEM;
 }

-static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
-				       struct bnx2x_virtf *vf,
-				       struct bnx2x_vf_mbx *mbx)
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+				      struct bnx2x_virtf *vf,
+				      struct vfpf_set_q_filters_tlv *filters)
 {
-	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
 	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vf_mbx_resp,
-		.block = false,
-	};
+	int rc = 0;

 	/* if a mac was already set for this VF via the set vf mac ndo, we only
 	 * accept mac configurations of that mac. Why accept them at all?
@@ -1716,6 +1714,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
 			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
 				  vf->abs_vfid);
 			vf->op_rc = -EPERM;
+			rc = -EPERM;
 			goto response;
 		}
@@ -1726,9 +1725,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
 				  vf->abs_vfid);
 			vf->op_rc = -EPERM;
+			rc = -EPERM;
 			goto response;
 		}
 	}

+response:
+	return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf,
+				       struct vfpf_set_q_filters_tlv *filters)
+{
+	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+	int rc = 0;
+
 	/* if vlan was set by hypervisor we don't allow guest to config vlan */
 	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
 		int i;
@@ -1740,13 +1752,36 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
 				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
 					  vf->abs_vfid);
 				vf->op_rc = -EPERM;
+				rc = -EPERM;
 				goto response;
 			}
 		}
 	}

 	/* verify vf_qid */
-	if (filters->vf_qid > vf_rxq_count(vf))
+	if (filters->vf_qid > vf_rxq_count(vf)) {
+		rc = -EPERM;
+		goto response;
+	}
+
+response:
+	return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf,
+				       struct bnx2x_vf_mbx *mbx)
+{
+	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+	struct bnx2x_vfop_cmd cmd = {
+		.done = bnx2x_vf_mbx_resp,
+		.block = false,
+	};
+
+	if (bnx2x_filters_validate_mac(bp, vf, filters))
+		goto response;
+
+	if (bnx2x_filters_validate_vlan(bp, vf, filters))
 		goto response;

 	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1877,6 +1912,75 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	bnx2x_vf_mbx_resp(bp, vf);
 }

+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+				     struct vfpf_tpa_tlv *tpa_tlv)
+{
+	int rc = 0;
+
+	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+	    U_ETH_MAX_SGES_FOR_PACKET) {
+		rc = -EINVAL;
+		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+			  tpa_tlv->tpa_client_info.max_sges_for_packet,
+			  U_ETH_MAX_SGES_FOR_PACKET);
+	}
+
+	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+		rc = -EINVAL;
+		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+			  tpa_tlv->tpa_client_info.max_tpa_queues,
+			  MAX_AGG_QS(bp));
+	}
+
+	return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    struct bnx2x_vf_mbx *mbx)
+{
+	struct bnx2x_vfop_cmd cmd = {
+		.done = bnx2x_vf_mbx_resp,
+		.block = false,
+	};
+	struct bnx2x_queue_update_tpa_params *vf_op_params =
+		&vf->op_params.qstate.params.update_tpa;
+	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+
+	memset(vf_op_params, 0, sizeof(*vf_op_params));
+
+	if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+		goto mbx_resp;
+
+	vf_op_params->complete_on_both_clients =
+		tpa_tlv->tpa_client_info.complete_on_both_clients;
+	vf_op_params->dont_verify_thr =
+		tpa_tlv->tpa_client_info.dont_verify_thr;
+	vf_op_params->max_agg_sz =
+		tpa_tlv->tpa_client_info.max_agg_size;
+	vf_op_params->max_sges_pkt =
+		tpa_tlv->tpa_client_info.max_sges_for_packet;
+	vf_op_params->max_tpa_queues =
+		tpa_tlv->tpa_client_info.max_tpa_queues;
+	vf_op_params->sge_buff_sz =
+		tpa_tlv->tpa_client_info.sge_buff_size;
+	vf_op_params->sge_pause_thr_high =
+		tpa_tlv->tpa_client_info.sge_pause_thr_high;
+	vf_op_params->sge_pause_thr_low =
+		tpa_tlv->tpa_client_info.sge_pause_thr_low;
+	vf_op_params->tpa_mode =
+		tpa_tlv->tpa_client_info.tpa_mode;
+	vf_op_params->update_ipv4 =
+		tpa_tlv->tpa_client_info.update_ipv4;
+	vf_op_params->update_ipv6 =
+		tpa_tlv->tpa_client_info.update_ipv6;
+
+	vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
+
+mbx_resp:
+	if (vf->op_rc)
+		bnx2x_vf_mbx_resp(bp, vf);
+}
+
 /* dispatch request */
 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				 struct bnx2x_vf_mbx *mbx)
@@ -1916,6 +2020,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		case CHANNEL_TLV_UPDATE_RSS:
 			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
 			return;
+		case CHANNEL_TLV_UPDATE_TPA:
+			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+			return;
 		}
 	} else {
...
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
 #define PFVF_CAP_RSS		0x00000001
 #define PFVF_CAP_DHC		0x00000002
 #define PFVF_CAP_TPA		0x00000004
+#define PFVF_CAP_TPA_UPDATE	0x00000008
 		char fw_ver[32];
 		u16 db_size;
 		u8  indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
 	u32 rx_mask;	/* see mask constants at the top of the file */
 };

+struct vfpf_tpa_tlv {
+	struct vfpf_first_tlv	first_tlv;
+
+	struct vf_pf_tpa_client_info {
+		aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+		u8 update_ipv4;
+		u8 update_ipv6;
+		u8 max_tpa_queues;
+		u8 max_sges_for_packet;
+		u8 complete_on_both_clients;
+		u8 dont_verify_thr;
+		u8 tpa_mode;
+		u16 sge_buff_size;
+		u16 max_agg_size;
+		u16 sge_pause_thr_low;
+		u16 sge_pause_thr_high;
+	} tpa_client_info;
+};
+
 /* close VF (disable VF) */
 struct vfpf_close_tlv {
 	struct vfpf_first_tlv   first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
 	struct vfpf_set_q_filters_tlv	set_q_filters;
 	struct vfpf_release_tlv		release;
 	struct vfpf_rss_tlv		update_rss;
+	struct vfpf_tpa_tlv		update_tpa;
 	struct channel_list_end_tlv	list_end;
 	struct tlv_buffer_size		tlv_buf_size;
 };
@@ -405,6 +426,7 @@ enum channel_tlvs {
 	CHANNEL_TLV_PF_SET_VLAN,
 	CHANNEL_TLV_UPDATE_RSS,
 	CHANNEL_TLV_PHYS_PORT_ID,
+	CHANNEL_TLV_UPDATE_TPA,
 	CHANNEL_TLV_MAX
 };
...
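
The VF side of this channel (not part of the excerpt above) fills
vfpf_tpa_tlv and posts it over the existing PF/VF mailbox. A hedged sketch
following the request pattern used elsewhere in bnx2x_vfpf.c; the function
name is illustrative, and the list-end TLV and channel locking are omitted:

static int vf_send_update_tpa_sketch(struct bnx2x *bp, bool update_ipv4)
{
	struct vfpf_tpa_tlv *req = &bp->vf2pf_mbox->req.update_tpa;
	struct pfvf_general_resp_tlv *resp =
		&bp->vf2pf_mbox->resp.general_resp;

	/* header: request type + length, as the other vfpf requests do */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_TPA,
			sizeof(*req));

	req->tpa_client_info.update_ipv4 = update_ipv4;
	req->tpa_client_info.max_tpa_queues = MAX_AGG_QS(bp);
	/* sge_addr[], buffer sizes and pause thresholds elided here */

	/* post the request and wait for the PF's status */
	return bnx2x_send_msg2pf(bp, &resp->hdr.status,
				 bp->vf2pf_mbox_mapping);
}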