Commit 62c02788 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2020-05-27

This series contains updates to the ice driver only.

Jesse fixes a number of issues, starting with the remaining signed
versus unsigned comparison issues.  He also cleans up an unused code
define and simplifies the implementation of the manage MAC write
command by using a simple array to represent the MAC address when
writing it.
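
To illustrate the simplification, here is a minimal userspace sketch
(the struct layout below is hypothetical, not the real admin queue
descriptor): the MAC travels as a plain byte stream in network order,
so one copy replaces the old htons()/htonl() split into high and low
halves.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Hypothetical command layout: the MAC is a byte stream in network
 * order, so no endianness conversion is needed. */
struct mac_write_cmd {
	uint8_t flags;
	uint8_t reserved;
	uint8_t mac_addr[ETH_ALEN];
};

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct mac_write_cmd cmd = { 0 };

	/* one copy replaces the old sah/sal split; this stands in for
	 * the kernel's ether_addr_copy() */
	memcpy(cmd.mac_addr, mac, ETH_ALEN);

	for (int i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", cmd.mac_addr[i], i < ETH_ALEN - 1 ? ':' : '\n');
	return 0;
}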

Paul fixes the setting of the VF default LAN address by removing a
check that assumed the address had been deleted and zeroed.

Surabhi prevents a memory leak on filter management initialization
failures and during queue initialization and buffer allocation failures.
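
The filter management fix follows the usual unwind-on-error pattern; a
minimal sketch under assumed names (init_recipes() stands in for
ice_init_def_sw_recp()):

#include <stdlib.h>

struct switch_info { int recipes_ready; };

/* stand-in for ice_init_def_sw_recp(); returns 0 or a negative error */
static int init_recipes(struct switch_info *sw)
{
	sw->recipes_ready = 1;
	return 0;
}

/* If a sub-init fails, free what this function allocated before
 * returning, so the caller never sees a half-built, leaked object. */
static int init_fltr_mgmt(struct switch_info **out)
{
	struct switch_info *sw = calloc(1, sizeof(*sw));
	int status;

	if (!sw)
		return -1;

	status = init_recipes(sw);
	if (status) {
		free(sw);	/* this was previously leaked on failure */
		return status;
	}

	*out = sw;
	return 0;
}

int main(void)
{
	struct switch_info *sw = NULL;
	int ret = init_fltr_mgmt(&sw);

	free(sw);
	return ret ? 1 : 0;
}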

Brett adds additional receive error counters that are reported by
ethtool.  He also fixes the enabling and disabling of VLAN stripping
when a port VLAN (PVID) has been set.

Evan fixes a race condition between the firmware and software, which can
occur between the admin queue setup and the first command sent.
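
A rough userspace sketch of the resulting init-with-retry loop
(constants and stubs here are illustrative; the driver uses
ICE_CTL_Q_ADMIN_INIT_TIMEOUT, ICE_CTL_Q_ADMIN_INIT_MSEC, and the new
ICE_ERR_AQ_FW_CRITICAL status code):

#include <stdio.h>
#include <unistd.h>

#define INIT_RETRIES	10	/* try up to 10 times */
#define INIT_DELAY_MS	100	/* wait 100 msec between attempts */
#define ERR_FW_CRITICAL	(-105)

static int fw_faults = 2;	/* simulate two critical FW errors */

static int init_adminq(void) { return 0; }
static void shutdown_adminq(void) { }

/* stand-in for ice_init_check_adminq(): report a critical FW error
 * for the first couple of attempts, then succeed */
static int check_adminq(void)
{
	return fw_faults-- > 0 ? ERR_FW_CRITICAL : 0;
}

static int init_adminq_with_retry(void)
{
	int status, retry = 0;

	do {
		status = init_adminq();
		if (status)
			return status;

		status = check_adminq();
		if (status != ERR_FW_CRITICAL)
			break;	/* success, or a non-retryable error */

		/* the firmware hit a critical error while the queue
		 * came up: tear down, back off, and try again */
		shutdown_adminq();
		usleep(INIT_DELAY_MS * 1000);
	} while (retry++ < INIT_RETRIES);

	return status;
}

int main(void)
{
	printf("adminq init: %d\n", init_adminq_with_retry());
	return 0;
}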

Marta fixes the driver so that when XDP transmit rings are destroyed,
the XDP transmit queues are destroyed as well.  She also updates the
statistics when XDP transmit programs are loaded and packets are sent,
and changes the number of XDP transmit queues to match the number of
receive queues, instead of matching the number of transmit queues.

Bruce avoids undefined behavior by not writing the 8-bit element
init_q_state with the associated internal-to-hardware field, which is
122 bits wide.
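
The guard that prevents this reduces to a simple width check before
each field is packed; a minimal sketch (the field names here are
illustrative):

#include <stdio.h>

struct ctx_field {
	int width;	/* bits to pack into the hardware context */
	int size_of;	/* bytes in the source structure element */
};

/* Copying 'width' bits out of a 'size_of'-byte element reads past the
 * element when width > size_of * 8, which is undefined behavior, so
 * such fields must be skipped rather than written. */
static int field_fits(const struct ctx_field *f)
{
	return f->width <= f->size_of * 8;
}

int main(void)
{
	struct ctx_field int_q_state = { .width = 122, .size_of = 1 };

	if (!field_fits(&int_q_state))
		printf("skipping write: %d bits do not fit in %d byte(s)\n",
		       int_q_state.width, int_q_state.size_of);
	return 0;
}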

Anirudh (Ani) refactors the receive checksum checks.

Krzysztof notifies the user if the fill queue is not long enough to
prepare all buffers before packet processing starts; in that case, the
buffers are allocated during the NAPI poll.
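
A sketch of the capacity check, with a stubbed can_alloc() standing in
for xsk_buff_can_alloc() and the ring sizes passed in as plain numbers:

#include <stdio.h>

/* stub: in the driver, xsk_buff_can_alloc() asks the AF_XDP pool
 * whether the fill queue can supply this many buffers right now */
static int can_alloc(unsigned int fill_queue_len, unsigned int count)
{
	return fill_queue_len >= count;
}

static int setup_rx_ring(unsigned int fill_queue_len, unsigned int num_bufs)
{
	if (!can_alloc(fill_queue_len, num_bufs)) {
		/* warn instead of failing: the remaining buffers are
		 * allocated later, from the NAPI poll loop */
		fprintf(stderr,
			"fill queue (%u) too short for %u buffers; allocation deferred to NAPI poll\n",
			fill_queue_len, num_bufs);
		return 0;
	}

	/* ... allocate all num_bufs buffers up front ... */
	return 0;
}

int main(void)
{
	return setup_rx_ring(256, 511);
}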
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1e372dbd 3f0d97cd
@@ -156,13 +156,11 @@ struct ice_aqc_manage_mac_write {
 #define ICE_AQC_MAN_MAC_WR_MC_MAG_EN		BIT(0)
 #define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP	BIT(1)
 #define ICE_AQC_MAN_MAC_WR_S		6
-#define ICE_AQC_MAN_MAC_WR_M		(3 << ICE_AQC_MAN_MAC_WR_S)
+#define ICE_AQC_MAN_MAC_WR_M		ICE_M(3, ICE_AQC_MAN_MAC_WR_S)
 #define ICE_AQC_MAN_MAC_UPDATE_LAA	0
-#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL	(BIT(0) << ICE_AQC_MAN_MAC_WR_S)
-	/* High 16 bits of MAC address in big endian order */
-	__be16 sah;
-	/* Low 32 bits of MAC address in big endian order */
-	__be32 sal;
+#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL	BIT(ICE_AQC_MAN_MAC_WR_S)
+	/* byte stream in network order */
+	u8 mac_addr[ETH_ALEN];
 	__le32 addr_high;
 	__le32 addr_low;
 };
...
@@ -13,7 +13,7 @@
  */
 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
 {
-	int offset, i;
+	unsigned int offset, i;
 
 	mutex_lock(qs_cfg->qs_mutex);
 	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
@@ -39,7 +39,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
  */
 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
 {
-	int i, index = 0;
+	unsigned int i, index = 0;
 
 	mutex_lock(qs_cfg->qs_mutex);
 	for (i = 0; i < qs_cfg->q_count; i++) {
@@ -281,7 +281,9 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
  */
 int ice_setup_rx_ctx(struct ice_ring *ring)
 {
+	struct device *dev = ice_pf_to_dev(ring->vsi->back);
 	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+	u16 num_bufs = ICE_DESC_UNUSED(ring);
 	struct ice_vsi *vsi = ring->vsi;
 	u32 rxdid = ICE_RXDID_FLEX_NIC;
 	struct ice_rlan_ctx rlan_ctx;
@@ -324,7 +326,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 			return err;
 
 		xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
-		dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+		dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
			 ring->q_index);
 	} else {
 		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
@@ -408,7 +410,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* Absolute queue number out of 2K needs to be passed */
 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
 	if (err) {
-		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+		dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
 		return -EIO;
 	}
@@ -426,13 +428,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
 	writel(0, ring->tail);
 
-	err = ring->xsk_umem ?
-	      ice_alloc_rx_bufs_zc(ring, ICE_DESC_UNUSED(ring)) :
-	      ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
-	if (err)
-		dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
-			 ring->xsk_umem ? "UMEM enabled " : "",
-			 ring->q_index, pf_q);
+	if (ring->xsk_umem) {
+		if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
+			dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+				 num_bufs, ring->q_index);
+			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
+
+			return 0;
+		}
+
+		err = ice_alloc_rx_bufs_zc(ring, num_bufs);
+		if (err)
+			dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+				 ring->q_index, pf_q);
+
+		return 0;
+	}
+
+	ice_alloc_rx_bufs(ring, num_bufs);
 
 	return 0;
 }
@@ -638,6 +650,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
 	struct ice_aqc_add_txqs_perq *txq;
 	struct ice_pf *pf = vsi->back;
 	u8 buf_len = sizeof(*qg_buf);
+	struct ice_hw *hw = &pf->hw;
 	enum ice_status status;
 	u16 pf_q;
 	u8 tc;
@@ -646,13 +659,13 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
 	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
 	/* copy context contents into the qg_buf */
 	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-	ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);
 
	/* init queue specific tail reg. It is referred as
	 * transmit comm scheduler queue doorbell.
	 */
-	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
 
	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
...
@@ -387,6 +387,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 {
 	struct ice_switch_info *sw;
+	enum ice_status status;
 
 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
 				       sizeof(*hw->switch_info), GFP_KERNEL);
@@ -397,7 +398,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
 
-	return ice_init_def_sw_recp(hw);
+	status = ice_init_def_sw_recp(hw);
+	if (status) {
+		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
+		return status;
+	}
+
+	return 0;
 }
 
 /**
@@ -1092,7 +1098,7 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 	rlan_ctx->prefena = 1;
 
-	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
+	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
 }
@@ -1994,10 +2000,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
 
 	cmd->flags = flags;
-
-	/* Prep values for flags, sah, sal */
-	cmd->sah = htons(*((const u16 *)mac_addr));
-	cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
+	ether_addr_copy(cmd->mac_addr, mac_addr);
 
 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
@@ -3196,12 +3199,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 /**
  * ice_set_ctx - set context bits in packed structure
+ * @hw: pointer to the hardware structure
  * @src_ctx: pointer to a generic non-packed context structure
  * @dest_ctx: pointer to memory for the packed structure
  * @ce_info: a description of the structure to be transformed
  */
 enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+	    const struct ice_ctx_ele *ce_info)
 {
 	int f;
@@ -3210,6 +3215,12 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 		 * using the correct size so that we are correct regardless
 		 * of the endianness of the machine.
 		 */
+		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
+			ice_debug(hw, ICE_DBG_QCTX,
+				  "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
+				  f, ce_info[f].width, ce_info[f].size_of);
+			continue;
+		}
+
 		switch (ce_info[f].size_of) {
 		case sizeof(u8):
 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
...
@@ -70,7 +70,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
 extern const struct ice_ctx_ele ice_tlan_ctx_info[];
 enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+	    const struct ice_ctx_ele *ce_info);
 extern struct mutex ice_global_cfg_lock_sw;
...
@@ -12,6 +12,7 @@ do { \
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
+	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
@@ -20,6 +21,7 @@ do { \
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
+	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
 } while (0)
@@ -199,7 +201,9 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
+	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
+	cq->rq.dma_head = NULL;
 
	return ICE_ERR_NO_MEMORY;
 }
@@ -245,7 +249,9 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
+	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+	cq->sq.dma_head = NULL;
 
	return ICE_ERR_NO_MEMORY;
 }
@@ -304,6 +310,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	return 0;
 }
 
+#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
+do {									\
+	int i;								\
+	/* free descriptors */						\
+	if ((qi)->ring.r.ring##_bi)					\
+		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
+			if ((qi)->ring.r.ring##_bi[i].pa) {		\
+				dmam_free_coherent(ice_hw_to_dev(hw),	\
+					(qi)->ring.r.ring##_bi[i].size,	\
+					(qi)->ring.r.ring##_bi[i].va,	\
+					(qi)->ring.r.ring##_bi[i].pa);	\
+				(qi)->ring.r.ring##_bi[i].va = NULL;	\
+				(qi)->ring.r.ring##_bi[i].pa = 0;	\
+				(qi)->ring.r.ring##_bi[i].size = 0;	\
+			}						\
+	/* free the buffer info list */					\
+	if ((qi)->ring.cmd_buf)						\
+		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
+	/* free DMA head */						\
+	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
+} while (0)
+
 /**
  * ice_init_sq - main initialization routine for Control ATQ
  * @hw: pointer to the hardware structure
@@ -357,6 +385,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	goto init_ctrlq_exit;
 
 init_ctrlq_free_rings:
+	ICE_FREE_CQ_BUFS(hw, cq, sq);
 	ice_free_cq_ring(hw, &cq->sq);
 
 init_ctrlq_exit:
@@ -416,33 +445,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	goto init_ctrlq_exit;
 
init_ctrlq_free_rings:
+	ICE_FREE_CQ_BUFS(hw, cq, rq);
 	ice_free_cq_ring(hw, &cq->rq);
 
init_ctrlq_exit:
 	return ret_code;
 }
 
-#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
-do {									\
-	int i;								\
-	/* free descriptors */						\
-	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
-		if ((qi)->ring.r.ring##_bi[i].pa) {			\
-			dmam_free_coherent(ice_hw_to_dev(hw),		\
-					   (qi)->ring.r.ring##_bi[i].size,\
-					   (qi)->ring.r.ring##_bi[i].va,\
-					   (qi)->ring.r.ring##_bi[i].pa);\
-			(qi)->ring.r.ring##_bi[i].va = NULL;		\
-			(qi)->ring.r.ring##_bi[i].pa = 0;		\
-			(qi)->ring.r.ring##_bi[i].size = 0;		\
-		}							\
-	/* free the buffer info list */					\
-	if ((qi)->ring.cmd_buf)						\
-		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
-	/* free DMA head */						\
-	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
-} while (0)
-
 /**
  * ice_shutdown_sq - shutdown the Control ATQ
  * @hw: pointer to the hardware structure
@@ -634,6 +643,50 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 	return ret_code;
 }
 
+/**
+ * ice_shutdown_ctrlq - shutdown routine for any control queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
+ */
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+	struct ice_ctl_q_info *cq;
+
+	switch (q_type) {
+	case ICE_CTL_Q_ADMIN:
+		cq = &hw->adminq;
+		if (ice_check_sq_alive(hw, cq))
+			ice_aq_q_shutdown(hw, true);
+		break;
+	case ICE_CTL_Q_MAILBOX:
+		cq = &hw->mailboxq;
+		break;
+	default:
+		return;
+	}
+
+	ice_shutdown_sq(hw, cq);
+	ice_shutdown_rq(hw, cq);
+}
+
+/**
+ * ice_shutdown_all_ctrlq - shutdown routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
+ */
+void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+{
+	/* Shutdown FW admin queue */
+	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+	/* Shutdown PF-VF Mailbox */
+	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+}
+
 /**
  * ice_init_all_ctrlq - main initialization routine for all control queues
  * @hw: pointer to the hardware structure
@@ -649,17 +702,27 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
  */
 enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
 {
-	enum ice_status ret_code;
+	enum ice_status status;
+	u32 retry = 0;
 
 	/* Init FW admin queue */
-	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
-	if (ret_code)
-		return ret_code;
-
-	ret_code = ice_init_check_adminq(hw);
-	if (ret_code)
-		return ret_code;
+	do {
+		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
+		if (status)
+			return status;
+
+		status = ice_init_check_adminq(hw);
+		if (status != ICE_ERR_AQ_FW_CRITICAL)
+			break;
+
+		ice_debug(hw, ICE_DBG_AQ_MSG,
+			  "Retry Admin Queue init due to FW critical error\n");
+		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
+	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
+
+	if (status)
+		return status;
 
 	/* Init Mailbox queue */
 	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
@@ -700,50 +763,6 @@ enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
 	return ice_init_all_ctrlq(hw);
 }
 
-/**
- * ice_shutdown_ctrlq - shutdown routine for any control queue
- * @hw: pointer to the hardware structure
- * @q_type: specific Control queue type
- *
- * NOTE: this function does not destroy the control queue locks.
- */
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
-{
-	struct ice_ctl_q_info *cq;
-
-	switch (q_type) {
-	case ICE_CTL_Q_ADMIN:
-		cq = &hw->adminq;
-		if (ice_check_sq_alive(hw, cq))
-			ice_aq_q_shutdown(hw, true);
-		break;
-	case ICE_CTL_Q_MAILBOX:
-		cq = &hw->mailboxq;
-		break;
-	default:
-		return;
-	}
-
-	ice_shutdown_sq(hw, cq);
-	ice_shutdown_rq(hw, cq);
-}
-
-/**
- * ice_shutdown_all_ctrlq - shutdown routine for all control queues
- * @hw: pointer to the hardware structure
- *
- * NOTE: this function does not destroy the control queue locks. The driver
- * may call this at runtime to shutdown and later restart control queues, such
- * as in response to a reset event.
- */
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
-{
-	/* Shutdown FW admin queue */
-	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
-	/* Shutdown PF-VF Mailbox */
-	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
-}
-
 /**
  * ice_destroy_ctrlq_locks - Destroy locks for a control queue
  * @cq: pointer to the control queue
@@ -1042,9 +1061,15 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	/* update the error if time out occurred */
 	if (!cmd_completed) {
-		ice_debug(hw, ICE_DBG_AQ_MSG,
-			  "Control Send Queue Writeback timeout.\n");
-		status = ICE_ERR_AQ_TIMEOUT;
+		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
+		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
+			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
+			status = ICE_ERR_AQ_FW_CRITICAL;
+		} else {
+			ice_debug(hw, ICE_DBG_AQ_MSG,
+				  "Control Send Queue Writeback timeout.\n");
+			status = ICE_ERR_AQ_TIMEOUT;
+		}
 	}
 
sq_send_command_error:
...
@@ -34,6 +34,8 @@ enum ice_ctl_q {
 /* Control Queue timeout settings - max delay 250ms */
 #define ICE_CTL_Q_SQ_CMD_TIMEOUT	2500  /* Count 2500 times */
 #define ICE_CTL_Q_SQ_CMD_USEC		100   /* Check every 100usec */
+#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT	10    /* Count 10 times */
+#define ICE_CTL_Q_ADMIN_INIT_MSEC	100   /* Check every 100msec */
 
 struct ice_ctl_q_ring {
 	void *dma_head;			/* Virtual address to DMA head */
@@ -59,6 +61,7 @@ struct ice_ctl_q_ring {
 	u32 bal;
 	u32 len_mask;
 	u32 len_ena_mask;
+	u32 len_crit_mask;
 	u32 head_mask;
 };
...
@@ -7,8 +7,6 @@
 #include "ice_dcb_nl.h"
 #include <net/dcbnl.h>
 
-#define ICE_APP_PROT_ID_ROCE	0x8915
-
 /**
  * ice_dcbnl_devreset - perform enough of a ifdown/ifup to sync DCBNL info
  * @netdev: device associated with interface that needs reset
...
@@ -39,6 +39,7 @@
 #define PF_MBX_ARQH_ARQH_M		ICE_M(0x3FF, 0)
 #define PF_MBX_ARQLEN			0x0022E480
 #define PF_MBX_ARQLEN_ARQLEN_M		ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQCRIT_M		BIT(30)
 #define PF_MBX_ARQLEN_ARQENABLE_M	BIT(31)
 #define PF_MBX_ARQT			0x0022E580
 #define PF_MBX_ATQBAH			0x0022E180
@@ -47,6 +48,7 @@
 #define PF_MBX_ATQH_ATQH_M		ICE_M(0x3FF, 0)
 #define PF_MBX_ATQLEN			0x0022E200
 #define PF_MBX_ATQLEN_ATQLEN_M		ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQCRIT_M		BIT(30)
 #define PF_MBX_ATQLEN_ATQENABLE_M	BIT(31)
 #define PF_MBX_ATQT			0x0022E300
 #define PRTDCB_GENC			0x00083000
...
@@ -581,7 +581,7 @@ struct ice_tlan_ctx {
 	u8 drop_ena;
 	u8 cache_prof_idx;
 	u8 pkt_shaper_prof_idx;
-	u8 int_q_state;	/* width not needed - internal do not write */
+	u8 int_q_state;	/* width not needed - internal - DO NOT WRITE!!! */
 };
 
 /* macro to make the table lines short */
...
@@ -1812,6 +1812,12 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
 	enum ice_status status;
 	int ret = 0;
 
+	/* do not allow modifying VLAN stripping when a port VLAN is configured
+	 * on this VSI
+	 */
+	if (vsi->info.pvid)
+		return 0;
+
 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
 	if (!ctxt)
 		return -ENOMEM;
@@ -2779,7 +2785,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 		ice_vsi_map_rings_to_vectors(vsi);
 		if (ice_is_xdp_ena_vsi(vsi)) {
-			vsi->num_xdp_txq = vsi->alloc_txq;
+			vsi->num_xdp_txq = vsi->alloc_rxq;
 			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
 			if (ret)
 				goto err_vectors;
...
@@ -1899,6 +1899,9 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
 		max_txqs[i] = vsi->num_txq;
 
+	/* change number of XDP Tx queues to 0 */
+	vsi->num_xdp_txq = 0;
+
 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
 			       max_txqs);
 }
@@ -1932,7 +1935,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 	}
 
 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
-		vsi->num_xdp_txq = vsi->alloc_txq;
+		vsi->num_xdp_txq = vsi->alloc_rxq;
 		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
 		if (xdp_ring_err)
 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
@@ -4216,6 +4219,33 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 }
 
+/**
+ * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
+ * @vsi: the VSI to be updated
+ * @rings: rings to work on
+ * @count: number of rings
+ */
+static void
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+			     u16 count)
+{
+	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+	u16 i;
+
+	for (i = 0; i < count; i++) {
+		struct ice_ring *ring;
+		u64 pkts, bytes;
+
+		ring = READ_ONCE(rings[i]);
+		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+		vsi_stats->tx_packets += pkts;
+		vsi_stats->tx_bytes += bytes;
+		vsi->tx_restart += ring->tx_stats.restart_q;
+		vsi->tx_busy += ring->tx_stats.tx_busy;
+		vsi->tx_linearize += ring->tx_stats.tx_linearize;
+	}
+}
+
 /**
  * ice_update_vsi_ring_stats - Update VSI stats counters
  * @vsi: the VSI to be updated
@@ -4243,15 +4273,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 	rcu_read_lock();
 
 	/* update Tx rings counters */
-	ice_for_each_txq(vsi, i) {
-		ring = READ_ONCE(vsi->tx_rings[i]);
-		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
-		vsi_stats->tx_packets += pkts;
-		vsi_stats->tx_bytes += bytes;
-		vsi->tx_restart += ring->tx_stats.restart_q;
-		vsi->tx_busy += ring->tx_stats.tx_busy;
-		vsi->tx_linearize += ring->tx_stats.tx_linearize;
-	}
+	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
 
 	/* update Rx rings counters */
 	ice_for_each_rxq(vsi, i) {
@@ -4263,6 +4285,11 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
 	}
 
+	/* update XDP Tx rings counters */
+	if (ice_is_xdp_ena_vsi(vsi))
+		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+					     vsi->num_xdp_txq);
+
 	rcu_read_unlock();
 }
@@ -4295,7 +4322,13 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
 	if (vsi->type == ICE_VSI_PF) {
 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
 		cur_ns->rx_errors = pf->stats.crc_errors +
-				    pf->stats.illegal_bytes;
+				    pf->stats.illegal_bytes +
+				    pf->stats.rx_len_errors +
+				    pf->stats.rx_undersize +
+				    pf->hw_csum_rx_error +
+				    pf->stats.rx_jabber +
+				    pf->stats.rx_fragments +
+				    pf->stats.rx_oversize;
 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
 		/* record drops from the port level */
 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
@@ -5035,7 +5068,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
 	struct ice_pf *pf = vsi->back;
 	u8 count = 0;
 
-	if (new_mtu == netdev->mtu) {
+	if (new_mtu == (int)netdev->mtu) {
 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
 		return 0;
 	}
@@ -5050,11 +5083,11 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
 		}
 	}
 
-	if (new_mtu < netdev->min_mtu) {
+	if (new_mtu < (int)netdev->min_mtu) {
 		netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
 			   netdev->min_mtu);
 		return -EINVAL;
-	} else if (new_mtu > netdev->max_mtu) {
+	} else if (new_mtu > (int)netdev->max_mtu) {
 		netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
 			   netdev->min_mtu);
 		return -EINVAL;
@@ -5075,7 +5108,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
 		return -EBUSY;
 	}
 
-	netdev->mtu = new_mtu;
+	netdev->mtu = (unsigned int)new_mtu;
 
 	/* if VSI is up, bring it down and then back up */
 	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
@@ -5201,6 +5234,8 @@ const char *ice_stat_str(enum ice_status stat_err)
 		return "ICE_ERR_AQ_NO_WORK";
 	case ICE_ERR_AQ_EMPTY:
 		return "ICE_ERR_AQ_EMPTY";
+	case ICE_ERR_AQ_FW_CRITICAL:
+		return "ICE_ERR_AQ_FW_CRITICAL";
 	}
 
 	return "ICE_ERR_UNKNOWN";
...
@@ -37,6 +37,7 @@ enum ice_status {
 	ICE_ERR_AQ_FULL				= -102,
 	ICE_ERR_AQ_NO_WORK			= -103,
 	ICE_ERR_AQ_EMPTY			= -104,
+	ICE_ERR_AQ_FW_CRITICAL			= -105,
 };
 
 #endif /* _ICE_STATUS_H_ */
...
@@ -38,7 +38,8 @@
  */
 #if (PAGE_SIZE < 8192)
 #define ICE_2K_TOO_SMALL_WITH_PADDING \
-	((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
+	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
+	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
 
 /**
  * ice_compute_pad - compute the padding
@@ -107,8 +108,8 @@ static inline int ice_skb_pad(void)
 #define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
 		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
 #define ICE_DESC_UNUSED(R)	\
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
+	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	      (R)->next_to_clean - (R)->next_to_use - 1)
 
 #define ICE_TX_FLAGS_TSO	BIT(0)
 #define ICE_TX_FLAGS_HW_VLAN	BIT(1)
...
@@ -84,17 +84,12 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
 {
 	struct ice_rx_ptype_decoded decoded;
-	u16 rx_error, rx_status;
-	u16 rx_stat_err1;
+	u16 rx_status0, rx_status1;
 	bool ipv4, ipv6;
 
-	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
-	rx_error = rx_status & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
-				BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |
-				BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |
-				BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S));
-	rx_stat_err1 = le16_to_cpu(rx_desc->wb.status_error1);
+	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
 
 	decoded = ice_decode_rx_desc_ptype(ptype);
 
 	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
@@ -106,7 +101,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 		return;
 
 	/* check if HW has decoded the packet and checksum */
-	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
 		return;
 
 	if (!(decoded.known && decoded.outer_ip))
@@ -117,22 +112,22 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
 	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
 
-	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
-				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
 		goto checksum_fail;
-	else if (ipv6 && (rx_status &
-		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+
+	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
 		goto checksum_fail;
 
 	/* check for L4 errors and handle packets that were not able to be
 	 * checksummed due to arrival speed
 	 */
-	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
 		goto checksum_fail;
 
 	/* check for outer UDP checksum error in tunneled packets */
-	if ((rx_stat_err1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
-	    (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
+	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
+	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
 		goto checksum_fail;
 
 	/* If there is an outer header present that might contain a checksum
@@ -2862,9 +2862,11 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
 		return -EIO;
 	}
 
-	/* only set dflt_lan_addr once */
-	if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
-	    is_unicast_ether_addr(mac_addr))
+	/* Set the default LAN address to the latest unicast MAC address added
+	 * by the VF. The default LAN address is reported by the PF via
+	 * ndo_get_vf_config.
+	 */
+	if (is_unicast_ether_addr(mac_addr))
 		ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
 
 	vf->num_mac++;
...