Commit 04006731 authored by Paolo Abeni

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-02-02 (ice)

This series contains updates to ice driver only.

Maciej converts some queue configuration call sites to existing wrapper
functions better suited for the purpose, which also allows the underlying
per-ring helpers to become static.

Aniruddha adds a separate counter for Rx EIPE (external/outer IP checksum)
errors, as hardware may incorrectly report such errors on certain packets.

Paul removes an incorrect comment.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: remove incorrect comment
  ice: Add a new counter for Rx EIPE errors
  ice: make ice_vsi_cfg_txq() static
  ice: make ice_vsi_cfg_rxq() static
====================

Link: https://lore.kernel.org/r/20240202175613.3470818-1-anthony.l.nguyen@intel.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 0bd199fd 53875f05
@@ -605,6 +605,7 @@ struct ice_pf {
 	wait_queue_head_t reset_wait_queue;
 	u32 hw_csum_rx_error;
+	u32 hw_rx_eipe_error;
 	u32 oicr_err_reg;
 	struct msi_map oicr_irq;	/* Other interrupt cause MSIX vector */
 	struct msi_map ll_ts_irq;	/* LL_TS interrupt MSIX vector */
...
@@ -538,7 +538,7 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
  *
  * Return 0 on success and a negative value on error.
  */
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 {
 	struct device *dev = ice_pf_to_dev(ring->vsi->back);
 	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
@@ -633,6 +633,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 	return 0;
 }
 
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+	if (q_idx >= vsi->num_rxq)
+		return -EINVAL;
+
+	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+		vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+	} else {
+		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+		vsi->rx_buf_len = ICE_RXBUF_3072;
+	}
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+	u16 i;
+
+	if (vsi->type == ICE_VSI_VF)
+		goto setup_rings;
+
+	ice_vsi_cfg_frame_size(vsi);
+setup_rings:
+	/* set up individual rings */
+	ice_for_each_rxq(vsi, i) {
+		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 /**
  * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
  * @qs_cfg: gathered variables needed for pf->vsi queues assignment
@@ -828,7 +884,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
  * @ring: Tx ring to be configured
  * @qg_buf: queue group buffer
  */
-int
+static int
 ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 		struct ice_aqc_add_tx_qgrp *qg_buf)
 {
@@ -899,6 +955,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 	return 0;
 }
 
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+			   u16 q_idx)
+{
+	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+
+	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+		return -EINVAL;
+
+	qg_buf->num_txqs = 1;
+
+	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
+{
+	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+	int err = 0;
+	u16 q_idx;
+
+	qg_buf->num_txqs = 1;
+
+	for (q_idx = 0; q_idx < count; q_idx++) {
+		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+}
+
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+	int ret;
+	int i;
+
+	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+	if (ret)
+		return ret;
+
+	ice_for_each_rxq(vsi, i)
+		ice_tx_xsk_pool(vsi, i);
+
+	return 0;
+}
+
 /**
  * ice_cfg_itr - configure the initial interrupt throttle values
  * @hw: pointer to the HW structure
...
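
For context on the call-site change, a minimal usage sketch (not part of the
commit, assuming an ice_vsi pointer "vsi" and a queue index "q_idx" are in
scope; it mirrors the reworked ice_qp_ena() later in this diff):

	int err;

	/* the wrappers validate q_idx and the ring pointers themselves */
	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
	if (err)
		return err;

	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
	if (err)
		return err;
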
@@ -6,7 +6,8 @@
 
 #include "ice.h"
 
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
 int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
 int
 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -14,9 +15,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
 void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
-		struct ice_aqc_add_tx_qgrp *qg_buf);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+			   u16 q_idx);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
 void
 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
...
@@ -64,9 +64,6 @@ static const char * const ice_fwlog_level_string[] = {
 	"verbose",
 };
 
-/* the order in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
 static const char * const ice_fwlog_log_size[] = {
 	"128K",
 	"256K",
...
@@ -129,6 +129,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
 	ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
 	ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
 	ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
+	ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
 	ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
 	ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
 	ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
...
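
With this ICE_PF_STAT entry, the new counter should show up in
"ethtool -S <iface>" output as rx_eipe_error.nic, alongside the existing
rx_csum_bad.nic counter.
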
@@ -1671,27 +1671,6 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
 	}
 }
 
-/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
-	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
-		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
-		vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
-	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
-		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
-		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
-		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
-	} else {
-		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-		vsi->rx_buf_len = ICE_RXBUF_3072;
-	}
-}
-
 /**
  * ice_pf_state_is_nominal - checks the PF for nominal state
  * @pf: pointer to PF to check
@@ -1795,114 +1774,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
 	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
 }
 
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
-{
-	if (q_idx >= vsi->num_rxq)
-		return -EINVAL;
-
-	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
-}
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
-{
-	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-
-	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
-		return -EINVAL;
-
-	qg_buf->num_txqs = 1;
-
-	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
-}
-
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
-	u16 i;
-
-	if (vsi->type == ICE_VSI_VF)
-		goto setup_rings;
-
-	ice_vsi_cfg_frame_size(vsi);
-setup_rings:
-	/* set up individual rings */
-	ice_for_each_rxq(vsi, i) {
-		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
-
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- * @rings: Tx ring array to be configured
- * @count: number of Tx ring array elements
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
-{
-	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-	int err = 0;
-	u16 q_idx;
-
-	qg_buf->num_txqs = 1;
-
-	for (q_idx = 0; q_idx < count; q_idx++) {
-		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-/**
- * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
-{
-	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
-}
-
-/**
- * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx queues dedicated for XDP in given VSI for operation.
- */
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
-{
-	int ret;
-	int i;
-
-	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
-	if (ret)
-		return ret;
-
-	ice_for_each_rxq(vsi, i)
-		ice_tx_xsk_pool(vsi, i);
-
-	return 0;
-}
-
 /**
  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
  * @intrl: interrupt rate limit in usecs
...
@@ -54,14 +54,6 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
 
 void ice_update_eth_stats(struct ice_vsi *vsi);
 
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
-
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
-
 void ice_vsi_cfg_msix(struct ice_vsi *vsi);
 
 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
@@ -72,8 +64,6 @@ int
 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 			  u16 rel_vmvf_num);
 
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
-
 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
 
 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
...
@@ -143,8 +143,12 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
 	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
 	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
 
-	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
-				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
+		ring->vsi->back->hw_rx_eipe_error++;
+		return;
+	}
+
+	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))))
 		goto checksum_fail;
 
 	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
...
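
The design choice above: an EIPE (external/outer IP checksum error)
indication on an IPv4 packet is no longer treated as a hard checksum
failure, since hardware may raise it incorrectly. The driver now bumps
hw_rx_eipe_error and returns early, leaving the skb unmarked so the stack
can verify the checksum in software; an XSUM_IPE error still takes the
checksum_fail path.
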
@@ -217,42 +217,28 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
  */
 static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 {
-	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-	u16 size = __struct_size(qg_buf);
 	struct ice_q_vector *q_vector;
-	struct ice_tx_ring *tx_ring;
-	struct ice_rx_ring *rx_ring;
 	int err;
 
-	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
-		return -EINVAL;
-
-	qg_buf->num_txqs = 1;
-
-	tx_ring = vsi->tx_rings[q_idx];
-	rx_ring = vsi->rx_rings[q_idx];
-	q_vector = rx_ring->q_vector;
-
-	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
 	if (err)
 		return err;
 
 	if (ice_is_xdp_ena_vsi(vsi)) {
 		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
 
-		memset(qg_buf, 0, size);
-		qg_buf->num_txqs = 1;
-		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+		err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
 		if (err)
 			return err;
 		ice_set_ring_xdp(xdp_ring);
 		ice_tx_xsk_pool(vsi, q_idx);
 	}
 
-	err = ice_vsi_cfg_rxq(rx_ring);
+	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
 	if (err)
 		return err;
 
+	q_vector = vsi->rx_rings[q_idx]->q_vector;
 	ice_qvec_cfg_msix(vsi, q_vector);
 
 	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
...
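
Note the resulting simplification: ice_qp_ena() no longer needs its own
DEFINE_FLEX() queue-group buffer, local ring pointers, or an explicit q_idx
bounds check, because ice_vsi_cfg_single_txq() and ice_vsi_cfg_single_rxq()
perform that validation themselves.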