Commit 803a4344 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2021-10-14

Maciej Machnikowski says:

Extend the driver implementation to support PTP pins on E810-T and
derivative devices.

E810-T adapters are equipped with:
- 2 external bidirectional SMA connectors
- 1 internal TX U.FL shared with SMA1
- 1 internal RX U.FL shared with SMA2

The SMA and U.FL configuration is controlled by the external
multiplexer.

E810-T Derivatives are equipped with:
- 2 1PPS outputs on SDP20 and SDP22
- 2 1PPS inputs on SDP21 and SDP23
---
v2:
- Remove defensive programming check and simplify return statement
  (Patch 3)
- Remove unnecessary parentheses (Patch 4)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents dcebeb8b 2faf63b6
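
For readers following the PTP side of this pull: pins like the E810-T SMA connectors and the SDP20-23 1PPS signals are meant to be driven through the standard Linux PTP pin interface from <linux/ptp_clock.h>. A minimal user-space sketch follows; the /dev/ptp0 path and pin index 0 are assumptions, not values from this series, so query the actual clock with PTP_CLOCK_GETCAPS and PTP_PIN_GETFUNC first.

/* Sketch only: /dev/ptp0 and pin index 0 are assumed, not taken from
 * this series. Uses only the stock PTP character-device uapi.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_perout_request req;
	struct ptp_pin_desc pin;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}

	/* route pin 0 to the periodic-output function on channel 0 */
	memset(&pin, 0, sizeof(pin));
	pin.index = 0;
	pin.func = PTP_PF_PEROUT;
	pin.chan = 0;
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin)) {
		perror("PTP_PIN_SETFUNC");
		return 1;
	}

	/* request a 1 Hz (1PPS) output on channel 0 */
	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.period.sec = 1;
	if (ioctl(fd, PTP_PEROUT_REQUEST, &req)) {
		perror("PTP_PEROUT_REQUEST");
		return 1;
	}

	close(fd);
	return 0;
}

The testptp tool in tools/testing/selftests/ptp exercises these same ioctls.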
@@ -125,10 +125,13 @@
 #define ice_for_each_vsi(pf, i) \
 	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
 
-/* Macros for each Tx/Rx ring in a VSI */
+/* Macros for each Tx/Xdp/Rx ring in a VSI */
 #define ice_for_each_txq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
 
+#define ice_for_each_xdp_txq(vsi, i) \
+	for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)
+
 #define ice_for_each_rxq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
@@ -167,6 +170,8 @@ enum ice_feature {
 	ICE_F_MAX
 };
 
+DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+
 struct ice_txq_meta {
 	u32 q_teid;	/* Tx-scheduler element identifier */
 	u16 q_id;	/* Entry in VSI's txq_map bitmap */
@@ -275,8 +280,8 @@ struct ice_vsi {
 	struct ice_sw *vsw;		 /* switch this VSI is on */
 	struct ice_pf *back;		 /* back pointer to PF */
 	struct ice_port_info *port_info; /* back pointer to port_info */
-	struct ice_ring **rx_rings;	 /* Rx ring array */
-	struct ice_ring **tx_rings;	 /* Tx ring array */
+	struct ice_rx_ring **rx_rings;	 /* Rx ring array */
+	struct ice_tx_ring **tx_rings;	 /* Tx ring array */
 	struct ice_q_vector **q_vectors; /* q_vector array */
 
 	irqreturn_t (*irq_handler)(int irq, void *data);
@@ -349,7 +354,7 @@ struct ice_vsi {
 	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
 	struct ice_tc_cfg tc_cfg;
 	struct bpf_prog *xdp_prog;
-	struct ice_ring **xdp_rings;	 /* XDP ring array */
+	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
 	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
 	u16 num_xdp_txq;		 /* Used XDP queues */
 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -580,25 +585,42 @@ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
 	return !!vsi->xdp_prog;
 }
 
-static inline void ice_set_ring_xdp(struct ice_ring *ring)
+static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
 {
 	ring->flags |= ICE_TX_FLAGS_RING_XDP;
 }
 
 /**
  * ice_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: ring to use
+ * @ring: Rx ring to use
  *
  * Returns a pointer to xdp_umem structure if there is a buffer pool present,
  * NULL otherwise.
  */
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 {
 	struct ice_vsi *vsi = ring->vsi;
 	u16 qid = ring->q_index;
 
-	if (ice_ring_is_xdp(ring))
-		qid -= vsi->num_xdp_txq;
+	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+		return NULL;
+
+	return xsk_get_pool_from_qid(vsi->netdev, qid);
+}
+
+/**
+ * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: Tx ring to use
+ *
+ * Returns a pointer to xdp_umem structure if there is a buffer pool present,
+ * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ */
+static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+{
+	struct ice_vsi *vsi = ring->vsi;
+	u16 qid;
+
+	qid = ring->q_index - vsi->num_xdp_txq;
 
 	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
 		return NULL;
@@ -699,6 +721,7 @@ int ice_up(struct ice_vsi *vsi);
 int ice_down(struct ice_vsi *vsi);
 int ice_vsi_cfg(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
 int ice_destroy_xdp_rings(struct ice_vsi *vsi);
 int
...
@@ -614,7 +614,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
 		return -EINVAL;
 
 	base_idx = vsi->base_vector;
-	for (i = 0; i < vsi->num_q_vectors; i++)
+	ice_for_each_q_vector(vsi, i)
 		if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
 				     pf->msix_entries[base_idx + i].vector)) {
 			ice_free_cpu_rx_rmap(vsi);
...
@@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
 	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
 	q_vector->tx.itr_mode = ITR_DYNAMIC;
 	q_vector->rx.itr_mode = ITR_DYNAMIC;
+	q_vector->tx.type = ICE_TX_CONTAINER;
+	q_vector->rx.type = ICE_RX_CONTAINER;
 
 	if (vsi->type == ICE_VSI_VF)
 		goto out;
@@ -146,7 +148,8 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
 {
 	struct ice_q_vector *q_vector;
 	struct ice_pf *pf = vsi->back;
-	struct ice_ring *ring;
+	struct ice_tx_ring *tx_ring;
+	struct ice_rx_ring *rx_ring;
 	struct device *dev;
 
 	dev = ice_pf_to_dev(pf);
@@ -156,10 +159,10 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
 	}
 	q_vector = vsi->q_vectors[v_idx];
 
-	ice_for_each_ring(ring, q_vector->tx)
-		ring->q_vector = NULL;
-	ice_for_each_ring(ring, q_vector->rx)
-		ring->q_vector = NULL;
+	ice_for_each_tx_ring(tx_ring, q_vector->tx)
+		tx_ring->q_vector = NULL;
+	ice_for_each_rx_ring(rx_ring, q_vector->rx)
+		rx_ring->q_vector = NULL;
 
 	/* only VSI with an associated netdev is set up with NAPI */
 	if (vsi->netdev)
@@ -201,12 +204,12 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
 }
 
 /**
- * ice_calc_q_handle - calculate the queue handle
+ * ice_calc_txq_handle - calculate the queue handle
  * @vsi: VSI that ring belongs to
  * @ring: ring to get the absolute queue index
  * @tc: traffic class number
  */
-static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
 {
 	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
@@ -218,7 +221,7 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
 }
 
 /**
- * ice_eswitch_calc_q_handle
+ * ice_eswitch_calc_txq_handle
  * @ring: pointer to ring which unique index is needed
  *
  * To correctly work with many netdevs ring->q_index of Tx rings on switchdev
@@ -228,7 +231,7 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
  * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen,
  * because VSI is get from ring->vsi, so it has to be present in this VSI.
  */
-static u16 ice_eswitch_calc_q_handle(struct ice_ring *ring)
+static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
 {
 	struct ice_vsi *vsi = ring->vsi;
 	int i;
@@ -248,7 +251,7 @@ static u16 ice_eswitch_calc_q_handle(struct ice_ring *ring)
  * This enables/disables XPS for a given Tx descriptor ring
  * based on the TCs enabled for the VSI that ring belongs to.
  */
-static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
+static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
 {
 	if (!ring->q_vector || !ring->netdev)
 		return;
@@ -270,7 +273,7 @@ static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
  * Configure the Tx descriptor ring in TLAN context.
  */
 static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
 {
 	struct ice_vsi *vsi = ring->vsi;
 	struct ice_hw *hw = &vsi->back->hw;
@@ -282,7 +285,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
 	/* Transmit Queue Length */
 	tlan_ctx->qlen = ring->count;
 
-	ice_set_cgd_num(tlan_ctx, ring);
+	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
 
 	/* PF number */
 	tlan_ctx->pf_num = hw->pf_id;
@@ -339,7 +342,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
  *
  * Returns the offset value for ring into the data buffer.
  */
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
 {
 	if (ice_ring_uses_build_skb(rx_ring))
 		return ICE_SKB_PAD;
@@ -355,7 +358,7 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
  *
  * Configure the Rx descriptor ring in RLAN context.
  */
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 {
 	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
 	struct ice_vsi *vsi = ring->vsi;
@@ -466,7 +469,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 *
 * Return 0 on success and a negative value on error.
 */
-int ice_vsi_cfg_rxq(struct ice_ring *ring)
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 {
 	struct device *dev = ice_pf_to_dev(ring->vsi->back);
 	u16 num_bufs = ICE_DESC_UNUSED(ring);
@@ -687,16 +690,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
 						  q_vectors - v_id);
 		q_vector->num_ring_tx = tx_rings_per_v;
-		q_vector->tx.ring = NULL;
+		q_vector->tx.tx_ring = NULL;
 		q_vector->tx.itr_idx = ICE_TX_ITR;
 		q_base = vsi->num_txq - tx_rings_rem;
 
 		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
-			struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
 
 			tx_ring->q_vector = q_vector;
-			tx_ring->next = q_vector->tx.ring;
-			q_vector->tx.ring = tx_ring;
+			tx_ring->next = q_vector->tx.tx_ring;
+			q_vector->tx.tx_ring = tx_ring;
 		}
 		tx_rings_rem -= tx_rings_per_v;
@@ -704,16 +707,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
 						  q_vectors - v_id);
 		q_vector->num_ring_rx = rx_rings_per_v;
-		q_vector->rx.ring = NULL;
+		q_vector->rx.rx_ring = NULL;
 		q_vector->rx.itr_idx = ICE_RX_ITR;
 		q_base = vsi->num_rxq - rx_rings_rem;
 
 		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
-			struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
 
 			rx_ring->q_vector = q_vector;
-			rx_ring->next = q_vector->rx.ring;
-			q_vector->rx.ring = rx_ring;
+			rx_ring->next = q_vector->rx.rx_ring;
+			q_vector->rx.rx_ring = rx_ring;
 		}
 		rx_rings_rem -= rx_rings_per_v;
 	}
@@ -738,7 +741,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
 * @qg_buf: queue group buffer
 */
 int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 		struct ice_aqc_add_tx_qgrp *qg_buf)
 {
 	u8 buf_len = struct_size(qg_buf, txqs, 1);
@@ -774,12 +777,12 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
 	 * TC into the VSI Tx ring
 	 */
 	if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
-		ring->q_handle = ice_eswitch_calc_q_handle(ring);
+		ring->q_handle = ice_eswitch_calc_txq_handle(ring);
 
 		if (ring->q_handle == ICE_INVAL_Q_INDEX)
 			return -ENODEV;
 	} else {
-		ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+		ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
 	}
 
 	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
@@ -904,7 +907,7 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
 */
 int
 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-		     u16 rel_vmvf_num, struct ice_ring *ring,
+		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
 		     struct ice_txq_meta *txq_meta)
 {
 	struct ice_pf *pf = vsi->back;
@@ -961,7 +964,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 * are needed for stopping Tx queue
 */
 void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 		  struct ice_txq_meta *txq_meta)
 {
 	u8 tc;
...
@@ -6,7 +6,7 @@
 #include "ice.h"
 
-int ice_vsi_cfg_rxq(struct ice_ring *ring);
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
 int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
 int
 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -15,7 +15,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
 void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
 int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 		struct ice_aqc_add_tx_qgrp *qg_buf);
 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
 void
@@ -25,9 +25,9 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
 void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
 int
 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-		     u16 rel_vmvf_num, struct ice_ring *ring,
+		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
 		     struct ice_txq_meta *txq_meta);
 void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 		  struct ice_txq_meta *txq_meta);
 #endif /* _ICE_BASE_H_ */
@@ -194,17 +194,18 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
 */
 void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
 {
-	struct ice_ring *tx_ring, *rx_ring;
+	struct ice_tx_ring *tx_ring;
+	struct ice_rx_ring *rx_ring;
 	u16 qoffset, qcount;
 	int i, n;
 
 	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
 		/* Reset the TC information */
-		for (i = 0; i < vsi->num_txq; i++) {
+		ice_for_each_txq(vsi, i) {
 			tx_ring = vsi->tx_rings[i];
 			tx_ring->dcb_tc = 0;
 		}
-		for (i = 0; i < vsi->num_rxq; i++) {
+		ice_for_each_rxq(vsi, i) {
 			rx_ring = vsi->rx_rings[i];
 			rx_ring->dcb_tc = 0;
 		}
@@ -824,7 +825,7 @@ void ice_update_dcb_stats(struct ice_pf *pf)
 * tag will already be configured with the correct ID and priority bits
 */
 void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
			      struct ice_tx_buf *first)
 {
 	struct sk_buff *skb = first->skb;
...
@@ -29,7 +29,7 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
 int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
 void ice_update_dcb_stats(struct ice_pf *pf);
 void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
			      struct ice_tx_buf *first);
 void
 ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
@@ -50,9 +50,9 @@ static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
 }
 
 static inline void
-ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc)
 {
-	tlan_ctx->cgd_num = ring->dcb_tc;
+	tlan_ctx->cgd_num = dcb_tc;
 }
 
 static inline bool ice_is_dcb_active(struct ice_pf *pf)
@@ -102,7 +102,7 @@ ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
 }
 
 static inline int
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring __always_unused *tx_ring,
			      struct ice_tx_buf __always_unused *first)
 {
 	return 0;
@@ -131,6 +131,6 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
 static inline void
 ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
 static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { }
-static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
+static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
 #endif /* CONFIG_DCB */
 #endif /* _ICE_DCB_LIB_H_ */
@@ -68,21 +68,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
 	return -ENODEV;
 }
 
-/**
- * ice_eswitch_remap_ring - reconfigure ring of switchdev ctrl VSI
- * @ring: pointer to ring
- * @q_vector: pointer of q_vector which is connected with this ring
- * @netdev: netdevice connected with this ring
- */
-static void
-ice_eswitch_remap_ring(struct ice_ring *ring, struct ice_q_vector *q_vector,
-		       struct net_device *netdev)
-{
-	ring->q_vector = q_vector;
-	ring->next = NULL;
-	ring->netdev = netdev;
-}
-
 /**
  * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
  * @pf: pointer to PF struct
@@ -102,23 +87,27 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
 	ice_for_each_txq(vsi, q_id) {
 		struct ice_repr *repr = pf->vf[q_id].repr;
 		struct ice_q_vector *q_vector = repr->q_vector;
-		struct ice_ring *tx_ring = vsi->tx_rings[q_id];
-		struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+		struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
+		struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
 
 		q_vector->vsi = vsi;
 		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
 
 		q_vector->num_ring_tx = 1;
-		q_vector->tx.ring = tx_ring;
-		ice_eswitch_remap_ring(tx_ring, q_vector, repr->netdev);
+		q_vector->tx.tx_ring = tx_ring;
+		tx_ring->q_vector = q_vector;
+		tx_ring->next = NULL;
+		tx_ring->netdev = repr->netdev;
 		/* In switchdev mode, from OS stack perspective, there is only
 		 * one queue for given netdev, so it needs to be indexed as 0.
 		 */
 		tx_ring->q_index = 0;
 
 		q_vector->num_ring_rx = 1;
-		q_vector->rx.ring = rx_ring;
-		ice_eswitch_remap_ring(rx_ring, q_vector, repr->netdev);
+		q_vector->rx.rx_ring = rx_ring;
+		rx_ring->q_vector = q_vector;
+		rx_ring->next = NULL;
+		rx_ring->netdev = repr->netdev;
 	}
 }
@@ -390,7 +379,7 @@ static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
 	int i;
 
 	ice_for_each_rxq(vsi, i) {
-		struct ice_ring *ring = vsi->rx_rings[i];
+		struct ice_rx_ring *ring = vsi->rx_rings[i];
 		u16 pf_q = vsi->rxq_map[ring->q_index];
 
 		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
@@ -511,7 +500,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
 * context, regular netdev associated with Rx ring is returned.
 */
 struct net_device *
-ice_eswitch_get_target_netdev(struct ice_ring *rx_ring,
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc)
 {
 	struct ice_32b_rx_flex_desc_nic_2 *desc;
...
@@ -22,7 +22,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi);
 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
 
 struct net_device *
-ice_eswitch_get_target_netdev(struct ice_ring *rx_ring,
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc);
 
 void ice_eswitch_set_target_vsi(struct sk_buff *skb,
@@ -68,7 +68,7 @@ static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
 }
 
 static inline struct net_device *
-ice_eswitch_get_target_netdev(struct ice_ring *rx_ring,
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc)
 {
 	return rx_ring->netdev;
...
@@ -603,7 +603,7 @@ static bool ice_lbtest_check_frame(u8 *frame)
 *
 * Function sends loopback packets on a test Tx ring.
 */
-static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
+static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
 {
 	struct ice_tx_desc *tx_desc;
 	struct ice_tx_buf *tx_buf;
@@ -656,7 +656,7 @@ static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
 * Function receives loopback packets and verify their correctness.
 * Returns number of received valid frames.
 */
-static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
+static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
 {
 	struct ice_rx_buf *rx_buf;
 	int valid_frames, i;
@@ -695,9 +695,10 @@ static u64 ice_loopback_test(struct net_device *netdev)
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
 	struct ice_pf *pf = orig_vsi->back;
-	struct ice_ring *tx_ring, *rx_ring;
 	u8 broadcast[ETH_ALEN], ret = 0;
 	int num_frames, valid_frames;
+	struct ice_tx_ring *tx_ring;
+	struct ice_rx_ring *rx_ring;
 	struct device *dev;
 	u8 *tx_frame;
 	int i;
@@ -1350,7 +1351,8 @@ ice_get_ethtool_stats(struct net_device *netdev,
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = ice_get_netdev_priv_vsi(np);
 	struct ice_pf *pf = vsi->back;
-	struct ice_ring *ring;
+	struct ice_tx_ring *tx_ring;
+	struct ice_rx_ring *rx_ring;
 	unsigned int j;
 	int i = 0;
 	char *p;
@@ -1371,10 +1373,10 @@ ice_get_ethtool_stats(struct net_device *netdev,
 	rcu_read_lock();
 	ice_for_each_alloc_txq(vsi, j) {
-		ring = READ_ONCE(vsi->tx_rings[j]);
-		if (ring) {
-			data[i++] = ring->stats.pkts;
-			data[i++] = ring->stats.bytes;
+		tx_ring = READ_ONCE(vsi->tx_rings[j]);
+		if (tx_ring) {
+			data[i++] = tx_ring->stats.pkts;
+			data[i++] = tx_ring->stats.bytes;
 		} else {
 			data[i++] = 0;
 			data[i++] = 0;
@@ -1382,10 +1384,10 @@ ice_get_ethtool_stats(struct net_device *netdev,
 	}
 
 	ice_for_each_alloc_rxq(vsi, j) {
-		ring = READ_ONCE(vsi->rx_rings[j]);
-		if (ring) {
-			data[i++] = ring->stats.pkts;
-			data[i++] = ring->stats.bytes;
+		rx_ring = READ_ONCE(vsi->rx_rings[j]);
+		if (rx_ring) {
+			data[i++] = rx_ring->stats.pkts;
+			data[i++] = rx_ring->stats.bytes;
 		} else {
 			data[i++] = 0;
 			data[i++] = 0;
@@ -2702,9 +2704,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 static int
 ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 {
-	struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
 	struct ice_netdev_priv *np = netdev_priv(netdev);
-	struct ice_ring *xdp_rings = NULL;
+	struct ice_tx_ring *xdp_rings = NULL;
+	struct ice_tx_ring *tx_rings = NULL;
+	struct ice_rx_ring *rx_rings = NULL;
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
 	int i, timeout = 50, err = 0;
@@ -2753,12 +2756,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 	/* set for the next time the netdev is started */
 	if (!netif_running(vsi->netdev)) {
-		for (i = 0; i < vsi->alloc_txq; i++)
+		ice_for_each_alloc_txq(vsi, i)
 			vsi->tx_rings[i]->count = new_tx_cnt;
-		for (i = 0; i < vsi->alloc_rxq; i++)
+		ice_for_each_alloc_rxq(vsi, i)
 			vsi->rx_rings[i]->count = new_rx_cnt;
 		if (ice_is_xdp_ena_vsi(vsi))
-			for (i = 0; i < vsi->num_xdp_txq; i++)
+			ice_for_each_xdp_txq(vsi, i)
 				vsi->xdp_rings[i]->count = new_tx_cnt;
 		vsi->num_tx_desc = (u16)new_tx_cnt;
 		vsi->num_rx_desc = (u16)new_rx_cnt;
@@ -2807,7 +2810,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		goto free_tx;
 	}
 
-	for (i = 0; i < vsi->num_xdp_txq; i++) {
+	ice_for_each_xdp_txq(vsi, i) {
 		/* clone ring and setup updated count */
 		xdp_rings[i] = *vsi->xdp_rings[i];
 		xdp_rings[i].count = new_tx_cnt;
@@ -2901,7 +2904,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 	}
 
 	if (xdp_rings) {
-		for (i = 0; i < vsi->num_xdp_txq; i++) {
+		ice_for_each_xdp_txq(vsi, i) {
 			ice_free_tx_ring(vsi->xdp_rings[i]);
 			*vsi->xdp_rings[i] = xdp_rings[i];
 		}
@@ -3290,7 +3293,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
 	ice_for_each_q_vector(vsi, q_idx) {
 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
 
-		if (q_vector->rx.ring && q_vector->tx.ring)
+		if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
 			combined++;
 	}
@@ -3501,15 +3504,9 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
-enum ice_container_type {
-	ICE_RX_CONTAINER,
-	ICE_TX_CONTAINER,
-};
-
 /**
  * ice_get_rc_coalesce - get ITR values for specific ring container
  * @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, Rx or Tx
 * @rc: ring container that the ITR values will come from
 *
 * Query the device for ice_ring_container specific ITR values. This is
@@ -3519,24 +3516,23 @@ enum ice_container_type {
 * Returns 0 on success, negative otherwise.
 */
 static int
-ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
-		    struct ice_ring_container *rc)
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
 {
-	if (!rc->ring)
+	if (!rc->rx_ring)
 		return -EINVAL;
 
-	switch (c_type) {
+	switch (rc->type) {
 	case ICE_RX_CONTAINER:
 		ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
 		ec->rx_coalesce_usecs = rc->itr_setting;
-		ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
+		ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl;
 		break;
 	case ICE_TX_CONTAINER:
 		ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
 		ec->tx_coalesce_usecs = rc->itr_setting;
 		break;
 	default:
-		dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
+		dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type);
 		return -EINVAL;
 	}
@@ -3557,18 +3553,18 @@ static int
 ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
 {
 	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
-		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+		if (ice_get_rc_coalesce(ec,
					&vsi->rx_rings[q_num]->q_vector->rx))
 			return -EINVAL;
-		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+		if (ice_get_rc_coalesce(ec,
					&vsi->tx_rings[q_num]->q_vector->tx))
 			return -EINVAL;
 	} else if (q_num < vsi->num_rxq) {
-		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+		if (ice_get_rc_coalesce(ec,
					&vsi->rx_rings[q_num]->q_vector->rx))
 			return -EINVAL;
 	} else if (q_num < vsi->num_txq) {
-		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+		if (ice_get_rc_coalesce(ec,
					&vsi->tx_rings[q_num]->q_vector->tx))
 			return -EINVAL;
 	} else {
@@ -3620,7 +3616,6 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
 /**
  * ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, Rx or Tx
 * @ec: ethtool structure from user to update ITR settings
 * @rc: ring container that the ITR values will come from
 * @vsi: VSI associated to the ring container
@@ -3632,18 +3627,18 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
 * Returns 0 on success, negative otherwise.
 */
 static int
-ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ice_set_rc_coalesce(struct ethtool_coalesce *ec,
		    struct ice_ring_container *rc, struct ice_vsi *vsi)
 {
-	const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx";
+	const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
 	u32 use_adaptive_coalesce, coalesce_usecs;
 	struct ice_pf *pf = vsi->back;
 	u16 itr_setting;
 
-	if (!rc->ring)
+	if (!rc->rx_ring)
 		return -EINVAL;
 
-	switch (c_type) {
+	switch (rc->type) {
 	case ICE_RX_CONTAINER:
 		if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
		    (ec->rx_coalesce_usecs_high &&
@@ -3653,15 +3648,15 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
				    ICE_MAX_INTRL);
 			return -EINVAL;
 		}
-		if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl &&
+		if (ec->rx_coalesce_usecs_high != rc->rx_ring->q_vector->intrl &&
		    (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
 			netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
				    c_type_str);
 			return -EINVAL;
 		}
-		if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
-			rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
-			ice_write_intrl(rc->ring->q_vector,
+		if (ec->rx_coalesce_usecs_high != rc->rx_ring->q_vector->intrl) {
+			rc->rx_ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
+			ice_write_intrl(rc->rx_ring->q_vector,
					ec->rx_coalesce_usecs_high);
 		}
@@ -3676,7 +3671,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 		break;
 	default:
 		dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
-			c_type);
+			rc->type);
 		return -EINVAL;
 	}
@@ -3725,22 +3720,22 @@ static int
 ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
 {
 	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
-		if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+		if (ice_set_rc_coalesce(ec,
					&vsi->rx_rings[q_num]->q_vector->rx,
					vsi))
 			return -EINVAL;
-		if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+		if (ice_set_rc_coalesce(ec,
					&vsi->tx_rings[q_num]->q_vector->tx,
					vsi))
 			return -EINVAL;
 	} else if (q_num < vsi->num_rxq) {
-		if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+		if (ice_set_rc_coalesce(ec,
					&vsi->rx_rings[q_num]->q_vector->rx,
					vsi))
 			return -EINVAL;
 	} else if (q_num < vsi->num_txq) {
-		if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+		if (ice_set_rc_coalesce(ec,
					&vsi->tx_rings[q_num]->q_vector->tx,
					vsi))
 			return -EINVAL;
......
...@@ -46,12 +46,12 @@ static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena) ...@@ -46,12 +46,12 @@ static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
int ret = 0; int ret = 0;
u16 i; u16 i;
for (i = 0; i < vsi->num_rxq; i++) ice_for_each_rxq(vsi, i)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false); ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
ice_flush(&vsi->back->hw); ice_flush(&vsi->back->hw);
for (i = 0; i < vsi->num_rxq; i++) { ice_for_each_rxq(vsi, i) {
ret = ice_vsi_wait_one_rx_ring(vsi, ena, i); ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
if (ret) if (ret)
break; break;
...@@ -390,12 +390,12 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data) ...@@ -390,12 +390,12 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{ {
struct ice_q_vector *q_vector = (struct ice_q_vector *)data; struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
if (!q_vector->tx.ring) if (!q_vector->tx.tx_ring)
return IRQ_HANDLED; return IRQ_HANDLED;
#define FDIR_RX_DESC_CLEAN_BUDGET 64 #define FDIR_RX_DESC_CLEAN_BUDGET 64
ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET); ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
ice_clean_ctrl_tx_irq(q_vector->tx.ring); ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -409,7 +409,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) ...@@ -409,7 +409,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{ {
struct ice_q_vector *q_vector = (struct ice_q_vector *)data; struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
if (!q_vector->tx.ring && !q_vector->rx.ring) if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED; return IRQ_HANDLED;
q_vector->total_events++; q_vector->total_events++;
...@@ -425,7 +425,7 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d ...@@ -425,7 +425,7 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
struct ice_pf *pf = q_vector->vsi->back; struct ice_pf *pf = q_vector->vsi->back;
int i; int i;
if (!q_vector->tx.ring && !q_vector->rx.ring) if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED; return IRQ_HANDLED;
ice_for_each_vf(pf, i) ice_for_each_vf(pf, i)
...@@ -639,12 +639,12 @@ static void ice_vsi_put_qs(struct ice_vsi *vsi) ...@@ -639,12 +639,12 @@ static void ice_vsi_put_qs(struct ice_vsi *vsi)
mutex_lock(&pf->avail_q_mutex); mutex_lock(&pf->avail_q_mutex);
for (i = 0; i < vsi->alloc_txq; i++) { ice_for_each_alloc_txq(vsi, i) {
clear_bit(vsi->txq_map[i], pf->avail_txqs); clear_bit(vsi->txq_map[i], pf->avail_txqs);
vsi->txq_map[i] = ICE_INVAL_Q_INDEX; vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
} }
for (i = 0; i < vsi->alloc_rxq; i++) { ice_for_each_alloc_rxq(vsi, i) {
clear_bit(vsi->rxq_map[i], pf->avail_rxqs); clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
} }
...@@ -1291,14 +1291,14 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) ...@@ -1291,14 +1291,14 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i]; struct ice_q_vector *q_vector = vsi->q_vectors[i];
if (q_vector) { if (q_vector) {
q_vector->tx.ring = NULL; q_vector->tx.tx_ring = NULL;
q_vector->rx.ring = NULL; q_vector->rx.rx_ring = NULL;
} }
} }
} }
if (vsi->tx_rings) { if (vsi->tx_rings) {
for (i = 0; i < vsi->alloc_txq; i++) { ice_for_each_alloc_txq(vsi, i) {
if (vsi->tx_rings[i]) { if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu); kfree_rcu(vsi->tx_rings[i], rcu);
WRITE_ONCE(vsi->tx_rings[i], NULL); WRITE_ONCE(vsi->tx_rings[i], NULL);
...@@ -1306,7 +1306,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) ...@@ -1306,7 +1306,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
} }
} }
if (vsi->rx_rings) { if (vsi->rx_rings) {
for (i = 0; i < vsi->alloc_rxq; i++) { ice_for_each_alloc_rxq(vsi, i) {
if (vsi->rx_rings[i]) { if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu); kfree_rcu(vsi->rx_rings[i], rcu);
WRITE_ONCE(vsi->rx_rings[i], NULL); WRITE_ONCE(vsi->rx_rings[i], NULL);
...@@ -1327,8 +1327,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ...@@ -1327,8 +1327,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf); dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */ /* Allocate Tx rings */
for (i = 0; i < vsi->alloc_txq; i++) { ice_for_each_alloc_txq(vsi, i) {
struct ice_ring *ring; struct ice_tx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */ /* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL); ring = kzalloc(sizeof(*ring), GFP_KERNEL);
...@@ -1338,7 +1338,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ...@@ -1338,7 +1338,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i; ring->q_index = i;
ring->reg_idx = vsi->txq_map[i]; ring->reg_idx = vsi->txq_map[i];
ring->ring_active = false;
ring->vsi = vsi; ring->vsi = vsi;
ring->tx_tstamps = &pf->ptp.port.tx; ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev; ring->dev = dev;
...@@ -1347,8 +1346,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ...@@ -1347,8 +1346,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
} }
/* Allocate Rx rings */ /* Allocate Rx rings */
for (i = 0; i < vsi->alloc_rxq; i++) { ice_for_each_alloc_rxq(vsi, i) {
struct ice_ring *ring; struct ice_rx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */ /* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL); ring = kzalloc(sizeof(*ring), GFP_KERNEL);
...@@ -1357,7 +1356,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ...@@ -1357,7 +1356,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i; ring->q_index = i;
ring->reg_idx = vsi->rxq_map[i]; ring->reg_idx = vsi->rxq_map[i];
ring->ring_active = false;
ring->vsi = vsi; ring->vsi = vsi;
ring->netdev = vsi->netdev; ring->netdev = vsi->netdev;
ring->dev = dev; ring->dev = dev;
...@@ -1752,7 +1750,7 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) ...@@ -1752,7 +1750,7 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
} }
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx) int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{ {
struct ice_aqc_add_tx_qgrp *qg_buf; struct ice_aqc_add_tx_qgrp *qg_buf;
int err; int err;
...@@ -1808,7 +1806,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) ...@@ -1808,7 +1806,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
* Configure the Tx VSI for operation. * Configure the Tx VSI for operation.
*/ */
static int static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count) ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{ {
struct ice_aqc_add_tx_qgrp *qg_buf; struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0; u16 q_idx = 0;
...@@ -1859,8 +1857,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) ...@@ -1859,8 +1857,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret) if (ret)
return ret; return ret;
for (i = 0; i < vsi->num_xdp_txq; i++) ice_for_each_xdp_txq(vsi, i)
vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]); vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
return ret; return ret;
} }
...@@ -1895,6 +1893,23 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl) ...@@ -1895,6 +1893,23 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25)); ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
} }
static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
{
switch (rc->type) {
case ICE_RX_CONTAINER:
if (rc->rx_ring)
return rc->rx_ring->q_vector;
break;
case ICE_TX_CONTAINER:
if (rc->tx_ring)
return rc->tx_ring->q_vector;
default:
break;
}
return NULL;
}
/** /**
* __ice_write_itr - write throttle rate to register * __ice_write_itr - write throttle rate to register
* @q_vector: pointer to interrupt data structure * @q_vector: pointer to interrupt data structure
...@@ -1919,11 +1934,10 @@ void ice_write_itr(struct ice_ring_container *rc, u16 itr) ...@@ -1919,11 +1934,10 @@ void ice_write_itr(struct ice_ring_container *rc, u16 itr)
{ {
struct ice_q_vector *q_vector; struct ice_q_vector *q_vector;
if (!rc->ring) q_vector = ice_pull_qvec_from_rc(rc);
if (!q_vector)
return; return;
q_vector = rc->ring->q_vector;
__ice_write_itr(q_vector, rc, itr); __ice_write_itr(q_vector, rc, itr);
} }
...@@ -1941,7 +1955,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) ...@@ -1941,7 +1955,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
u16 txq = 0, rxq = 0; u16 txq = 0, rxq = 0;
int i, q; int i, q;
for (i = 0; i < vsi->num_q_vectors; i++) { ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i]; struct ice_q_vector *q_vector = vsi->q_vectors[i];
u16 reg_idx = q_vector->reg_idx; u16 reg_idx = q_vector->reg_idx;
...@@ -2099,7 +2113,7 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) ...@@ -2099,7 +2113,7 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
*/ */
static int static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num, struct ice_ring **rings, u16 count) u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
{ {
u16 q_idx; u16 q_idx;
...@@ -2635,7 +2649,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) ...@@ -2635,7 +2649,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
u32 rxq = 0; u32 rxq = 0;
int i, q; int i, q;
for (i = 0; i < vsi->num_q_vectors; i++) { ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i]; struct ice_q_vector *q_vector = vsi->q_vectors[i];
ice_write_intrl(q_vector, 0); ice_write_intrl(q_vector, 0);
...@@ -3201,7 +3215,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) ...@@ -3201,7 +3215,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_map_rings_to_vectors(vsi); ice_vsi_map_rings_to_vectors(vsi);
if (ice_is_xdp_ena_vsi(vsi)) { if (ice_is_xdp_ena_vsi(vsi)) {
vsi->num_xdp_txq = vsi->alloc_rxq; ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
goto err_vectors;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (ret) if (ret)
goto err_vectors; goto err_vectors;
...@@ -3398,16 +3414,16 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) ...@@ -3398,16 +3414,16 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
/** /**
* ice_update_ring_stats - Update ring statistics * ice_update_ring_stats - Update ring statistics
* @ring: ring to update * @stats: stats to be updated
* @pkts: number of processed packets * @pkts: number of processed packets
* @bytes: number of processed bytes * @bytes: number of processed bytes
* *
* This function assumes that caller has acquired a u64_stats_sync lock. * This function assumes that caller has acquired a u64_stats_sync lock.
*/ */
static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes) static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
{ {
ring->stats.bytes += bytes; stats->bytes += bytes;
ring->stats.pkts += pkts; stats->pkts += pkts;
} }
/** /**
...@@ -3416,10 +3432,10 @@ static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes) ...@@ -3416,10 +3432,10 @@ static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets * @pkts: number of processed packets
* @bytes: number of processed bytes * @bytes: number of processed bytes
*/ */
void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{ {
u64_stats_update_begin(&tx_ring->syncp); u64_stats_update_begin(&tx_ring->syncp);
ice_update_ring_stats(tx_ring, pkts, bytes); ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
u64_stats_update_end(&tx_ring->syncp); u64_stats_update_end(&tx_ring->syncp);
} }
...@@ -3429,10 +3445,10 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) ...@@ -3429,10 +3445,10 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets * @pkts: number of processed packets
* @bytes: number of processed bytes * @bytes: number of processed bytes
*/ */
void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes) void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{ {
u64_stats_update_begin(&rx_ring->syncp); u64_stats_update_begin(&rx_ring->syncp);
ice_update_ring_stats(rx_ring, pkts, bytes); ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
u64_stats_update_end(&rx_ring->syncp); u64_stats_update_end(&rx_ring->syncp);
} }
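Both wrappers above funnel into the shared helper through the ring's embedded struct ice_q_stats. A reader-side sketch, not part of this diff, showing how the u64_stats_sync seqcount paired with these writers yields a consistent pkts/bytes snapshot on 32-bit kernels:

static void ice_fetch_tx_ring_stats(struct ice_tx_ring *ring,
				    u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*pkts = ring->stats.pkts;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}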
......
...@@ -14,7 +14,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi); ...@@ -14,7 +14,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx); int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx); int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
...@@ -93,9 +93,9 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi); ...@@ -93,9 +93,9 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
......
This diff is collapsed.
...@@ -1287,7 +1287,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) ...@@ -1287,7 +1287,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
* The timestamp is in ns, so we must convert the result first. * The timestamp is in ns, so we must convert the result first.
*/ */
void void
ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{ {
u32 ts_high; u32 ts_high;
......
...@@ -180,7 +180,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); ...@@ -180,7 +180,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_process_ts(struct ice_pf *pf); void ice_ptp_process_ts(struct ice_pf *pf);
void void
ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb); union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
void ice_ptp_init(struct ice_pf *pf); void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf); void ice_ptp_release(struct ice_pf *pf);
...@@ -208,7 +208,7 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) ...@@ -208,7 +208,7 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
static inline void ice_ptp_process_ts(struct ice_pf *pf) { } static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline void static inline void
ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { } union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
static inline void ice_ptp_init(struct ice_pf *pf) { } static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { } static inline void ice_ptp_release(struct ice_pf *pf) { }
......
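With the split types, ice_ptp_rx_hwtstamp() can only ever be handed an Rx ring, and the empty stubs above keep builds without PTP support compiling. Its call site is not shown in this diff; an illustrative shape, gated on the Rx ring's ptp_rx flag:

	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);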
...@@ -64,15 +64,15 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template, ...@@ -64,15 +64,15 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template,
TP_ARGS(q_vector, dim), TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector) TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim) __field(struct dim *, dim)
__string(devname, q_vector->rx.ring->netdev->name)), __string(devname, q_vector->rx.rx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector; TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim; __entry->dim = dim;
__assign_str(devname, q_vector->rx.ring->netdev->name);), __assign_str(devname, q_vector->rx.rx_ring->netdev->name);),
TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d", TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname), __get_str(devname),
__entry->q_vector->rx.ring->q_index, __entry->q_vector->rx.rx_ring->q_index,
__entry->dim->state, __entry->dim->state,
__entry->dim->profile_ix, __entry->dim->profile_ix,
__entry->dim->tune_state, __entry->dim->tune_state,
...@@ -91,15 +91,15 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template, ...@@ -91,15 +91,15 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template,
TP_ARGS(q_vector, dim), TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector) TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim) __field(struct dim *, dim)
__string(devname, q_vector->tx.ring->netdev->name)), __string(devname, q_vector->tx.tx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector; TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim; __entry->dim = dim;
__assign_str(devname, q_vector->tx.ring->netdev->name);), __assign_str(devname, q_vector->tx.tx_ring->netdev->name);),
TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d", TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname), __get_str(devname),
__entry->q_vector->tx.ring->q_index, __entry->q_vector->tx.tx_ring->q_index,
__entry->dim->state, __entry->dim->state,
__entry->dim->profile_ix, __entry->dim->profile_ix,
__entry->dim->tune_state, __entry->dim->tune_state,
...@@ -115,7 +115,7 @@ DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work, ...@@ -115,7 +115,7 @@ DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work,
/* Events related to a vsi & ring */ /* Events related to a vsi & ring */
DECLARE_EVENT_CLASS(ice_tx_template, DECLARE_EVENT_CLASS(ice_tx_template,
TP_PROTO(struct ice_ring *ring, struct ice_tx_desc *desc, TP_PROTO(struct ice_tx_ring *ring, struct ice_tx_desc *desc,
struct ice_tx_buf *buf), struct ice_tx_buf *buf),
TP_ARGS(ring, desc, buf), TP_ARGS(ring, desc, buf),
...@@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(ice_tx_template, ...@@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \ #define DEFINE_TX_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_tx_template, name, \ DEFINE_EVENT(ice_tx_template, name, \
TP_PROTO(struct ice_ring *ring, \ TP_PROTO(struct ice_tx_ring *ring, \
struct ice_tx_desc *desc, \ struct ice_tx_desc *desc, \
struct ice_tx_buf *buf), \ struct ice_tx_buf *buf), \
TP_ARGS(ring, desc, buf)) TP_ARGS(ring, desc, buf))
...@@ -145,7 +145,7 @@ DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap); ...@@ -145,7 +145,7 @@ DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap);
DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop); DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop);
DECLARE_EVENT_CLASS(ice_rx_template, DECLARE_EVENT_CLASS(ice_rx_template,
TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc), TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc), TP_ARGS(ring, desc),
...@@ -161,12 +161,12 @@ DECLARE_EVENT_CLASS(ice_rx_template, ...@@ -161,12 +161,12 @@ DECLARE_EVENT_CLASS(ice_rx_template,
__entry->ring, __entry->desc) __entry->ring, __entry->desc)
); );
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq, DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc), TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc) TP_ARGS(ring, desc)
); );
DECLARE_EVENT_CLASS(ice_rx_indicate_template, DECLARE_EVENT_CLASS(ice_rx_indicate_template,
TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc, TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb), struct sk_buff *skb),
TP_ARGS(ring, desc, skb), TP_ARGS(ring, desc, skb),
...@@ -186,13 +186,13 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template, ...@@ -186,13 +186,13 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
); );
DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate, DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate,
TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc, TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb), struct sk_buff *skb),
TP_ARGS(ring, desc, skb) TP_ARGS(ring, desc, skb)
); );
DECLARE_EVENT_CLASS(ice_xmit_template, DECLARE_EVENT_CLASS(ice_xmit_template,
TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb),
TP_ARGS(ring, skb), TP_ARGS(ring, skb),
...@@ -210,7 +210,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template, ...@@ -210,7 +210,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \ #define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_xmit_template, name, \ DEFINE_EVENT(ice_xmit_template, name, \
TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), \ TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb), \
TP_ARGS(ring, skb)) TP_ARGS(ring, skb))
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring); DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
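Because every concrete tracepoint is declared through these templates, retyping one DECLARE_EVENT_CLASS prototype retypes all of its events at once. A hypothetical additional event (name invented for illustration) would still be a single line:

DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_example);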
......
This diff is collapsed.
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#define ICE_MAX_CHAINED_RX_BUFS 5 #define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8 #define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17 #define ICE_MIN_TX_LEN 17
#define ICE_TX_THRESH 32
/* The size limit for a transmit buffer in a descriptor is (16K - 1). /* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to * In order to align with the read requests we will align the value to
...@@ -154,7 +155,7 @@ struct ice_tx_buf { ...@@ -154,7 +155,7 @@ struct ice_tx_buf {
struct ice_tx_offload_params { struct ice_tx_offload_params {
u64 cd_qw1; u64 cd_qw1;
struct ice_ring *tx_ring; struct ice_tx_ring *tx_ring;
u32 td_cmd; u32 td_cmd;
u32 td_offset; u32 td_offset;
u32 td_l2tag1; u32 td_l2tag1;
...@@ -251,9 +252,9 @@ enum ice_dynamic_itr { ...@@ -251,9 +252,9 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1 #define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */ /* descriptor ring, associated with a VSI */
struct ice_ring { struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */ /* CL1 - 1st cacheline starts here */
struct ice_ring *next; /* pointer to next ring in q_vector */ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */ void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */ struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */ struct net_device *netdev; /* netdev ring maps to */
...@@ -261,15 +262,13 @@ struct ice_ring { ...@@ -261,15 +262,13 @@ struct ice_ring {
struct ice_q_vector *q_vector; /* Backreference to associated vector */ struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail; u8 __iomem *tail;
union { union {
struct ice_tx_buf *tx_buf;
struct ice_rx_buf *rx_buf; struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf; struct xdp_buff **xdp_buf;
}; };
/* CL2 - 2nd cacheline starts here */ /* CL2 - 2nd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
/* CL3 - 3rd cacheline starts here */
u16 q_index; /* Queue number of ring */ u16 q_index; /* Queue number of ring */
u16 q_handle; /* Queue handle per TC */
u8 ring_active:1; /* is ring online or not */
u16 count; /* Number of descriptors */ u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */ u16 reg_idx; /* HW register index of the ring */
...@@ -278,63 +277,97 @@ struct ice_ring { ...@@ -278,63 +277,97 @@ struct ice_ring {
u16 next_to_use; u16 next_to_use;
u16 next_to_clean; u16 next_to_clean;
u16 next_to_alloc; u16 next_to_alloc;
u16 rx_offset;
u16 rx_buf_len;
/* stats structs */ /* stats structs */
struct ice_rxq_stats rx_stats;
struct ice_q_stats stats; struct ice_q_stats stats;
struct u64_stats_sync syncp; struct u64_stats_sync syncp;
union {
struct ice_txq_stats tx_stats;
struct ice_rxq_stats rx_stats;
};
struct rcu_head rcu; /* to avoid race on free */ struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */ /* CL4 - 4th cacheline starts here */
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool; struct xsk_buff_pool *xsk_pool;
u16 rx_offset;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
struct sk_buff *skb; struct sk_buff *skb;
/* CLX - the below items are only accessed infrequently and should be dma_addr_t dma; /* physical address of ring */
* in their own cache line if possible
*/
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) #define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
u64 cached_phctime;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
u8 flags; u8 flags;
} ____cacheline_internodealigned_in_smp;
struct ice_tx_ring {
/* CL1 - 1st cacheline starts here */
struct ice_tx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
u8 __iomem *tail;
struct ice_tx_buf *tx_buf;
struct ice_q_vector *q_vector; /* Backreference to associated vector */
struct net_device *netdev; /* netdev ring maps to */
struct ice_vsi *vsi; /* Backreference to associated VSI */
/* CL2 - 2nd cacheline starts here */
dma_addr_t dma; /* physical address of ring */ dma_addr_t dma; /* physical address of ring */
unsigned int size; /* length of descriptor ring in bytes */ struct xsk_buff_pool *xsk_pool;
u16 next_to_use;
u16 next_to_clean;
u16 next_rs;
u16 next_dd;
u16 q_handle; /* Queue handle per TC */
u16 reg_idx; /* HW register index of the ring */
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
/* stats structs */
struct ice_q_stats stats;
struct u64_stats_sync syncp;
struct ice_txq_stats tx_stats;
/* CL3 - 3rd cacheline starts here */
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_ptp_tx *tx_tstamps;
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */ u32 txq_teid; /* Added Tx queue TEID */
u16 rx_buf_len; #define ICE_TX_FLAGS_RING_XDP BIT(0)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */ u8 dcb_tc; /* Traffic class of ring */
struct ice_ptp_tx *tx_tstamps; u8 ptp_tx;
u64 cached_phctime;
u8 ptp_rx:1;
u8 ptp_tx:1;
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_ring *ring) static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{ {
return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB); return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
} }
static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring) static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{ {
ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB; ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
} }
static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring) static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{ {
ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB; ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
} }
static inline bool ice_ring_is_xdp(struct ice_ring *ring) static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{ {
return !!(ring->flags & ICE_TX_FLAGS_RING_XDP); return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
} }
enum ice_container_type {
ICE_RX_CONTAINER,
ICE_TX_CONTAINER,
};
struct ice_ring_container { struct ice_ring_container {
/* head of linked-list of rings */ /* head of linked-list of rings */
struct ice_ring *ring; union {
struct ice_rx_ring *rx_ring;
struct ice_tx_ring *tx_ring;
};
struct dim dim; /* data for net_dim algorithm */ struct dim dim; /* data for net_dim algorithm */
u16 itr_idx; /* index in the interrupt vector */ u16 itr_idx; /* index in the interrupt vector */
/* this matches the maximum number of ITR bits, but in usec /* this matches the maximum number of ITR bits, but in usec
...@@ -343,6 +376,7 @@ struct ice_ring_container { ...@@ -343,6 +376,7 @@ struct ice_ring_container {
u16 itr_setting:13; u16 itr_setting:13;
u16 itr_reserved:2; u16 itr_reserved:2;
u16 itr_mode:1; u16 itr_mode:1;
enum ice_container_type type;
}; };
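The type tag plus the rx_ring/tx_ring union lets code shared between both directions dispatch explicitly rather than overload one pointer. An illustrative helper, not part of this commit, assuming exactly one union member is live per container as set up by the driver:

static struct net_device *
ice_rc_to_netdev(struct ice_ring_container *rc)
{
	/* rc->type records which union member was populated */
	return rc->type == ICE_RX_CONTAINER ? rc->rx_ring->netdev
					    : rc->tx_ring->netdev;
}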
struct ice_coalesce_stored { struct ice_coalesce_stored {
...@@ -354,10 +388,13 @@ struct ice_coalesce_stored { ...@@ -354,10 +388,13 @@ struct ice_coalesce_stored {
}; };
/* iterator for handling rings in ring container */ /* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \ #define ice_for_each_rx_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next) for (pos = (head).rx_ring; pos; pos = pos->next)
#define ice_for_each_tx_ring(pos, head) \
for (pos = (head).tx_ring; pos; pos = pos->next)
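A usage sketch for the split iterators (mirroring the ice_qvec_cfg_msix rework later in this diff): callers now name the concrete ring type up front instead of a generic struct ice_ring.

static void ice_dump_vector_rings(struct ice_q_vector *q_vector)
{
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		pr_info("Tx queue %u\n", tx_ring->q_index);
	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		pr_info("Rx queue %u\n", rx_ring->q_index);
}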
static inline unsigned int ice_rx_pg_order(struct ice_ring *ring) static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{ {
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
if (ring->rx_buf_len > (PAGE_SIZE / 2)) if (ring->rx_buf_len > (PAGE_SIZE / 2))
...@@ -370,21 +407,21 @@ static inline unsigned int ice_rx_pg_order(struct ice_ring *ring) ...@@ -370,21 +407,21 @@ static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
union ice_32b_rx_flex_desc; union ice_32b_rx_flex_desc;
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count); bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16 u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb, ice_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev); struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_ring *tx_ring); void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring); void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring); int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring); int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring); void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring); void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget); int ice_napi_poll(struct napi_struct *napi, int budget);
int int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet); u8 *raw_packet);
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget); int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring); void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */ #endif /* _ICE_TXRX_H_ */
...@@ -3,13 +3,14 @@ ...@@ -3,13 +3,14 @@
#include "ice_txrx_lib.h" #include "ice_txrx_lib.h"
#include "ice_eswitch.h" #include "ice_eswitch.h"
#include "ice_lib.h"
/** /**
* ice_release_rx_desc - Store the new tail and head values * ice_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump * @rx_ring: ring to bump
* @val: new head index * @val: new head index
*/ */
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val) void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{ {
u16 prev_ntu = rx_ring->next_to_use & ~0x7; u16 prev_ntu = rx_ring->next_to_use & ~0x7;
...@@ -67,7 +68,7 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype) ...@@ -67,7 +68,7 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
* @rx_ptype: the ptype value from the descriptor * @rx_ptype: the ptype value from the descriptor
*/ */
static void static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 rx_ptype) struct sk_buff *skb, u16 rx_ptype)
{ {
struct ice_32b_rx_flex_desc_nic *nic_mdid; struct ice_32b_rx_flex_desc_nic *nic_mdid;
...@@ -94,7 +95,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, ...@@ -94,7 +95,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
* skb->protocol must be set before this function is called * skb->protocol must be set before this function is called
*/ */
static void static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype) union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{ {
struct ice_rx_ptype_decoded decoded; struct ice_rx_ptype_decoded decoded;
...@@ -179,7 +180,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, ...@@ -179,7 +180,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
* other fields within the skb. * other fields within the skb.
*/ */
void void
ice_process_skb_fields(struct ice_ring *rx_ring, ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype) struct sk_buff *skb, u16 ptype)
{ {
...@@ -205,7 +206,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring, ...@@ -205,7 +206,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
* gro receive functions (with/without VLAN tag) * gro receive functions (with/without VLAN tag)
*/ */
void void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{ {
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK)) (vlan_tag & VLAN_VID_MASK))
...@@ -213,19 +214,68 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) ...@@ -213,19 +214,68 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
napi_gro_receive(&rx_ring->q_vector->napi, skb); napi_gro_receive(&rx_ring->q_vector->napi, skb);
} }
/**
* ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
* @xdp_ring: XDP ring to clean
*/
static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
unsigned int total_bytes = 0, total_pkts = 0;
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *next_dd_desc;
u16 next_dd = xdp_ring->next_dd;
struct ice_tx_buf *tx_buf;
int i;
next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
if (!(next_dd_desc->cmd_type_offset_bsz &
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
return;
for (i = 0; i < ICE_TX_THRESH; i++) {
tx_buf = &xdp_ring->tx_buf[ntc];
total_bytes += tx_buf->bytecount;
/* normally tx_buf->gso_segs would be read here, but for
* XDP frames it is always 1
*/
total_pkts++;
page_frag_free(tx_buf->raw_buf);
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
dma_unmap_len_set(tx_buf, len, 0);
tx_buf->raw_buf = NULL;
ntc++;
if (ntc >= xdp_ring->count)
ntc = 0;
}
next_dd_desc->cmd_type_offset_bsz = 0;
xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
if (xdp_ring->next_dd > xdp_ring->count)
xdp_ring->next_dd = ICE_TX_THRESH - 1;
xdp_ring->next_to_clean = ntc;
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
}
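A worked example of the bookkeeping above, with illustrative values of a 256-descriptor ring and ICE_TX_THRESH of 32: next_dd always points at the descriptor that carries the next RS bit, so a single DD-bit read decides whether a whole batch of 32 buffers can be freed.

	/* next_dd progression: 31 -> 63 -> 95 -> ... -> 255,
	 * then 255 + 32 = 287 > 256, so it wraps back to 31
	 */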
/** /**
* ice_xmit_xdp_ring - submit single packet to XDP ring for transmission * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
* @data: packet data pointer * @data: packet data pointer
* @size: packet data size * @size: packet data size
* @xdp_ring: XDP ring for transmission * @xdp_ring: XDP ring for transmission
*/ */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{ {
u16 i = xdp_ring->next_to_use; u16 i = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc; struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf; struct ice_tx_buf *tx_buf;
dma_addr_t dma; dma_addr_t dma;
if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
ice_clean_xdp_irq(xdp_ring);
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) { if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
xdp_ring->tx_stats.tx_busy++; xdp_ring->tx_stats.tx_busy++;
return ICE_XDP_CONSUMED; return ICE_XDP_CONSUMED;
...@@ -246,21 +296,26 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) ...@@ -246,21 +296,26 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, i); tx_desc = ICE_TX_DESC(xdp_ring, i);
tx_desc->buf_addr = cpu_to_le64(dma); tx_desc->buf_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
size, 0); size, 0);
/* Make certain all of the status bits have been updated
* before next_to_watch is written.
*/
smp_wmb();
i++; i++;
if (i == xdp_ring->count) if (i == xdp_ring->count) {
i = 0; i = 0;
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
tx_buf->next_to_watch = tx_desc; tx_desc->cmd_type_offset_bsz |=
cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
xdp_ring->next_rs = ICE_TX_THRESH - 1;
}
xdp_ring->next_to_use = i; xdp_ring->next_to_use = i;
if (i > xdp_ring->next_rs) {
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
tx_desc->cmd_type_offset_bsz |=
cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
xdp_ring->next_rs += ICE_TX_THRESH;
}
return ICE_XDP_TX; return ICE_XDP_TX;
} }
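For reference, the ICE_DESC_UNUSED() check at the top of this function counts free descriptors; its definition in ice_txrx.h boils down to the sketch below, so a cleanup pass is attempted once fewer than ICE_TX_THRESH slots remain free.

static u16 ice_desc_unused(u16 ntc, u16 ntu, u16 count)
{
	/* slots between producer (ntu) and consumer (ntc), minus one */
	return (ntc > ntu ? 0 : count) + ntc - ntu - 1;
}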
...@@ -271,7 +326,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) ...@@ -271,7 +326,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
* *
* Returns negative on failure, 0 on success. * Returns negative on failure, 0 on success.
*/ */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring) int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{ {
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
...@@ -283,22 +338,23 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring) ...@@ -283,22 +338,23 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
/** /**
* ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
* @rx_ring: Rx ring * @xdp_ring: XDP ring
* @xdp_res: Result of the receive batch * @xdp_res: Result of the receive batch
* *
* This function bumps the XDP Tx tail and/or flushes the redirect map, and * This function bumps the XDP Tx tail and/or flushes the redirect map, and
* should be called when a batch of packets has been processed in the * should be called when a batch of packets has been processed in the
* napi loop. * napi loop.
*/ */
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res) void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
{ {
if (xdp_res & ICE_XDP_REDIR) if (xdp_res & ICE_XDP_REDIR)
xdp_do_flush_map(); xdp_do_flush_map();
if (xdp_res & ICE_XDP_TX) { if (xdp_res & ICE_XDP_TX) {
struct ice_ring *xdp_ring = if (static_branch_unlikely(&ice_xdp_locking_key))
rx_ring->vsi->xdp_rings[rx_ring->q_index]; spin_lock(&xdp_ring->tx_lock);
ice_xdp_ring_update_tail(xdp_ring); ice_xdp_ring_update_tail(xdp_ring);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
} }
} }
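The static key above pairs with ring selection on the transmit side: with fewer XDP rings than CPUs, several CPUs can map onto one ring, so the tail bump must be serialized. A sketch of that selection, under the assumption that the caller releases tx_lock after queuing (the real logic sits in a collapsed diff above):

static struct ice_tx_ring *ice_pick_xdp_ring(struct ice_vsi *vsi)
{
	u32 idx = smp_processor_id();

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		idx %= vsi->num_xdp_txq;	/* rings are shared */
		spin_lock(&vsi->xdp_rings[idx]->tx_lock);
	}
	return vsi->xdp_rings[idx];
}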
...@@ -37,7 +37,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) ...@@ -37,7 +37,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
* *
* This function updates the XDP Tx ring tail register. * This function updates the XDP Tx ring tail register.
*/ */
static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring) static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{ {
/* Force memory writes to complete before letting h/w /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. * know there are new descriptors to fetch.
...@@ -46,14 +46,14 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring) ...@@ -46,14 +46,14 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
} }
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res); void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring); int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring); int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring);
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val); void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void void
ice_process_skb_fields(struct ice_ring *rx_ring, ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype); struct sk_buff *skb, u16 ptype);
void void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
#endif /* !_ICE_TXRX_LIB_H_ */ #endif /* !_ICE_TXRX_LIB_H_ */
...@@ -3347,7 +3347,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) ...@@ -3347,7 +3347,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues; q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
struct ice_ring *ring = vsi->tx_rings[vf_q_id]; struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
struct ice_txq_meta txq_meta = { 0 }; struct ice_txq_meta txq_meta = { 0 };
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
......
...@@ -67,7 +67,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector, ...@@ -67,7 +67,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @q_vector: queue vector * @q_vector: queue vector
*/ */
static void static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring, ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector) struct ice_q_vector *q_vector)
{ {
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
...@@ -104,16 +104,17 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector) ...@@ -104,16 +104,17 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
u16 reg_idx = q_vector->reg_idx; u16 reg_idx = q_vector->reg_idx;
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
struct ice_ring *ring; struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
ice_cfg_itr(hw, q_vector); ice_cfg_itr(hw, q_vector);
ice_for_each_ring(ring, q_vector->tx) ice_for_each_tx_ring(tx_ring, q_vector->tx)
ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx, ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
q_vector->tx.itr_idx); q_vector->tx.itr_idx);
ice_for_each_ring(ring, q_vector->rx) ice_for_each_rx_ring(rx_ring, q_vector->rx)
ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx, ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
q_vector->rx.itr_idx); q_vector->rx.itr_idx);
ice_flush(hw); ice_flush(hw);
...@@ -144,8 +145,9 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) ...@@ -144,8 +145,9 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{ {
struct ice_txq_meta txq_meta = { }; struct ice_txq_meta txq_meta = { };
struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector; struct ice_q_vector *q_vector;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
int timeout = 50; int timeout = 50;
int err; int err;
...@@ -171,7 +173,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) ...@@ -171,7 +173,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err) if (err)
return err; return err;
if (ice_is_xdp_ena_vsi(vsi)) { if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(&txq_meta, 0, sizeof(txq_meta)); memset(&txq_meta, 0, sizeof(txq_meta));
ice_fill_txq_meta(vsi, xdp_ring, &txq_meta); ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
...@@ -201,8 +203,9 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) ...@@ -201,8 +203,9 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{ {
struct ice_aqc_add_tx_qgrp *qg_buf; struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector; struct ice_q_vector *q_vector;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
u16 size; u16 size;
int err; int err;
...@@ -225,7 +228,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) ...@@ -225,7 +228,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
goto free_buf; goto free_buf;
if (ice_is_xdp_ena_vsi(vsi)) { if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(qg_buf, 0, size); memset(qg_buf, 0, size);
qg_buf->num_txqs = 1; qg_buf->num_txqs = 1;
...@@ -233,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) ...@@ -233,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err) if (err)
goto free_buf; goto free_buf;
ice_set_ring_xdp(xdp_ring); ice_set_ring_xdp(xdp_ring);
xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
} }
err = ice_vsi_cfg_rxq(rx_ring); err = ice_vsi_cfg_rxq(rx_ring);
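ice_tx_xsk_pool() is the Tx-side counterpart of ice_xsk_pool(); its body is in a collapsed hunk. A sketch consistent with its use here, assuming XDP rings are indexed after the data queues so the queue id has to be offset back before the AF_XDP lookup:

static inline struct xsk_buff_pool *
ice_tx_xsk_pool(struct ice_tx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index - vsi->num_xdp_txq; /* back to data qid */

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}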
...@@ -360,7 +363,7 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) ...@@ -360,7 +363,7 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
* *
* Returns true if all allocations were successful, false if any fail. * Returns true if all allocations were successful, false if any fail.
*/ */
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{ {
union ice_32b_rx_flex_desc *rx_desc; union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use; u16 ntu = rx_ring->next_to_use;
...@@ -403,7 +406,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) ...@@ -403,7 +406,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
* ice_bump_ntc - Bump the next_to_clean counter of an Rx ring * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
* @rx_ring: Rx ring * @rx_ring: Rx ring
*/ */
static void ice_bump_ntc(struct ice_ring *rx_ring) static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
{ {
int ntc = rx_ring->next_to_clean + 1; int ntc = rx_ring->next_to_clean + 1;
...@@ -422,7 +425,7 @@ static void ice_bump_ntc(struct ice_ring *rx_ring) ...@@ -422,7 +425,7 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
* Returns the skb on success, NULL on failure. * Returns the skb on success, NULL on failure.
*/ */
static struct sk_buff * static struct sk_buff *
ice_construct_skb_zc(struct ice_ring *rx_ring, struct xdp_buff **xdp_arr) ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
{ {
struct xdp_buff *xdp = *xdp_arr; struct xdp_buff *xdp = *xdp_arr;
unsigned int metasize = xdp->data - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta;
...@@ -449,22 +452,18 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct xdp_buff **xdp_arr) ...@@ -449,22 +452,18 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct xdp_buff **xdp_arr)
* ice_run_xdp_zc - Executes an XDP program in zero-copy path * ice_run_xdp_zc - Executes an XDP program in zero-copy path
* @rx_ring: Rx ring * @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program * @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
* *
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/ */
static int static int
ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{ {
int err, result = ICE_XDP_PASS; int err, result = ICE_XDP_PASS;
struct bpf_prog *xdp_prog;
struct ice_ring *xdp_ring;
u32 act; u32 act;
/* the ZC path is enabled only when an XDP program is set,
* so it cannot be NULL here
*/
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp); act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) { if (likely(act == XDP_REDIRECT)) {
...@@ -478,7 +477,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) ...@@ -478,7 +477,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
case XDP_PASS: case XDP_PASS:
break; break;
case XDP_TX: case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring); result = ice_xmit_xdp_buff(xdp, xdp_ring);
if (result == ICE_XDP_CONSUMED) if (result == ICE_XDP_CONSUMED)
goto out_failure; goto out_failure;
...@@ -505,13 +503,21 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) ...@@ -505,13 +503,21 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
* *
* Returns number of processed packets on success, remaining budget on failure. * Returns number of processed packets on success, remaining budget on failure.
*/ */
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{ {
unsigned int total_rx_bytes = 0, total_rx_packets = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0; unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
bool failure = false; bool failure = false;
/* the ZC path is enabled only when an XDP program is set,
* so it cannot be NULL here
*/
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
xdp_ring = rx_ring->xdp_ring;
while (likely(total_rx_packets < (unsigned int)budget)) { while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc; union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0; unsigned int size, xdp_res = 0;
...@@ -542,7 +548,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) ...@@ -542,7 +548,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
xsk_buff_set_size(*xdp, size); xsk_buff_set_size(*xdp, size);
xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool); xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, *xdp); xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
if (xdp_res) { if (xdp_res) {
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res; xdp_xmit |= xdp_res;
...@@ -590,7 +596,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) ...@@ -590,7 +596,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (cleaned_count >= ICE_RX_BUF_WRITE) if (cleaned_count >= ICE_RX_BUF_WRITE)
failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count); failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
ice_finalize_xdp_rx(rx_ring, xdp_xmit); ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
...@@ -612,7 +618,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) ...@@ -612,7 +618,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
* *
* Returns true if cleanup/transmission is done. * Returns true if cleanup/transmission is done.
*/ */
static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
{ {
struct ice_tx_desc *tx_desc = NULL; struct ice_tx_desc *tx_desc = NULL;
bool work_done = true; bool work_done = true;
...@@ -663,7 +669,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) ...@@ -663,7 +669,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
* @tx_buf: Tx buffer to clean * @tx_buf: Tx buffer to clean
*/ */
static void static void
ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf) ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{ {
xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf); xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
...@@ -678,7 +684,7 @@ ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf) ...@@ -678,7 +684,7 @@ ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
* *
* Returns true if cleanup/transmission is done. * Returns true if cleanup/transmission is done.
*/ */
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
{ {
int total_packets = 0, total_bytes = 0; int total_packets = 0, total_bytes = 0;
s16 ntc = xdp_ring->next_to_clean; s16 ntc = xdp_ring->next_to_clean;
...@@ -751,7 +757,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, ...@@ -751,7 +757,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_q_vector *q_vector; struct ice_q_vector *q_vector;
struct ice_vsi *vsi = np->vsi; struct ice_vsi *vsi = np->vsi;
struct ice_ring *ring; struct ice_tx_ring *ring;
if (test_bit(ICE_DOWN, vsi->state)) if (test_bit(ICE_DOWN, vsi->state))
return -ENETDOWN; return -ENETDOWN;
...@@ -802,7 +808,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi) ...@@ -802,7 +808,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
* ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned * @rx_ring: ring to be cleaned
*/ */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{ {
u16 i; u16 i;
...@@ -820,7 +826,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) ...@@ -820,7 +826,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
* ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring * @xdp_ring: XDP_Tx ring
*/ */
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{ {
u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use; u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
u32 xsk_frames = 0; u32 xsk_frames = 0;
......
...@@ -11,13 +11,13 @@ struct ice_vsi; ...@@ -11,13 +11,13 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS #ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid); u16 qid);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget); int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget); bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count); bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring); void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring); void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
#else #else
static inline int static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi, ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
...@@ -28,21 +28,21 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi, ...@@ -28,21 +28,21 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
} }
static inline int static inline int
ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring, ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
int __always_unused budget) int __always_unused budget)
{ {
return 0; return 0;
} }
static inline bool static inline bool
ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring, ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
int __always_unused budget) int __always_unused budget)
{ {
return false; return false;
} }
static inline bool static inline bool
ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring, ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
u16 __always_unused count) u16 __always_unused count)
{ {
return false; return false;
...@@ -60,7 +60,7 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev, ...@@ -60,7 +60,7 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) { } static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) { } static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
#endif /* CONFIG_XDP_SOCKETS */ #endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */ #endif /* !_ICE_XSK_H_ */