Commit c7a21904 authored by Michal Swiatkowski, committed by Tony Nguyen

ice: Remove xsk_buff_pool from VSI structure

The netdev structure already stores the xsk_buff_pools, so there is
no need to keep duplicate copies of them in ice_vsi.

Refactor the code to operate on netdev-provided xsk_buff_pools.

Move the scheduling of napi on each queue into a separate helper to
simplify the XDP setup function.
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@intel.com>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Kiran Bhandare <kiranx.bhandare@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 34295a36
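
For context: the per-queue pools this patch reads back from the netdev are put there by the AF_XDP core when a socket binds to a queue. A minimal user-space sketch using the libbpf xsk.h API of that era (interface name, queue id, and sizing are illustrative placeholders; error handling trimmed):

#include <bpf/xsk.h>
#include <stdlib.h>
#include <unistd.h>

#define NUM_FRAMES 4096

/* Bind an AF_XDP socket to queue 0 of "eth0". On success the kernel
 * records the socket's xsk_buff_pool at that queue id on the netdev,
 * which is what the driver can later look up with
 * xsk_get_pool_from_qid().
 */
static int bind_xsk_to_queue(void)
{
	struct xsk_ring_prod fq, tx;
	struct xsk_ring_cons cq, rx;
	struct xsk_umem *umem;
	struct xsk_socket *xsk;
	size_t size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *bufs;

	if (posix_memalign(&bufs, getpagesize(), size))
		return -1;
	if (xsk_umem__create(&umem, bufs, size, &fq, &cq, NULL))
		return -1;

	/* NULL config selects the library defaults */
	return xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
}
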
@@ -39,6 +39,7 @@
 #include <net/devlink.h>
 #include <net/ipv6.h>
 #include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/geneve.h>
 #include <net/gre.h>
 #include <net/udp_tunnel.h>
@@ -326,9 +327,6 @@ struct ice_vsi {
 	struct ice_ring **xdp_rings;	 /* XDP ring array */
 	u16 num_xdp_txq;		 /* Used XDP queues */
 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
-	struct xsk_buff_pool **xsk_pools;
-	u16 num_xsk_pools_used;
-	u16 num_xsk_pools;
 } ____cacheline_internodealigned_in_smp;
 
 /* struct that defines an interrupt vector */
@@ -517,17 +515,15 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
  */
 static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
 {
-	struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
 	u16 qid = ring->q_index;
 
 	if (ice_ring_is_xdp(ring))
 		qid -= ring->vsi->num_xdp_txq;
 
-	if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
-	    !ice_is_xdp_ena_vsi(ring->vsi))
+	if (!ice_is_xdp_ena_vsi(ring->vsi))
 		return NULL;
 
-	return pools[qid];
+	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
 }
 
 /**
...
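
The helper the new ice_xsk_pool() leans on lives in the AF_XDP core rather than in the driver; at the time of this commit its implementation in net/xdp/xsk.c is, approximately:

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	/* The core stores each bound pool directly on the netdev's
	 * per-queue state, which is why the driver-side array and its
	 * bookkeeping became redundant.
	 */
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}

Note that the qid bounds check dropped from ice_xsk_pool() is not lost: the core helper bounds-checks against the real queue counts itself.
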
@@ -2475,6 +2475,22 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
 		max_txqs);
 }
 
+/**
+ * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
+ * @vsi: VSI to schedule napi on
+ */
+static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
+{
+	int i;
+
+	ice_for_each_rxq(vsi, i) {
+		struct ice_ring *rx_ring = vsi->rx_rings[i];
+
+		if (rx_ring->xsk_pool)
+			napi_schedule(&rx_ring->q_vector->napi);
+	}
+}
+
 /**
  * ice_xdp_setup_prog - Add or remove XDP eBPF program
  * @vsi: VSI to setup XDP for
@@ -2519,16 +2535,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 	if (if_running)
 		ret = ice_up(vsi);
 
-	if (!ret && prog && vsi->xsk_pools) {
-		int i;
-
-		ice_for_each_rxq(vsi, i) {
-			struct ice_ring *rx_ring = vsi->rx_rings[i];
-
-			if (rx_ring->xsk_pool)
-				napi_schedule(&rx_ring->q_vector->napi);
-		}
-	}
+	if (!ret && prog)
+		ice_vsi_rx_napi_schedule(vsi);
 
 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
 }
...
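
For orientation, both ice_xdp_setup_prog() above and the pool enable/disable paths below are reached through the driver's ndo_bpf callback. A rough sketch of that dispatcher in ice_main.c, quoted from memory rather than from this diff:

static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		/* the napi kick in ice_vsi_rx_napi_schedule() restarts
		 * processing on queues that already have a pool bound
		 */
		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
	case XDP_SETUP_XSK_POOL:
		/* ends up in ice_xsk_pool_enable()/ice_xsk_pool_disable() */
		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
					  xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}
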
@@ -259,45 +259,6 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 	return err;
 }
 
-/**
- * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
- * @vsi: VSI to allocate the buffer pool on
- *
- * Returns 0 on success, negative on error
- */
-static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
-{
-	if (vsi->xsk_pools)
-		return 0;
-
-	vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
-				 GFP_KERNEL);
-	if (!vsi->xsk_pools) {
-		vsi->num_xsk_pools = 0;
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-/**
- * ice_xsk_remove_pool - Remove an buffer pool for a certain ring/qid
- * @vsi: VSI from which the VSI will be removed
- * @qid: Ring/qid associated with the buffer pool
- */
-static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
-{
-	vsi->xsk_pools[qid] = NULL;
-	vsi->num_xsk_pools_used--;
-
-	if (vsi->num_xsk_pools_used == 0) {
-		kfree(vsi->xsk_pools);
-		vsi->xsk_pools = NULL;
-		vsi->num_xsk_pools = 0;
-	}
-}
-
 /**
  * ice_xsk_pool_disable - disable a buffer pool region
  * @vsi: Current VSI
@@ -307,12 +268,12 @@ static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
  */
 static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
 {
-	if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
-	    !vsi->xsk_pools[qid])
+	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+	if (!pool)
 		return -EINVAL;
 
-	xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
-	ice_xsk_remove_pool(vsi, qid);
+	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 
 	return 0;
 }
@@ -333,22 +294,11 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	if (vsi->type != ICE_VSI_PF)
 		return -EINVAL;
 
-	if (!vsi->num_xsk_pools)
-		vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
-
-	if (qid >= vsi->num_xsk_pools)
+	if (qid >= vsi->netdev->real_num_rx_queues ||
+	    qid >= vsi->netdev->real_num_tx_queues)
 		return -EINVAL;
 
-	err = ice_xsk_alloc_pools(vsi);
-	if (err)
-		return err;
-
-	if (vsi->xsk_pools && vsi->xsk_pools[qid])
-		return -EBUSY;
-
-	vsi->xsk_pools[qid] = pool;
-	vsi->num_xsk_pools_used++;
-
-	err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
-			       ICE_RX_DMA_ATTR);
+	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
+			       ICE_RX_DMA_ATTR);
 	if (err)
 		return err;
@@ -842,11 +792,8 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
 {
 	int i;
 
-	if (!vsi->xsk_pools)
-		return false;
-
-	for (i = 0; i < vsi->num_xsk_pools; i++) {
-		if (vsi->xsk_pools[i])
+	ice_for_each_rxq(vsi, i) {
+		if (xsk_get_pool_from_qid(vsi->netdev, i))
 			return true;
 	}
...
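
The driver can drop its own bookkeeping because of ordering in the AF_XDP core: the pool is registered at the queue id before the driver's ndo_bpf callback runs, and cleared only afterwards on teardown. A simplified sketch of that sequence from net/xdp/xsk_buff_pool.c (from memory; error paths and capability checks trimmed):

int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
		  u16 queue_id, u16 flags)
{
	struct netdev_bpf bpf;
	int err;

	/* Publish the pool on the netdev first ... */
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	/* ... so xsk_get_pool_from_qid() already sees it when the
	 * driver handles XDP_SETUP_XSK_POOL below.
	 */
	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		xsk_clear_pool_at_qid(netdev, queue_id);

	return err;
}

That ordering is also what makes the simplified ice_xsk_any_rx_ring_ena() correct: any queue with a bound socket is visible through the netdev, with no driver-side state to keep in sync.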