Commit 3b4f0b66 authored by Björn Töpel, committed by Alexei Starovoitov

i40e, xsk: Migrate to new MEM_TYPE_XSK_BUFF_POOL

Remove MEM_TYPE_ZERO_COPY in favor of the new MEM_TYPE_XSK_BUFF_POOL
APIs. The AF_XDP zero-copy rx_bi ring is now simply an array of struct
xdp_buff pointers (sketched below).

v4->v5: Fixed "warning: Excess function parameter 'bi' description in
        'i40e_construct_skb_zc'". (Jakub)
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Cc: intel-wired-lan@lists.osuosl.org
Link: https://lore.kernel.org/bpf/20200520192103.355233-9-bjorn.topel@gmail.com
parent be1222b5
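For readers skimming the diff: the per-entry zero-copy bookkeeping that used to live in struct i40e_rx_buffer_zc (DMA address, kernel address, umem handle) collapses into a plain array of xdp_buff pointers owned by the XSK buffer layer. The following is a minimal sketch of what the rx_bi_zc allocation and lookup amount to after this change; i40e_alloc_rx_bi_zc() appears in the hunks below, while i40e_rx_bi_zc_entry() is a hypothetical accessor name used here only for illustration, since the bulk of the zero-copy path sits in the collapsed part of this diff.

/* Minimal sketch (not the exact driver code): with MEM_TYPE_XSK_BUFF_POOL
 * the zero-copy ring only tracks xdp_buff pointers handed out by the XSK
 * buffer layer, so no per-entry DMA/addr/handle state is needed.
 * Assumes the driver's struct i40e_ring from i40e_txrx.h.
 */
#include <linux/slab.h>
#include <net/xdp.h>

int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	/* rx_bi_zc is the "struct xdp_buff **" union member added below */
	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

/* Hypothetical accessor: one xdp_buff pointer per Rx descriptor slot. */
static struct xdp_buff **i40e_rx_bi_zc_entry(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}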
@@ -3266,21 +3266,19 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 		ret = i40e_alloc_rx_bi_zc(ring);
 		if (ret)
 			return ret;
-		ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
-				   XDP_PACKET_HEADROOM;
+		ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
 		/* For AF_XDP ZC, we disallow packets to span on
 		 * multiple buffers, thus letting us skip that
 		 * handling in the fast-path.
 		 */
 		chain_len = 1;
-		ring->zca.free = i40e_zca_free;
 		ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-						 MEM_TYPE_ZERO_COPY,
-						 &ring->zca);
+						 MEM_TYPE_XSK_BUFF_POOL,
+						 NULL);
 		if (ret)
 			return ret;
 		dev_info(&vsi->back->pdev->dev,
-			 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+			 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 			 ring->queue_index);
 	} else {
@@ -3351,9 +3349,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
 	writel(0, ring->tail);
 
-	ok = ring->xsk_umem ?
-	     i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
-	     !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+	if (ring->xsk_umem) {
+		xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+		ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
+	} else {
+		ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+	}
 	if (!ok) {
 		/* Log this in case the user has forgotten to give the kernel
 		 * any buffers, even later in the application.
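With the memory model registered as MEM_TYPE_XSK_BUFF_POOL and NULL passed instead of a zero_copy_allocator, buffer refill no longer juggles umem handles: the driver asks the XSK layer for ready-made xdp_buffs and only writes their DMA addresses into the Rx descriptors. Below is a minimal sketch of such a refill loop, assuming the xdp_sock_drv.h helpers this series converts the driver to (xsk_buff_alloc(), xsk_buff_xdp_get_dma()) and the driver's existing descriptor macros; the real i40e_alloc_rx_buffers_zc() lives in the collapsed part of this diff, and ring wrap-around plus the tail bump are omitted here.

/* Sketch of a MEM_TYPE_XSK_BUFF_POOL refill loop; simplified, not the
 * driver's exact i40e_alloc_rx_buffers_zc() (wrap-around and the tail
 * write are left out).
 */
#include <net/xdp_sock_drv.h>

static bool i40e_refill_rx_zc_sketch(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc = I40E_RX_DESC(rx_ring, ntu);
	struct xdp_buff **bi = &rx_ring->rx_bi_zc[ntu];
	dma_addr_t dma;

	while (count--) {
		*bi = xsk_buff_alloc(rx_ring->xsk_umem);
		if (!*bi)
			return false;	/* fill queue ran dry; retry later */

		/* The xdp_buff already carries a mapped frame; just point
		 * the hardware descriptor at it.
		 */
		dma = xsk_buff_xdp_get_dma(*bi);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;
	}

	rx_ring->next_to_use = ntu;
	return true;
}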
@@ -301,12 +301,6 @@ struct i40e_rx_buffer {
 	__u16 pagecnt_bias;
 };
 
-struct i40e_rx_buffer_zc {
-	dma_addr_t dma;
-	void *addr;
-	u64 handle;
-};
-
 struct i40e_queue_stats {
 	u64 packets;
 	u64 bytes;
@@ -356,7 +350,7 @@ struct i40e_ring {
 	union {
 		struct i40e_tx_buffer *tx_bi;
 		struct i40e_rx_buffer *rx_bi;
-		struct i40e_rx_buffer_zc *rx_bi_zc;
+		struct xdp_buff **rx_bi_zc;
 	};
 	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
 	u16 queue_index;		/* Queue number of ring */
@@ -418,7 +412,6 @@ struct i40e_ring {
 	struct i40e_channel *ch;
 	struct xdp_rxq_info xdp_rxq;
 	struct xdp_umem *xsk_umem;
-	struct zero_copy_allocator zca; /* ZC allocator anchor */
 } ____cacheline_internodealigned_in_smp;
 
 static inline bool ring_uses_build_skb(struct i40e_ring *ring)
The diff for one more file is collapsed and not shown here.
@@ -12,7 +12,6 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
 int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
 			u16 qid);
-void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
 bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
 int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
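Dropping the i40e_zca_free() prototype reflects that buffer recycling no longer goes through a zero_copy_allocator callback: frames are simply handed back to the XSK layer with xsk_buff_free(). A hedged sketch of what ring teardown reduces to under the new scheme (the function name is illustrative; the real cleanup lives in the collapsed part of this diff):

/* Sketch: with xdp_buff-based ZC buffers, teardown just returns any
 * still-held frames to the XSK layer. Illustrative only.
 */
#include <net/xdp_sock_drv.h>

static void i40e_xsk_clean_rx_ring_sketch(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct xdp_buff *xdp = rx_ring->rx_bi_zc[i];

		if (!xdp)
			continue;

		xsk_buff_free(xdp);	/* return the frame to the umem/pool */
		rx_ring->rx_bi_zc[i] = NULL;
	}
}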