Commit 9e928831 authored by Alexei Starovoitov

Merge branch 'xsk: another round of fixes'

Maciej Fijalkowski says:

====================

Hello,

yet another round of fixes for XSK from Magnus and me.

Magnus addresses the fact that xp_alloc() can return NULL; this has to be
handled so that entries in the SW ring on the driver side are not cleared.
He then addresses an off-by-one problem in the Tx descriptor cleaning
routine of the ice ZC driver.
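
As the diff below shows, the off-by-one comes from the loop's
post-decrement test: "while (budget--)" runs the body one extra time
compared to "while (--budget)". A minimal, self-contained sketch (plain C,
not the driver code) that only demonstrates the iteration-count difference:

#include <stdio.h>

int main(void)
{
	int budget, runs;

	/* post-decrement: tests the old value, so the body runs budget + 1 times */
	budget = 3;
	runs = 0;
	do {
		runs++;
	} while (budget--);
	printf("post-decrement: %d iterations\n", runs);	/* prints 4 */

	/* pre-decrement: tests the decremented value, so the body runs budget times */
	budget = 3;
	runs = 0;
	do {
		runs++;
	} while (--budget);
	printf("pre-decrement:  %d iterations\n", runs);	/* prints 3 */

	return 0;
}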

From my side, I am adding protection to the ZC Rx processing loop so that
descriptor cleaning does not go past already-processed entries. I also fix
an issue with assigning the XSK pool to Tx queues.
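
The Rx guard relies on the ring indices: once next_to_clean (ntc) catches
up to next_to_use (ntu), nothing beyond that point has been filled, so
cleaning has to stop even if the NAPI budget is not yet exhausted. A
self-contained sketch of that pattern (hypothetical demo_ring/demo_clean
names, not the ice code):

#include <stdio.h>

/* Hypothetical ring: entries from next_to_clean up to (but excluding)
 * next_to_use are filled and may be processed.
 */
struct demo_ring {
	unsigned int next_to_clean;	/* ntc: first entry left to process */
	unsigned int next_to_use;	/* ntu: first entry not yet filled */
	unsigned int count;		/* ring size */
};

/* Process up to @budget entries, but never walk past ntu. */
static int demo_clean(struct demo_ring *ring, int budget)
{
	int processed = 0;

	while (budget--) {
		/* ntc caught ntu: no more filled entries, stop early */
		if (ring->next_to_clean == ring->next_to_use)
			break;

		/* ... process the entry at next_to_clean here ... */
		processed++;

		if (++ring->next_to_clean == ring->count)
			ring->next_to_clean = 0;
	}

	return processed;
}

int main(void)
{
	struct demo_ring ring = {
		.next_to_clean = 6,
		.next_to_use = 2,
		.count = 8,
	};

	/* Only entries 6, 7, 0 and 1 are filled, so even with a budget of 16
	 * the loop cleans just 4 of them.
	 */
	printf("cleaned %d entries\n", demo_clean(&ring, 16));
	return 0;
}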

This is directed to the bpf tree.

Thanks!

Maciej Fijalkowski (2):
  ice: xsk: stop Rx processing when ntc catches ntu
  ice: xsk: fix indexing in ice_tx_xsk_pool()
====================
Acked-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 7df482e6 1ac2524d
@@ -710,7 +710,7 @@ static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
 	struct ice_vsi *vsi = ring->vsi;
 	u16 qid;
 
-	qid = ring->q_index - vsi->num_xdp_txq;
+	qid = ring->q_index - vsi->alloc_txq;
 
 	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
 		return NULL;
@@ -608,6 +608,9 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 		 */
 		dma_rmb();
 
+		if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
+			break;
+
 		xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
 
 		size = le16_to_cpu(rx_desc->wb.pkt_len) &
@@ -754,7 +757,7 @@ static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
 		next_dd = next_dd + tx_thresh;
 		if (next_dd >= desc_cnt)
 			next_dd = tx_thresh - 1;
-	} while (budget--);
+	} while (--budget);
 
 	xdp_ring->next_dd = next_dd;
@@ -591,9 +591,13 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
 	u32 nb_entries1 = 0, nb_entries2;
 
 	if (unlikely(pool->dma_need_sync)) {
+		struct xdp_buff *buff;
+
 		/* Slow path */
-		*xdp = xp_alloc(pool);
-		return !!*xdp;
+		buff = xp_alloc(pool);
+		if (buff)
+			*xdp = buff;
+		return !!buff;
 	}
 
 	if (unlikely(pool->free_list_cnt)) {