Commit f8509aa0 authored by Magnus Karlsson, committed by Alexei Starovoitov

xsk: ixgbe: i40e: ice: mlx5: Xsk_umem_discard_addr to xsk_umem_release_addr

Change the name of xsk_umem_discard_addr to xsk_umem_release_addr to
better reflect the new naming of the AF_XDP queue manipulation
functions. As this function is used by drivers implementing AF_XDP
zero-copy support, the rename also requires matching changes in those
drivers. The function xsk_umem_discard_addr_rq has likewise been
renamed to xsk_umem_release_addr_rq.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/1576759171-28550-10-git-send-email-magnus.karlsson@intel.com
parent 03896ef1
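
For context, xsk_umem_release_addr() (previously xsk_umem_discard_addr()) is the driver-facing helper that consumes the fill-queue entry returned by a prior xsk_umem_peek_addr(). The sketch below is not part of this patch; it is a minimal, hypothetical illustration of that peek/release pattern in a zero-copy RX allocation routine, using only helpers that appear in the diff (xsk_umem_peek_addr, xsk_umem_adjust_offset, xsk_umem_release_addr) and a made-up example_alloc_rx_buf() wrapper.

#include <net/xdp_sock.h>

/* Hypothetical helper: pull one address off the umem fill queue and hand
 * the headroom-adjusted handle back to the caller.
 */
static bool example_alloc_rx_buf(struct xdp_umem *umem, u64 *handle_out)
{
	u64 handle;

	/* Peek the next fill-queue address without consuming it. */
	if (!xsk_umem_peek_addr(umem, &handle))
		return false;	/* fill queue is empty */

	/* Account for the configured headroom, as the drivers in this patch do. */
	*handle_out = xsk_umem_adjust_offset(umem, handle, umem->headroom);

	/* Formerly xsk_umem_discard_addr(): release the peeked entry. */
	xsk_umem_release_addr(umem);
	return true;
}

The diff below applies the rename across the i40e, ice, ixgbe and mlx5 drivers, the xdp_sock.h header and the xsk core.
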
@@ -269,7 +269,7 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-	xsk_umem_discard_addr(umem);
+	xsk_umem_release_addr(umem);
 	return true;
 }

@@ -306,7 +306,7 @@ static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-	xsk_umem_discard_addr_rq(umem);
+	xsk_umem_release_addr_rq(umem);
 	return true;
 }
@@ -555,7 +555,7 @@ ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
 	rx_buf->handle = handle + umem->headroom;
-	xsk_umem_discard_addr(umem);
+	xsk_umem_release_addr(umem);
 	return true;
 }

@@ -591,7 +591,7 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
 	rx_buf->handle = handle + umem->headroom;
-	xsk_umem_discard_addr_rq(umem);
+	xsk_umem_release_addr_rq(umem);
 	return true;
 }
@@ -277,7 +277,7 @@ static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-	xsk_umem_discard_addr(umem);
+	xsk_umem_release_addr(umem);
 	return true;
 }

@@ -304,7 +304,7 @@ static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-	xsk_umem_discard_addr_rq(umem);
+	xsk_umem_release_addr_rq(umem);
 	return true;
 }
@@ -35,7 +35,7 @@ int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
 	 */
 	dma_info->addr = xdp_umem_get_dma(umem, handle);
-	xsk_umem_discard_addr_rq(umem);
+	xsk_umem_release_addr_rq(umem);
 	dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
 				   DMA_BIDIRECTIONAL);
@@ -119,7 +119,7 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
 /* Used from netdev driver */
 bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
 bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
-void xsk_umem_discard_addr(struct xdp_umem *umem);
+void xsk_umem_release_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);

@@ -208,12 +208,12 @@ static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
 	return addr;
 }
 
-static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
 {
 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
 
 	if (!rq->length)
-		xsk_umem_discard_addr(umem);
+		xsk_umem_release_addr(umem);
 	else
 		rq->length--;
 }

@@ -258,7 +258,7 @@ static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
 	return NULL;
 }
 
-static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
+static inline void xsk_umem_release_addr(struct xdp_umem *umem)
 {
 }

@@ -332,7 +332,7 @@ static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
 	return NULL;
 }
 
-static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
 {
 }
@@ -51,11 +51,11 @@ bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
 }
 EXPORT_SYMBOL(xsk_umem_peek_addr);
 
-void xsk_umem_discard_addr(struct xdp_umem *umem)
+void xsk_umem_release_addr(struct xdp_umem *umem)
 {
 	xskq_cons_release(umem->fq);
 }
-EXPORT_SYMBOL(xsk_umem_discard_addr);
+EXPORT_SYMBOL(xsk_umem_release_addr);
 
 void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
 {
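
The reuse-queue variant renamed by the same patch is used on the drivers' slow allocation paths. As a hedged sketch under the same assumptions (hypothetical example_alloc_rx_buf_slow(), only helpers visible in the diff), the pattern is:

#include <net/xdp_sock.h>

/* Hypothetical slow-path helper: take an address from the reuse queue if
 * one is available, otherwise fall back to the fill queue.
 */
static bool example_alloc_rx_buf_slow(struct xdp_umem *umem, u64 *handle_out)
{
	u64 handle;

	/* Prefer a recycled address; falls back to xsk_umem_peek_addr(). */
	if (!xsk_umem_peek_addr_rq(umem, &handle))
		return false;

	*handle_out = xsk_umem_adjust_offset(umem, handle, umem->headroom);

	/* Formerly xsk_umem_discard_addr_rq(): either shortens the reuse
	 * queue or releases the underlying fill-queue slot, as shown in the
	 * xdp_sock.h hunk above.
	 */
	xsk_umem_release_addr_rq(umem);
	return true;
}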