Commit 91d5b702 authored by Christoph Hellwig, committed by Daniel Borkmann

xsk: Replace the cheap_dma flag with a dma_need_sync flag

Invert the polarity and better name the flag so that the use case is
properly documented.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200629130359.2690853-3-hch@lst.de
parent 3aa91625
...@@ -40,7 +40,7 @@ struct xsk_buff_pool { ...@@ -40,7 +40,7 @@ struct xsk_buff_pool {
u32 headroom; u32 headroom;
u32 chunk_size; u32 chunk_size;
u32 frame_len; u32 frame_len;
bool cheap_dma; bool dma_need_sync;
bool unaligned; bool unaligned;
void *addrs; void *addrs;
struct device *dev; struct device *dev;
...@@ -80,7 +80,7 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb) ...@@ -80,7 +80,7 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb); void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb) static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{ {
if (xskb->pool->cheap_dma) if (!xskb->pool->dma_need_sync)
return; return;
xp_dma_sync_for_cpu_slow(xskb); xp_dma_sync_for_cpu_slow(xskb);
...@@ -91,7 +91,7 @@ void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma, ...@@ -91,7 +91,7 @@ void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool, static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma, size_t size) dma_addr_t dma, size_t size)
{ {
if (pool->cheap_dma) if (!pool->dma_need_sync)
return; return;
xp_dma_sync_for_device_slow(pool, dma, size); xp_dma_sync_for_device_slow(pool, dma, size);
......
...@@ -55,7 +55,6 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks, ...@@ -55,7 +55,6 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
pool->free_heads_cnt = chunks; pool->free_heads_cnt = chunks;
pool->headroom = headroom; pool->headroom = headroom;
pool->chunk_size = chunk_size; pool->chunk_size = chunk_size;
pool->cheap_dma = true;
pool->unaligned = unaligned; pool->unaligned = unaligned;
pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM; pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
INIT_LIST_HEAD(&pool->free_list); INIT_LIST_HEAD(&pool->free_list);
...@@ -195,7 +194,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, ...@@ -195,7 +194,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
xp_check_dma_contiguity(pool); xp_check_dma_contiguity(pool);
pool->dev = dev; pool->dev = dev;
pool->cheap_dma = xp_check_cheap_dma(pool); pool->dma_need_sync = !xp_check_cheap_dma(pool);
return 0; return 0;
} }
EXPORT_SYMBOL(xp_dma_map); EXPORT_SYMBOL(xp_dma_map);
...@@ -280,7 +279,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool) ...@@ -280,7 +279,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM; xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
xskb->xdp.data_meta = xskb->xdp.data; xskb->xdp.data_meta = xskb->xdp.data;
if (!pool->cheap_dma) { if (pool->dma_need_sync) {
dma_sync_single_range_for_device(pool->dev, xskb->dma, 0, dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
pool->frame_len, pool->frame_len,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment