Commit f81da39b authored by Shannon Nelson, committed by David S. Miller

ionic: Add XDP packet headroom

If an XDP program is loaded, add headroom at the beginning
of the frame to give the program room for the edits and
insertions it might make, and tailroom at the end used later
for XDP frame tracking.  These are needed only in the first
Rx buffer of a packet, not in any trailing frags.
Co-developed-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 180e35cd
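For context on why the headroom matters, here is a minimal, hypothetical XDP program (not part of this patch): bpf_xdp_adjust_head() with a negative offset grows the frame at the front, and it fails unless the driver reserved space (XDP_PACKET_HEADROOM, 256 bytes in mainline) before the packet data. The program name and the 4-byte tag are illustrative only.

/* Hypothetical XDP program illustrating the need for headroom.
 * bpf_xdp_adjust_head(ctx, -4) opens 4 bytes of space in front of
 * the frame; it can only succeed if the driver left headroom there.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_prepend_tag(struct xdp_md *ctx)
{
	/* try to grow the frame by 4 bytes at the front */
	if (bpf_xdp_adjust_head(ctx, -4))
		return XDP_DROP;	/* no headroom reserved by the driver */

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";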
@@ -189,7 +189,9 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
 
 static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 				      struct ionic_desc_info *desc_info,
-				      struct ionic_rxq_comp *comp,
+				      unsigned int headroom,
+				      unsigned int len,
+				      unsigned int num_sg_elems,
 				      bool synced)
 {
 	struct net_device *netdev = q->lif->netdev;
@@ -199,12 +201,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 	struct sk_buff *skb;
 	unsigned int i;
 	u16 frag_len;
-	u16 len;
 
 	stats = q_to_rx_stats(q);
 
 	buf_info = &desc_info->bufs[0];
-	len = le16_to_cpu(comp->len);
 
 	prefetchw(buf_info->page);
 
@@ -216,23 +216,26 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 		return NULL;
 	}
 
-	i = comp->num_sg_elems + 1;
+	i = num_sg_elems + 1;
 	do {
 		if (unlikely(!buf_info->page)) {
 			dev_kfree_skb(skb);
 			return NULL;
 		}
 
-		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+		if (headroom)
+			frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+		else
+			frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
 		len -= frag_len;
 
 		if (!synced)
 			dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
-						      0, frag_len, DMA_FROM_DEVICE);
+						      headroom, frag_len, DMA_FROM_DEVICE);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				buf_info->page, buf_info->page_offset, frag_len,
-				IONIC_PAGE_SIZE);
+				buf_info->page, buf_info->page_offset + headroom,
+				frag_len, IONIC_PAGE_SIZE);
 
 		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
 			dma_unmap_page(dev, buf_info->dma_addr,
@@ -240,6 +243,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 			buf_info->page = NULL;
 		}
 
+		/* only needed on the first buffer */
+		if (headroom)
+			headroom = 0;
+
 		buf_info++;
 
 		i--;
@@ -250,7 +257,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 
 static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 					  struct ionic_desc_info *desc_info,
-					  struct ionic_rxq_comp *comp,
+					  unsigned int headroom,
+					  unsigned int len,
 					  bool synced)
 {
 	struct net_device *netdev = q->lif->netdev;
@@ -258,12 +266,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 	struct ionic_rx_stats *stats;
 	struct device *dev = q->dev;
 	struct sk_buff *skb;
-	u16 len;
 
 	stats = q_to_rx_stats(q);
 
 	buf_info = &desc_info->bufs[0];
-	len = le16_to_cpu(comp->len);
 
 	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
 	if (unlikely(!skb)) {
@@ -280,10 +286,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 
 	if (!synced)
 		dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
-					      0, len, DMA_FROM_DEVICE);
-	skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info), len);
+					      headroom, len, DMA_FROM_DEVICE);
+	skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
 	dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
-					 0, len, DMA_FROM_DEVICE);
+					 headroom, len, DMA_FROM_DEVICE);
 
 	skb_put(skb, len);
 	skb->protocol = eth_type_trans(skb, q->lif->netdev);
@@ -303,10 +309,10 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 
 	xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
 	xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
-			 0, len, false);
+			 XDP_PACKET_HEADROOM, len, false);
 
 	dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
-				      0, len,
+				      XDP_PACKET_HEADROOM, len,
 				      DMA_FROM_DEVICE);
 
 	prefetchw(&xdp_buf.data_hard_start);
@@ -345,6 +351,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
 	struct ionic_rx_stats *stats;
 	struct ionic_rxq_comp *comp;
 	struct bpf_prog *xdp_prog;
+	unsigned int headroom;
 	struct sk_buff *skb;
 	u16 len;
 
@@ -366,10 +373,12 @@ static void ionic_rx_clean(struct ionic_queue *q,
 	    ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
 		return;
 
+	headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
 	if (len <= q->lif->rx_copybreak)
-		skb = ionic_rx_copybreak(q, desc_info, comp, !!xdp_prog);
+		skb = ionic_rx_copybreak(q, desc_info, headroom, len, !!xdp_prog);
 	else
-		skb = ionic_rx_frags(q, desc_info, comp, !!xdp_prog);
+		skb = ionic_rx_frags(q, desc_info, headroom, len,
+				     comp->num_sg_elems, !!xdp_prog);
 
 	if (unlikely(!skb)) {
 		stats->dropped++;
@@ -493,8 +502,9 @@ void ionic_rx_fill(struct ionic_queue *q)
 	unsigned int frag_len;
 	unsigned int nfrags;
 	unsigned int n_fill;
-	unsigned int i, j;
 	unsigned int len;
+	unsigned int i;
+	unsigned int j;
 
 	n_fill = ionic_q_space_avail(q);
@@ -503,9 +513,12 @@ void ionic_rx_fill(struct ionic_queue *q)
 	if (n_fill < fill_threshold)
 		return;
 
-	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
+	len = netdev->mtu + VLAN_ETH_HLEN;
 
 	for (i = n_fill; i; i--) {
+		unsigned int headroom;
+		unsigned int buf_len;
+
 		nfrags = 0;
 		remain_len = len;
 		desc_info = &q->info[q->head_idx];
@@ -520,9 +533,18 @@ void ionic_rx_fill(struct ionic_queue *q)
 			}
 		}
 
-		/* fill main descriptor - buf[0] */
-		desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
-		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+		/* fill main descriptor - buf[0]
+		 * XDP uses space in the first buffer, so account for
+		 * head room, tail room, and ip header in the first frag size.
+		 */
+		headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+		if (q->xdp_rxq_info)
+			buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
+		else
+			buf_len = ionic_rx_buf_size(buf_info);
+		frag_len = min_t(u16, len, buf_len);
+
+		desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
		desc->len = cpu_to_le16(frag_len);
 		remain_len -= frag_len;
 		buf_info++;
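For reference, the IONIC_XDP_MAX_LINEAR_MTU bound used above captures how much frame fits in one Rx page once the XDP head- and tailroom are reserved. A sketch of that relationship, assuming the definition lives in the driver's headers (it is not shown in this excerpt, so treat the exact form as an assumption):

/* Sketch (assumed, not shown in this diff): the largest linear frame is
 * one Rx page minus the XDP headroom at the front, the skb_shared_info
 * tailroom at the back, and the VLAN+Ethernet header.
 *
 *   | XDP_PACKET_HEADROOM | frame data ... | skb_shared_info tailroom |
 *   ^ page start          ^ desc->addr (DMA target, page + headroom)
 */
#define IONIC_XDP_MAX_LINEAR_MTU	(IONIC_PAGE_SIZE -		\
					 (XDP_PACKET_HEADROOM +		\
					  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
					  VLAN_ETH_HLEN))

Under that reading, capping the first frag at IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN in ionic_rx_fill() and ionic_rx_frags() means headroom plus the frag plus the tailroom exactly fill one IONIC_PAGE_SIZE buffer, which is why only the first buffer of a packet carries the reservation.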