Commit c56d91ce authored by Sunil Goutham, committed by David S. Miller

net: thunderx: Add support for XDP_DROP

Adds support for XDP_DROP.
Also, since in XDP mode there is just a single buffer per page,
the DMA mapping info is now recycled along with the pages.
Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 05c773f5
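
In XDP mode each receive buffer owns a whole page, so dropping a frame comes down to checking whether the page is a recycled one (recycled pages carry an extra reference) and, if not, tearing down its DMA mapping before releasing it. The sketch below condenses the verdict handling added to nicvf_xdp_rx() in the first hunk of the diff; declarations and the rest of the function are omitted, and the fall-through comments are editorial.

	switch (action) {
	case XDP_PASS:
	case XDP_TX:
		/* Pass on all packets to network stack */
		return false;
	default:
		bpf_warn_invalid_xdp_action(action);
		/* fall through: treat unknown verdicts as aborted */
	case XDP_ABORTED:
		trace_xdp_exception(nic->netdev, prog, action);
		/* fall through: aborted frames are dropped */
	case XDP_DROP:
		page = virt_to_page(xdp.data);
		/* A refcount of 1 means the page is not recycled, so its
		 * DMA mapping must be unmapped before the page goes back.
		 */
		if (page_ref_count(page) == 1) {
			/* XDP buffers are mapped from the page start */
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}
		put_page(page);
		return true;	/* frame consumed, nothing reaches the stack */
	}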
@@ -18,6 +18,7 @@
 #include <linux/irq.h>
 #include <linux/iommu.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/filter.h>
 #include "nic_reg.h"
@@ -505,6 +506,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic,
 				struct cqe_rx_t *cqe_rx)
 {
	struct xdp_buff xdp;
+	struct page *page;
	u32 action;
	u16 len;
	u64 dma_addr, cpu_addr;
@@ -527,12 +529,27 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic,
	switch (action) {
	case XDP_PASS:
	case XDP_TX:
-	case XDP_ABORTED:
-	case XDP_DROP:
		/* Pass on all packets to network stack */
		return false;
	default:
		bpf_warn_invalid_xdp_action(action);
+	case XDP_ABORTED:
+		trace_xdp_exception(nic->netdev, prog, action);
+	case XDP_DROP:
+		page = virt_to_page(xdp.data);
+		/* Check if it's a recycled page, if not
+		 * unmap the DMA mapping.
+		 *
+		 * Recycled page holds an extra reference.
+		 */
+		if (page_ref_count(page) == 1) {
+			dma_addr &= PAGE_MASK;
+			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
+					     DMA_ATTR_SKIP_CPU_SYNC);
+		}
+		put_page(page);
+		return true;
	}
	return false;
 }
@@ -645,7 +662,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx))
			return;

-	skb = nicvf_get_rcv_skb(snic, cqe_rx);
+	skb = nicvf_get_rcv_skb(snic, cqe_rx, nic->xdp_prog ? true : false);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
...
@@ -117,6 +117,7 @@ static struct pgcache *nicvf_alloc_page(struct nicvf *nic,
		/* Save the page in page cache */
		pgcache->page = page;
+		pgcache->dma_addr = 0;
		rbdr->pgalloc++;
	}
@@ -144,7 +145,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
	/* Check if request can be accomodated in previous allocated page.
	 * But in XDP mode only one buffer per page is permitted.
	 */
-	if (!nic->pnicvf->xdp_prog && nic->rb_page &&
+	if (!rbdr->is_xdp && nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
		nic->rb_pageref++;
		goto ret;
@@ -165,18 +166,24 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
	if (pgcache)
		nic->rb_page = pgcache->page;
 ret:
-	/* HW will ensure data coherency, CPU sync not required */
-	*rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
-					nic->rb_page_offset, buf_len,
-					DMA_FROM_DEVICE,
-					DMA_ATTR_SKIP_CPU_SYNC);
-	if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
-		if (!nic->rb_page_offset)
-			__free_pages(nic->rb_page, 0);
-		nic->rb_page = NULL;
-		return -ENOMEM;
+	if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
+		*rbuf = pgcache->dma_addr;
+	} else {
+		/* HW will ensure data coherency, CPU sync not required */
+		*rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+						nic->rb_page_offset, buf_len,
+						DMA_FROM_DEVICE,
+						DMA_ATTR_SKIP_CPU_SYNC);
+		if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+			if (!nic->rb_page_offset)
+				__free_pages(nic->rb_page, 0);
+			nic->rb_page = NULL;
+			return -ENOMEM;
+		}
+		if (pgcache)
+			pgcache->dma_addr = *rbuf;
+		nic->rb_page_offset += buf_len;
	}
-	nic->rb_page_offset += buf_len;

	return 0;
 }
@@ -230,8 +237,16 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
	 * On embedded platforms i.e 81xx/83xx available memory itself
	 * is low and minimum ring size of RBDR is 8K, that takes away
	 * lots of memory.
+	 *
+	 * But for XDP it has to be a single buffer per page.
	 */
-	rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
+	if (!nic->pnicvf->xdp_prog) {
+		rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
+		rbdr->is_xdp = false;
+	} else {
+		rbdr->pgcnt = ring_len;
+		rbdr->is_xdp = true;
+	}
	rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
	rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
				rbdr->pgcnt, GFP_KERNEL);
@@ -1454,8 +1469,31 @@ static inline unsigned frag_num(unsigned i)
 #endif
 }

+static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
+				   u64 buf_addr, bool xdp)
+{
+	struct page *page = NULL;
+	int len = RCV_FRAG_LEN;
+
+	if (xdp) {
+		page = virt_to_page(phys_to_virt(buf_addr));
+		/* Check if it's a recycled page, if not
+		 * unmap the DMA mapping.
+		 *
+		 * Recycled page holds an extra reference.
+		 */
+		if (page_ref_count(page) != 1)
+			return;
+		/* Receive buffers in XDP mode are mapped from page start */
+		dma_addr &= PAGE_MASK;
+	}
+	dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
+			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
 /* Returns SKB for a received packet */
-struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
+				  struct cqe_rx_t *cqe_rx, bool xdp)
 {
	int frag;
	int payload_len = 0;
@@ -1490,10 +1528,9 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
		if (!frag) {
			/* First fragment */
-			dma_unmap_page_attrs(&nic->pdev->dev,
-					     *rb_ptrs - cqe_rx->align_pad,
-					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
-					     DMA_ATTR_SKIP_CPU_SYNC);
+			nicvf_unmap_rcv_buffer(nic,
+					       *rb_ptrs - cqe_rx->align_pad,
+					       phys_addr, xdp);
			skb = nicvf_rb_ptr_to_skb(nic,
						  phys_addr - cqe_rx->align_pad,
						  payload_len);
@@ -1503,9 +1540,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
-			dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
-					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
-					     DMA_ATTR_SKIP_CPU_SYNC);
+			nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
			page = virt_to_page(phys_to_virt(phys_addr));
			offset = phys_to_virt(phys_addr) - page_address(page);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
...
@@ -228,6 +228,7 @@ struct rbdr {
	u32		head;
	u32		tail;
	struct q_desc_mem   dmem;
+	bool		is_xdp;

	/* For page recycling */
	int		pgidx;
@@ -339,7 +340,8 @@ void nicvf_sq_free_used_descs(struct net_device *netdev,
 int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num);

-struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
+				  struct cqe_rx_t *cqe_rx, bool xdp);
 void nicvf_rbdr_task(unsigned long data);
 void nicvf_rbdr_work(struct work_struct *work);
...
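
On the allocation side, the recycling scheme works because an XDP ring uses exactly one buffer per page: the DMA address obtained the first time a page is mapped can be cached in its pgcache entry and handed back unchanged when the page is recycled, skipping the dma_map_page_attrs() call entirely. A condensed view of the logic added to nicvf_alloc_rcv_buffer() above (mapping-error handling trimmed for brevity):

	if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
		/* Recycled page: its single buffer was already mapped,
		 * reuse the cached DMA address as-is.
		 */
		*rbuf = pgcache->dma_addr;
	} else {
		/* Fresh page: map it and remember the DMA address so the
		 * next recycle round can skip the mapping.
		 */
		*rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
						nic->rb_page_offset, buf_len,
						DMA_FROM_DEVICE,
						DMA_ATTR_SKIP_CPU_SYNC);
		if (pgcache)
			pgcache->dma_addr = *rbuf;
		nic->rb_page_offset += buf_len;
	}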