Commit 26efaef7 authored by Taehee Yoo, committed by David S. Miller

net: atlantic: Implement xdp data plane

It supports XDP_PASS, XDP_DROP and multi-buffer frames.

The new function aq_nic_xmit_xdpf() is used to send a packet described by
an xdp_frame; internally it calls aq_nic_map_xdp() to map the frame onto
the tx ring.
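
For context, a minimal sketch of how an .ndo_xdp_xmit implementation might
drive aq_nic_xmit_xdpf(). This is illustrative only, not the driver's real
handler (which lives elsewhere in this patchset); aq_nic_get_tx_ring() is
an assumed helper mapping the current CPU to a tx ring:

  /* Hypothetical caller sketch, not the driver's real handler. */
  static int aq_xdp_xmit_sketch(struct net_device *ndev, int num_frames,
                                struct xdp_frame **frames, u32 flags)
  {
          struct aq_nic_s *aq_nic = netdev_priv(ndev);
          /* assumed helper: map the current CPU to one of the tx rings */
          struct aq_ring_s *ring = aq_nic_get_tx_ring(aq_nic,
                                                      smp_processor_id());
          int i, drop = 0;

          for (i = 0; i < num_frames; i++) {
                  /* aq_nic_xmit_xdpf() takes the queue lock internally;
                   * a non-zero return means the frame was not queued.
                   */
                  if (aq_nic_xmit_xdpf(aq_nic, ring, frames[i]))
                          drop++;
          }

          /* frames counted in 'drop' are left to the caller to free */
          return num_frames - drop;
  }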

The AQC chip supports 32 queues and 8 vectors (IRQs), which leaves two
options:
1. up to 8 cores, with 4 tx queues per core.
2. up to 4 cores, with 8 tx queues per core.

Like ixgbe, these tx queues could be dedicated to XDP_TX/XDP_REDIRECT
traffic, in which case no tx_lock would be needed. This patchset does not
use that strategy, however, because the cost of getting the hardware tx
queue index is too high. So tx_lock is taken in aq_nic_xmit_xdpf() instead.
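
For comparison, the rejected lock-free variant would look roughly like the
sketch below, assuming one dedicated XDP-only tx ring per CPU as ixgbe
keeps; the aq_xdp_ring[] array is hypothetical:

  /* Hypothetical lock-free variant, not used by this patchset: with one
   * XDP-only tx ring per CPU, no __netif_tx_lock() is required because
   * no other context can post descriptors to that ring.
   */
  static int aq_nic_xmit_xdpf_lockless(struct aq_nic_s *aq_nic,
                                       struct xdp_frame *xdpf)
  {
          /* assumed per-CPU XDP-only ring, ixgbe-style */
          struct aq_ring_s *ring = aq_nic->aq_xdp_ring[smp_processor_id()];
          unsigned int frags;

          frags = aq_nic_map_xdp(aq_nic, xdpf, ring);
          if (!frags)
                  return NETDEV_TX_BUSY;

          /* per the changelog, resolving the hardware tx queue index for
           * such a ring is too expensive on this chip, which is why the
           * shared-queue plus tx_lock approach was chosen instead.
           */
          return aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, ring,
                                                    frags);
  }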

Single core, single queue, 80% CPU utilization:

  30.75%  bpf_prog_xxx_xdp_prog_tx  [k] bpf_prog_xxx_xdp_prog_tx
  10.35%  [kernel]                  [k] aq_hw_read_reg <---------- here
   4.38%  [kernel]                  [k] get_page_from_freelist

Single core, 8 queues, 100% CPU utilization, half the PPS:

  45.56%  [kernel]                  [k] aq_hw_read_reg <---------- here
  17.58%  bpf_prog_xxx_xdp_prog_tx  [k] bpf_prog_xxx_xdp_prog_tx
   4.72%  [kernel]                  [k] hw_atl_b0_hw_ring_rx_receive

The new function __aq_ring_xdp_clean() is the XDP rx handler; it is called
only when an XDP program is attached.
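
The verdict handling inside __aq_ring_xdp_clean() is not part of the hunks
below; the usual pattern, connected to the new per-queue counters added to
aq_ring_stats_rx_s, is roughly the following (illustrative sketch only,
with a hypothetical helper name):

  /* Illustrative only: how an XDP rx handler typically maps program
   * verdicts to the counters this patch adds.
   */
  static void aq_xdp_count_verdict_sketch(struct aq_ring_s *ring, u32 act)
  {
          switch (act) {
          case XDP_PASS:          /* build an skb, hand it to the stack */
                  ring->stats.rx.xdp_pass++;
                  break;
          case XDP_TX:            /* bounce out via aq_nic_xmit_xdpf() */
                  ring->stats.rx.xdp_tx++;
                  break;
          case XDP_REDIRECT:      /* xdp_do_redirect(), flushed at napi end */
                  ring->stats.rx.xdp_redirect++;
                  break;
          case XDP_ABORTED:       /* trace_xdp_exception(), then drop */
                  ring->stats.rx.xdp_aborted++;
                  break;
          case XDP_DROP:
                  ring->stats.rx.xdp_drop++;
                  break;
          default:                /* unknown verdict */
                  ring->stats.rx.xdp_invalid++;
                  break;
          }
  }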
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0d14657f
@@ -97,6 +97,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
	"%sQueue[%d] AllocFails",
	"%sQueue[%d] SkbAllocFails",
	"%sQueue[%d] Polls",
	"%sQueue[%d] PageFlips",
	"%sQueue[%d] PageReuses",
	"%sQueue[%d] PageFrees",
	"%sQueue[%d] XdpAbort",
	"%sQueue[%d] XdpDrop",
	"%sQueue[%d] XdpPass",
	"%sQueue[%d] XdpTx",
	"%sQueue[%d] XdpInvalid",
	"%sQueue[%d] XdpRedirect",
};

static const char * const aq_ethtool_queue_tx_stat_names[] = {
...
@@ -569,6 +569,103 @@ int aq_nic_start(struct aq_nic_s *self)
	return err;
}

static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
				   struct xdp_frame *xdpf,
				   struct aq_ring_s *ring)
{
	struct device *dev = aq_nic_get_dev(self);
	struct aq_ring_buff_s *first = NULL;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *dx_buff;
	struct skb_shared_info *sinfo;
	unsigned int frag_count = 0U;
	unsigned int nr_frags = 0U;
	unsigned int ret = 0U;
	u16 total_len;

	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	total_len = xdpf->len;
	dx_buff->len = total_len;
	if (xdp_frame_has_frags(xdpf)) {
		nr_frags = sinfo->nr_frags;
		total_len += sinfo->xdp_frags_size;
	}
	dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = total_len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	for (; nr_frags--; ++frag_count) {
		skb_frag_t *frag = &sinfo->frags[frag_count];
		unsigned int frag_len = skb_frag_size(frag);
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
						   buff_size, DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(dev, frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = NULL;
	dx_buff->xdpf = xdpf;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->pa)
			continue;

		if (unlikely(dx_buff->is_sop))
			dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
				       DMA_TO_DEVICE);
	}

exit:
	return ret;
}

unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring)
{
@@ -697,6 +794,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	dx_buff->xdpf = NULL;
	goto exit;

mapping_error:
@@ -725,6 +823,44 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
	return ret;
}

int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
		     struct xdp_frame *xdpf)
{
	u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
	struct net_device *ndev = aq_nic_get_ndev(aq_nic);
	struct skb_shared_info *sinfo;
	int cpu = smp_processor_id();
	int err = NETDEV_TX_BUSY;
	struct netdev_queue *nq;
	unsigned int frags = 1;

	if (xdp_frame_has_frags(xdpf)) {
		sinfo = xdp_get_shared_info_from_frame(xdpf);
		frags += sinfo->nr_frags;
	}

	if (frags > AQ_CFG_SKB_FRAGS_MAX)
		return err;

	nq = netdev_get_tx_queue(ndev, tx_ring->idx);
	__netif_tx_lock(nq, cpu);

	aq_ring_update_queue_state(tx_ring);

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
		goto out;

	frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
	if (likely(frags))
		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
							 tx_ring, frags);
out:
	__netif_tx_unlock(nq);

	return err;
}

int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
...
@@ -180,6 +180,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring);
int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
		     struct xdp_frame *xdpf);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
...
@@ -106,6 +106,12 @@ struct aq_ring_stats_rx_s {
	u64 pg_losts;
	u64 pg_flips;
	u64 pg_reuses;
	u64 xdp_aborted;
	u64 xdp_drop;
	u64 xdp_pass;
	u64 xdp_tx;
	u64 xdp_invalid;
	u64 xdp_redirect;
};

struct aq_ring_stats_tx_s {
...
@@ -155,7 +155,7 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
			goto err_exit;
		}
		if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
-					       MEM_TYPE_PAGE_ORDER0, NULL) < 0) {
+					       MEM_TYPE_PAGE_SHARED, NULL) < 0) {
			xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
			err = -ENOMEM;
			goto err_exit;
...