Commit de1db4a6 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
40GbE Intel Wired LAN Driver Updates 2021-02-08

This series contains updates to i40e driver only.

Cristian makes improvements to the driver's XDP path: he avoids writing
the next-to-clean pointer on every update, removes redundant updates of
cleaned_count and buffer info, creates a helper function to consolidate
XDP actions, and simplifies some of the behavior (a standalone sketch of
the resulting ring-index pattern follows the commit metadata below).

Eryk adds messages to inform the user when the MTU is larger than what
XDP supports (a worked example of the size check follows the
i40e_xdp_setup hunks below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 74784ee0 613142b0
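
As context for the hunks that follow, here is a minimal standalone sketch of the
ring-index pattern the series moves the zero-copy Rx path towards: a power-of-two
ring wrapped with a mask, a local copy of next_to_clean inside the polling loop,
and a single write-back after the loop. This is not the driver code; the names
demo_ring and demo_clean_budget are hypothetical, and it only illustrates the
indexing scheme, not i40e descriptor handling.

/* Hypothetical userspace demo of the masked ring-index pattern. */
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
    uint16_t count;         /* assumed to be a power of two */
    uint16_t next_to_clean; /* written back once per poll */
    uint16_t next_to_use;
};

static unsigned int demo_clean_budget(struct demo_ring *ring, unsigned int budget)
{
    uint16_t count_mask = ring->count - 1;
    uint16_t ntc = ring->next_to_clean;     /* local copy, as in the patches */
    unsigned int done = 0;

    while (done < budget && ntc != ring->next_to_use) {
        /* ... process the descriptor at index ntc here ... */
        ntc = (ntc + 1) & count_mask;       /* wrap without a compare-and-reset */
        done++;
    }

    ring->next_to_clean = ntc;              /* single write-back per poll */
    return done;
}

int main(void)
{
    struct demo_ring ring = { .count = 8, .next_to_clean = 6, .next_to_use = 3 };

    /* cleans 6 -> 7 -> 0 -> 1 -> 2, i.e. five entries across the wrap point */
    printf("cleaned %u, next_to_clean now %u\n",
           demo_clean_budget(&ring, 64), (unsigned int)ring.next_to_clean);
    return 0;
}
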
@@ -12448,9 +12448,10 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
* i40e_xdp_setup - add/remove an XDP program
* @vsi: VSI to changed
* @prog: XDP program
* @extack: netlink extended ack
**/
static int i40e_xdp_setup(struct i40e_vsi *vsi,
struct bpf_prog *prog)
static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct i40e_pf *pf = vsi->back;
@@ -12459,8 +12460,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
int i;
/* Don't allow frames that span over multiple buffers */
if (frame_size > vsi->rx_buf_len)
if (frame_size > vsi->rx_buf_len) {
NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
return -EINVAL;
}
if (!i40e_enabled_xdp_vsi(vsi) && !prog)
return 0;
@@ -12769,7 +12772,7 @@ static int i40e_xdp(struct net_device *dev,
switch (xdp->command) {
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
case XDP_SETUP_XSK_POOL:
return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
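
The hunks above compute the worst-case frame size from the MTU and, with this
series, reject XDP setup with a human-readable extack message when that frame
cannot fit in a single Rx buffer. Below is a hedged userspace illustration of
the same arithmetic; the 2048-byte buffer is an assumption made only for this
example, as the real limit depends on how the driver sized vsi->rx_buf_len.

/* Userspace illustration of the MTU-versus-buffer check, not driver code. */
#include <stdio.h>

#define ETH_HLEN    14  /* Ethernet header */
#define ETH_FCS_LEN 4   /* frame check sequence */
#define VLAN_HLEN   4   /* one VLAN tag */

static int xdp_mtu_ok(int mtu, int rx_buf_len)
{
    int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

    if (frame_size > rx_buf_len) {
        /* the driver reports this condition via NL_SET_ERR_MSG_MOD() */
        fprintf(stderr, "MTU too large to enable XDP (frame %d > buffer %d)\n",
                frame_size, rx_buf_len);
        return 0;
    }
    return 1;
}

int main(void)
{
    int rx_buf_len = 2048; /* assumed buffer size, for illustration only */

    printf("MTU 1500: %s\n", xdp_mtu_ok(1500, rx_buf_len) ? "ok" : "rejected");
    printf("MTU 9000: %s\n", xdp_mtu_ok(9000, rx_buf_len) ? "ok" : "rejected");
    return 0;
}
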
@@ -250,27 +250,68 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
xdp->data_end - xdp->data_hard_start,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
goto out;
skb_reserve(skb, xdp->data - xdp->data_hard_start);
memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
out:
xsk_buff_free(xdp);
return skb;
}
/**
* i40e_inc_ntc: Advance the next_to_clean index
* @rx_ring: Rx ring
**/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
struct xdp_buff *xdp_buff,
union i40e_rx_desc *rx_desc,
unsigned int *rx_packets,
unsigned int *rx_bytes,
unsigned int size,
unsigned int xdp_res)
{
u32 ntc = rx_ring->next_to_clean + 1;
struct sk_buff *skb;
*rx_packets = 1;
*rx_bytes = size;
if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
return;
if (xdp_res == I40E_XDP_CONSUMED) {
xsk_buff_free(xdp_buff);
return;
}
if (xdp_res == I40E_XDP_PASS) {
/* NB! We are not checking for errors using
* i40e_test_staterr with
* BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
* SBP is *not* set in PRT_SBPVSI (default not set).
*/
skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
*rx_packets = 0;
*rx_bytes = 0;
return;
}
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
if (eth_skb_pad(skb)) {
*rx_packets = 0;
*rx_bytes = 0;
return;
}
*rx_bytes = skb->len;
i40e_process_skb_fields(rx_ring, rx_desc, skb);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
return;
}
/* Should never get here, as all valid cases have been handled already.
*/
WARN_ON_ONCE(1);
}
/**
@@ -284,17 +325,20 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
struct sk_buff *skb;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
struct xdp_buff **bi;
unsigned int rx_packets;
unsigned int rx_bytes;
struct xdp_buff *bi;
unsigned int size;
u64 qword;
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
@@ -307,11 +351,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_clean_programming_status(rx_ring,
rx_desc->raw.qword[0],
qword);
bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
xsk_buff_free(*bi);
*bi = NULL;
cleaned_count++;
i40e_inc_ntc(rx_ring);
bi = *i40e_rx_bi(rx_ring, next_to_clean);
xsk_buff_free(bi);
next_to_clean = (next_to_clean + 1) & count_mask;
continue;
}
@@ -320,53 +362,22 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!size)
break;
bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
(*bi)->data_end = (*bi)->data + size;
xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
if (xdp_res) {
if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
xdp_xmit |= xdp_res;
else
xsk_buff_free(*bi);
*bi = NULL;
total_rx_bytes += size;
total_rx_packets++;
cleaned_count++;
i40e_inc_ntc(rx_ring);
continue;
}
/* XDP_PASS path */
/* NB! We are not checking for errors using
* i40e_test_staterr with
* BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
* SBP is *not* set in PRT_SBPVSI (default not set).
*/
skb = i40e_construct_skb_zc(rx_ring, *bi);
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
break;
}
*bi = NULL;
cleaned_count++;
i40e_inc_ntc(rx_ring);
if (eth_skb_pad(skb))
continue;
total_rx_bytes += skb->len;
total_rx_packets++;
i40e_process_skb_fields(rx_ring, rx_desc, skb);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
bi = *i40e_rx_bi(rx_ring, next_to_clean);
bi->data_end = bi->data + size;
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, bi);
i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
&rx_bytes, size, xdp_res);
total_rx_packets += rx_packets;
total_rx_bytes += rx_bytes;
xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
next_to_clean = (next_to_clean + 1) & count_mask;
}
rx_ring->next_to_clean = next_to_clean;
cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
@@ -374,7 +385,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
if (failure || next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
@@ -604,16 +615,14 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
u16 i;
for (i = 0; i < rx_ring->count; i++) {
struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);
u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
if (!rx_bi)
continue;
for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
xsk_buff_free(rx_bi);
rx_bi = NULL;
}
}
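
Finally, a standalone check (again not driver code, with arbitrary example
values) of the refill arithmetic introduced above: on a power-of-two ring, the
unsigned subtraction (next_to_clean - next_to_use - 1) masked with (count - 1)
gives the number of slots available for reallocation, even after the clean
index has wrapped past the use index.

/* Standalone check of the masked refill arithmetic, not driver code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t count = 512;        /* power-of-two ring size */
    uint16_t count_mask = count - 1;
    uint16_t next_to_use = 500;  /* refill last stopped here */
    uint16_t next_to_clean = 10; /* clean index has already wrapped */

    /* the mask folds the negative intermediate value back into the ring */
    unsigned int cleaned_count = (next_to_clean - next_to_use - 1) & count_mask;

    printf("slots available for refill: %u\n", cleaned_count); /* prints 21 */
    return 0;
}
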