Commit 03740165 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-03-23

This series contains updates to ixgbe and ixgbevf only.

Paul adds status register reads to reduce a potential race condition
where registers can read 0xFFFFFFFF during a PCI reset, which in turn
causes the driver to remove the adapter.  He then fixes an assignment
that should have been an "OR" operation, so that previously set EDC
mode bits are not clobbered.
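
As a condensed sketch of the retry pattern being added (the helper name
is illustrative; the real code in the hunks below also re-checks
hw->hw_addr for removal before the loop):

	/* Poll the status register a few times before concluding that an
	 * all-ones read means the adapter is really gone rather than a
	 * PCI reset being in flight.
	 */
	static bool ixgbe_status_stuck_at_ones(u8 __iomem *reg_addr)
	{
		u32 value = IXGBE_FAILED_READ_REG;
		int i;

		for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
			value = readl(reg_addr + IXGBE_STATUS);
			if (value != IXGBE_FAILED_READ_REG)
				return false;	/* transient: reset completed */
			mdelay(3);		/* give the reset time to finish */
		}
		return true;			/* still all-ones: treat as removed */
	}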

Shannon Nelson provides several IPsec offload cleanups to ixgbe, as well as a
patch to enable TSO with IPsec offload.
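
A note that may help when reading the trailer-length hunk below: an ESP
trailer is pad bytes, a pad-length byte, a next-header byte, and then
the ICV, which is where the authlen + 2 + padlen arithmetic comes from.
A minimal sketch of that calculation (hypothetical helper name, assuming
the driver's fixed ICV size IXGBE_IPSEC_AUTH_BITS):

	/* Return the ESP trailer length for a non-GSO skb, or 0 on error. */
	static int esp_trailer_len(struct sk_buff *skb)
	{
		const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;	/* ICV bytes */
		u8 padlen;

		/* the pad-length byte sits just before next-header + ICV */
		if (skb_copy_bits(skb, skb->len - (authlen + 2), &padlen, 1))
			return 0;
		return authlen + 2 + padlen;
	}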

Tony provides the much anticipated XDP support for ixgbevf.  Currently,
the XDP_PASS, XDP_DROP and XDP_TX actions are supported, as well as
metadata and stats reporting.
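
On the receive path this boils down to acting on the program's verdict;
a simplified sketch of that switch (not ixgbevf's exact code, which also
handles DMA unmapping and the dedicated XDP Tx rings):

	/* Run the attached program on one Rx buffer and map the verdict to
	 * the driver's pass/tx/drop handling. Relies on <linux/filter.h>.
	 */
	static int ixgbevf_run_xdp_sketch(struct bpf_prog *xdp_prog,
					  struct xdp_buff *xdp)
	{
		u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

		switch (act) {
		case XDP_PASS:
			return 0;	/* build an skb, hand it to the stack */
		case XDP_TX:
			return 1;	/* queue the buffer on an XDP Tx ring */
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
		case XDP_DROP:
			return -1;	/* recycle the Rx buffer */
		}
	}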

Björn Töpel tweaks the page counting for XDP_REDIRECT, since a page can
have its reference count decreased via the xdp_do_redirect() call.
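
The change shows up in the pagecnt_bias hunks below. The scheme, in
brief: the driver takes a large page reference up front and tracks
per-buffer usage in a 16-bit bias, so a reference dropped outside the
driver by xdp_do_redirect() cannot race the refcount down to zero while
the Rx buffer is still in use. A sketch of the two sides:

	/* at allocation: page refcount goes 1 -> USHRT_MAX in one atomic op */
	page_ref_add(page, USHRT_MAX - 1);
	rx_buffer->pagecnt_bias = USHRT_MAX;

	/* ... each frame handed up or redirected consumes one unit ... */

	/* on reuse: once the bias is nearly spent, restock the refcount */
	if (unlikely(rx_buffer->pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}
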
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ee7a60c9 ed93a398
@@ -154,6 +154,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed);
#define IXGBE_FAILED_READ_RETRIES 5
#define IXGBE_FAILED_READ_REG 0xffffffffU
#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
@@ -774,11 +774,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;
itd->flags = 0;
if (xs->id.proto == IPPROTO_ESP) {
struct sk_buff *skb = first->skb;
int ret, authlen, trailerlen;
u8 padlen;
itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
IXGBE_ADVTXD_TUCMD_L4T_TCP;
@@ -790,19 +786,28 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
* padlen bytes of padding. This ends up not the same
* as the static value found in xs->props.trailer_len (21).
*
* The "correct" way to get the auth length would be to use
* authlen = crypto_aead_authsize(xs->data);
* but since we know we only have one size to worry about
* we can let the compiler use the constant and save us a
* few CPU cycles.
* ... but if we're doing GSO, don't bother as the stack
* doesn't add a trailer for those.
*/
authlen = IXGBE_IPSEC_AUTH_BITS / 8;
ret = skb_copy_bits(skb, skb->len - (authlen + 2), &padlen, 1);
if (unlikely(ret))
return 0;
trailerlen = authlen + 2 + padlen;
itd->trailer_len = trailerlen;
if (!skb_is_gso(first->skb)) {
/* The "correct" way to get the auth length would be
* to use
* authlen = crypto_aead_authsize(xs->data);
* but since we know we only have one size to worry
* about, we can let the compiler use the constant
* and save us a few CPU cycles.
*/
const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
struct sk_buff *skb = first->skb;
u8 padlen;
int ret;
ret = skb_copy_bits(skb, skb->len - (authlen + 2),
&padlen, 1);
if (unlikely(ret))
return 0;
itd->trailer_len = authlen + 2 + padlen;
}
}
if (tsa->encrypt)
itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
@@ -924,8 +929,13 @@ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
ixgbe_ipsec_clear_hw_tables(adapter);
adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
adapter->netdev->features |= NETIF_F_HW_ESP;
adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
adapter->netdev->features |= IXGBE_ESP_FEATURES;
adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;
return;
@@ -353,23 +353,32 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
ixgbe_service_event_schedule(adapter);
}
static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
u8 __iomem *reg_addr;
u32 value;
int i;
reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return IXGBE_FAILED_READ_REG;
/* The following check not only optimizes a bit by not
* performing a read on the status register when the
* register just read was a status register read that
* returned IXGBE_FAILED_READ_REG. It also blocks any
* potential recursion.
/* Register read of 0xFFFFFFFF can indicate the adapter has been removed,
* so perform several status register reads to determine if the adapter
* has been removed.
*/
if (reg == IXGBE_STATUS) {
ixgbe_remove_adapter(hw);
return;
for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
value = readl(reg_addr + IXGBE_STATUS);
if (value != IXGBE_FAILED_READ_REG)
break;
mdelay(3);
}
value = ixgbe_read_reg(hw, IXGBE_STATUS);
if (value == IXGBE_FAILED_READ_REG)
ixgbe_remove_adapter(hw);
else
value = readl(reg_addr + reg);
return value;
}
/**
@@ -415,7 +424,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
writes_completed:
value = readl(reg_addr + reg);
if (unlikely(value == IXGBE_FAILED_READ_REG))
ixgbe_check_remove(hw, reg);
value = ixgbe_check_remove(hw, reg);
return value;
}
@@ -1620,7 +1629,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = ixgbe_rx_offset(rx_ring);
bi->pagecnt_bias = 1;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
rx_ring->rx_stats.alloc_rx_page++;
return true;
@@ -2030,8 +2040,8 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX);
if (unlikely(pagecnt_bias == 1)) {
page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
@@ -7721,7 +7731,8 @@ static void ixgbe_service_task(struct work_struct *work)
static int ixgbe_tso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u8 *hdr_len)
u8 *hdr_len,
struct ixgbe_ipsec_tx_data *itd)
{
u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
@@ -7735,6 +7746,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
u32 fceof_saidx = 0;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7760,13 +7772,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
if (ip.v4->version == 4) {
unsigned char *csum_start = skb_checksum_start(skb);
unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
int len = csum_start - trans_start;
/* IP header will have to cancel out any data that
* is not a part of the outer IP header
* is not a part of the outer IP header, so set to
* a reverse csum if needed, else init check to 0.
*/
ip.v4->check = csum_fold(csum_partial(trans_start,
csum_start - trans_start,
0));
ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
csum_fold(csum_partial(trans_start,
len, 0)) : 0;
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
ip.v4->tot_len = 0;
@@ -7797,12 +7811,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
fceof_saidx |= itd->sa_idx;
type_tucmd |= itd->flags | itd->trailer_len;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = l4.hdr - ip.hdr;
vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
mss_l4len_idx);
return 1;
@@ -7864,10 +7881,8 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) {
fceof_saidx |= itd->sa_idx;
type_tucmd |= itd->flags | itd->trailer_len;
}
fceof_saidx |= itd->sa_idx;
type_tucmd |= itd->flags | itd->trailer_len;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
}
@@ -8495,7 +8510,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbe_tso(tx_ring, first, &hdr_len);
tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
if (tso < 0)
goto out_drop;
else if (!tso)
@@ -9904,15 +9919,15 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
/* We can only support IPV4 TSO in tunnels if we can mangle the
* inner IP ID field, so strip TSO if MANGLEID is not supported.
* IPsec offload sets skb->encapsulation but still can handle
* the TSO, so it's the exception.
*/
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
features &= ~NETIF_F_TSO;
#ifdef CONFIG_XFRM_OFFLOAD
/* IPsec offload doesn't get along well with others *yet* */
if (skb->sp)
features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM);
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
#ifdef CONFIG_XFRM
if (!skb->sp)
#endif
features &= ~NETIF_F_TSO;
}
return features;
}
@@ -1847,9 +1847,9 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
(IXGBE_CS4227_EDC_MODE_SR << 1));
if (setup_linear)
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
else
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
ret_val = hw->phy.ops.write_reg(hw, reg_slice,
IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2015 Intel Corporation.
Copyright(c) 1999 - 2018 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -82,6 +82,7 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
#define IXGBEVF_QUEUE_STATS_LEN ( \
(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
(sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
@@ -269,7 +270,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
u32 new_rx_count, new_tx_count;
int i, err = 0;
int i, j, err = 0;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -293,15 +294,19 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_xdp_queues; i++)
adapter->xdp_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->xdp_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
}
if (new_tx_count != adapter->tx_ring_count) {
tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
tx_ring = vmalloc((adapter->num_tx_queues +
adapter->num_xdp_queues) * sizeof(*tx_ring));
if (!tx_ring) {
err = -ENOMEM;
goto clear_reset;
@@ -324,6 +329,24 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
goto clear_reset;
}
}
for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
/* clone ring and setup updated count */
tx_ring[i] = *adapter->xdp_ring[j];
tx_ring[i].count = new_tx_count;
err = ixgbevf_setup_tx_resources(&tx_ring[i]);
if (err) {
while (i) {
i--;
ixgbevf_free_tx_resources(&tx_ring[i]);
}
vfree(tx_ring);
tx_ring = NULL;
goto clear_reset;
}
}
}
if (new_rx_count != adapter->rx_ring_count) {
@@ -336,8 +359,13 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
/* clone ring and setup updated count */
rx_ring[i] = *adapter->rx_ring[i];
/* Clear copied XDP RX-queue info */
memset(&rx_ring[i].xdp_rxq, 0,
sizeof(rx_ring[i].xdp_rxq));
rx_ring[i].count = new_rx_count;
err = ixgbevf_setup_rx_resources(&rx_ring[i]);
err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
if (err) {
while (i) {
i--;
@@ -363,6 +391,12 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
}
adapter->tx_ring_count = new_tx_count;
for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
*adapter->xdp_ring[j] = tx_ring[i];
}
adapter->xdp_ring_count = new_tx_count;
vfree(tx_ring);
tx_ring = NULL;
}
@@ -385,7 +419,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
clear_reset:
/* free Tx resources if Rx error is encountered */
if (tx_ring) {
for (i = 0; i < adapter->num_tx_queues; i++)
for (i = 0;
i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
ixgbevf_free_tx_resources(&tx_ring[i]);
vfree(tx_ring);
}
@@ -457,6 +492,23 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
i += 2;
}
/* populate XDP queue data */
for (j = 0; j < adapter->num_xdp_queues; j++) {
ring = adapter->xdp_ring[j];
if (!ring) {
data[i++] = 0;
data[i++] = 0;
continue;
}
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
}
/* populate Rx queue data */
for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j];
@@ -500,6 +552,12 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
sprintf(p, "xdp_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "xdp_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
@@ -2,7 +2,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2015 Intel Corporation.
Copyright(c) 1999 - 2018 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,7 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>
#include "vf.h"
@@ -51,7 +52,11 @@
struct ixgbevf_tx_buffer {
union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
struct sk_buff *skb;
union {
struct sk_buff *skb;
/* XDP uses address ptr on irq_clean */
void *data;
};
unsigned int bytecount;
unsigned short gso_segs;
__be16 protocol;
@@ -94,12 +99,21 @@ enum ixgbevf_ring_state_t {
__IXGBEVF_RX_BUILD_SKB_ENABLED,
__IXGBEVF_TX_DETECT_HANG,
__IXGBEVF_HANG_CHECK_ARMED,
__IXGBEVF_TX_XDP_RING,
};
#define ring_is_xdp(ring) \
test_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
#define set_ring_xdp(ring) \
set_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
#define clear_ring_xdp(ring) \
clear_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
struct ixgbevf_ring {
struct ixgbevf_ring *next;
struct ixgbevf_q_vector *q_vector; /* backpointer to q_vector */
struct net_device *netdev;
struct bpf_prog *xdp_prog;
struct device *dev;
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -120,7 +134,7 @@ struct ixgbevf_ring {
struct ixgbevf_tx_queue_stats tx_stats;
struct ixgbevf_rx_queue_stats rx_stats;
};
struct xdp_rxq_info xdp_rxq;
u64 hw_csum_rx_error;
u8 __iomem *tail;
struct sk_buff *skb;
@@ -137,6 +151,7 @@ struct ixgbevf_ring {
#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
#define MAX_XDP_QUEUES IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_MAX_RSS_QUEUES 2
#define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */
#define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */
@@ -337,6 +352,10 @@ struct ixgbevf_adapter {
u32 eims_enable_mask;
u32 eims_other;
/* XDP */
int num_xdp_queues;
struct ixgbevf_ring *xdp_ring[MAX_XDP_QUEUES];
/* TX */
int num_tx_queues;
struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
@@ -357,6 +376,7 @@ struct ixgbevf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
/* structs defined in ixgbe_vf.h */
@@ -370,6 +390,7 @@ struct ixgbevf_adapter {
unsigned long state;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int xdp_ring_count;
unsigned int rx_ring_count;
u8 __iomem *io_addr; /* Mainly for iounmap use */
@@ -443,7 +464,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
struct ixgbevf_ring *rx_ring);
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
void ixgbevf_free_tx_resources(struct ixgbevf_ring *);