Commit 4833a009 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2016-03-29

This series contains fixes to ixgbe and ixgbevf.

Tushar fixes an issue which was introduced with an earlier commit, where
hardware register RAR0 default MAC address does not get set properly.

Alex fixes two issues, first being the VXLAN port number should be stored
in network order instead of in host order.  The second fix corrects the ATR
code to handle IPv6 extension headers.  The issue was ATR code was assuming
that it would be able to use tcp_hdr for every TCP frame that came through,
but that is not the case, which resulted in bad filters being setup.

Mark fixes a case where usleep_range() was being called while a lock is
held, by converting it to udelay().

Stefan fixes the offline self tests so that ndo_stop() is used instead of
dev_close(), since dev_close() causes IFF_UP to be cleared and interface
routes to be removed.

Emil fixes the error case where we need to return an error when a MAC
address change is rejected by the PF.  This helps prevent the user from
modifying the MAC address when the operation is not permitted.

Sridhar provides three fixes for ixgbe, all dealing with traffic class
offload handling.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c0e760c9 b5aea3de
...@@ -661,9 +661,7 @@ struct ixgbe_adapter { ...@@ -661,9 +661,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) #define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
#ifdef CONFIG_IXGBE_VXLAN
#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
#endif
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
/* Tx fast path data */ /* Tx fast path data */
...@@ -675,6 +673,9 @@ struct ixgbe_adapter { ...@@ -675,6 +673,9 @@ struct ixgbe_adapter {
int num_rx_queues; int num_rx_queues;
u16 rx_itr_setting; u16 rx_itr_setting;
/* Port number used to identify VXLAN traffic */
__be16 vxlan_port;
/* TX */ /* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
...@@ -782,9 +783,6 @@ struct ixgbe_adapter { ...@@ -782,9 +783,6 @@ struct ixgbe_adapter {
u32 timer_event_accumulator; u32 timer_event_accumulator;
u32 vferr_refcount; u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table; struct ixgbe_mac_addr *mac_table;
#ifdef CONFIG_IXGBE_VXLAN
u16 vxlan_port;
#endif
struct kobject *info_kobj; struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON #ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff; struct hwmon_buff *ixgbe_hwmon_buff;
...@@ -879,6 +877,8 @@ extern const char ixgbe_driver_version[]; ...@@ -879,6 +877,8 @@ extern const char ixgbe_driver_version[];
extern char ixgbe_default_device_descr[]; extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */ #endif /* IXGBE_FCOE */
int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter); void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter); void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
......
...@@ -2053,7 +2053,7 @@ static void ixgbe_diag_test(struct net_device *netdev, ...@@ -2053,7 +2053,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (if_running) if (if_running)
/* indicate we're in test mode */ /* indicate we're in test mode */
dev_close(netdev); ixgbe_close(netdev);
else else
ixgbe_reset(adapter); ixgbe_reset(adapter);
...@@ -2091,7 +2091,7 @@ static void ixgbe_diag_test(struct net_device *netdev, ...@@ -2091,7 +2091,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
/* clear testing bit and return adapter to previous state */ /* clear testing bit and return adapter to previous state */
clear_bit(__IXGBE_TESTING, &adapter->state); clear_bit(__IXGBE_TESTING, &adapter->state);
if (if_running) if (if_running)
dev_open(netdev); ixgbe_open(netdev);
else if (hw->mac.ops.disable_tx_laser) else if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw); hw->mac.ops.disable_tx_laser(hw);
} else { } else {
......
...@@ -4531,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter) ...@@ -4531,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550: case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_x:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
#ifdef CONFIG_IXGBE_VXLAN
adapter->vxlan_port = 0; adapter->vxlan_port = 0;
#endif
break; break;
default: default:
break; break;
...@@ -5994,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -5994,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
* handler is registered with the OS, the watchdog timer is started, * handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready. * and the stack is notified that the interface is ready.
**/ **/
static int ixgbe_open(struct net_device *netdev) int ixgbe_open(struct net_device *netdev)
{ {
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
...@@ -6096,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) ...@@ -6096,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
* needs to be disabled. A global MAC reset is issued to stop the * needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed. * hardware, and all transmit and receive resources are freed.
**/ **/
static int ixgbe_close(struct net_device *netdev) int ixgbe_close(struct net_device *netdev)
{ {
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
...@@ -7560,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring, ...@@ -7560,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
struct ipv6hdr *ipv6; struct ipv6hdr *ipv6;
} hdr; } hdr;
struct tcphdr *th; struct tcphdr *th;
unsigned int hlen;
struct sk_buff *skb; struct sk_buff *skb;
#ifdef CONFIG_IXGBE_VXLAN
u8 encap = false;
#endif /* CONFIG_IXGBE_VXLAN */
__be16 vlan_id; __be16 vlan_id;
int l4_proto;
/* if ring doesn't have a interrupt vector, cannot perform ATR */ /* if ring doesn't have a interrupt vector, cannot perform ATR */
if (!q_vector) if (!q_vector)
...@@ -7576,62 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring, ...@@ -7576,62 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
ring->atr_count++; ring->atr_count++;
/* currently only IPv4/IPv6 with TCP is supported */
if ((first->protocol != htons(ETH_P_IP)) &&
(first->protocol != htons(ETH_P_IPV6)))
return;
/* snag network header to get L4 type and address */ /* snag network header to get L4 type and address */
skb = first->skb; skb = first->skb;
hdr.network = skb_network_header(skb); hdr.network = skb_network_header(skb);
if (!skb->encapsulation) {
th = tcp_hdr(skb);
} else {
#ifdef CONFIG_IXGBE_VXLAN #ifdef CONFIG_IXGBE_VXLAN
if (skb->encapsulation &&
first->protocol == htons(ETH_P_IP) &&
hdr.ipv4->protocol != IPPROTO_UDP) {
struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_adapter *adapter = q_vector->adapter;
if (!adapter->vxlan_port) /* verify the port is recognized as VXLAN */
return; if (adapter->vxlan_port &&
if (first->protocol != htons(ETH_P_IP) || udp_hdr(skb)->dest == adapter->vxlan_port)
hdr.ipv4->version != IPVERSION || hdr.network = skb_inner_network_header(skb);
hdr.ipv4->protocol != IPPROTO_UDP) {
return;
}
if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
return;
encap = true;
hdr.network = skb_inner_network_header(skb);
th = inner_tcp_hdr(skb);
#else
return;
#endif /* CONFIG_IXGBE_VXLAN */
} }
#endif /* CONFIG_IXGBE_VXLAN */
/* Currently only IPv4/IPv6 with TCP is supported */ /* Currently only IPv4/IPv6 with TCP is supported */
switch (hdr.ipv4->version) { switch (hdr.ipv4->version) {
case IPVERSION: case IPVERSION:
if (hdr.ipv4->protocol != IPPROTO_TCP) /* access ihl as u8 to avoid unaligned access on ia64 */
return; hlen = (hdr.network[0] & 0x0F) << 2;
l4_proto = hdr.ipv4->protocol;
break; break;
case 6: case 6:
if (likely((unsigned char *)th - hdr.network == hlen = hdr.network - skb->data;
sizeof(struct ipv6hdr))) { l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
if (hdr.ipv6->nexthdr != IPPROTO_TCP) hlen -= hdr.network - skb->data;
return;
} else {
__be16 frag_off;
u8 l4_hdr;
ipv6_skip_exthdr(skb, hdr.network - skb->data +
sizeof(struct ipv6hdr),
&l4_hdr, &frag_off);
if (unlikely(frag_off))
return;
if (l4_hdr != IPPROTO_TCP)
return;
}
break; break;
default: default:
return; return;
} }
/* skip this packet since it is invalid or the socket is closing */ if (l4_proto != IPPROTO_TCP)
if (!th || th->fin) return;
th = (struct tcphdr *)(hdr.network + hlen);
/* skip this packet since the socket is closing */
if (th->fin)
return; return;
/* sample on all syn packets or once every atr sample count */ /* sample on all syn packets or once every atr sample count */
...@@ -7682,10 +7667,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring, ...@@ -7682,10 +7667,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
break; break;
} }
#ifdef CONFIG_IXGBE_VXLAN if (hdr.network != skb_network_header(skb))
if (encap)
input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
#endif /* CONFIG_IXGBE_VXLAN */
/* This assumes the Rx queue and Tx queue are bound to the same CPU */ /* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
...@@ -8209,10 +8192,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ...@@ -8209,10 +8192,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls) struct tc_cls_u32_offload *cls)
{ {
u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
u32 loc;
int err; int err;
if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
return -EINVAL;
loc = cls->knode.handle & 0xfffff;
spin_lock(&adapter->fdir_perfect_lock); spin_lock(&adapter->fdir_perfect_lock);
err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle); err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
spin_unlock(&adapter->fdir_perfect_lock); spin_unlock(&adapter->fdir_perfect_lock);
return err; return err;
} }
...@@ -8221,20 +8211,30 @@ static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, ...@@ -8221,20 +8211,30 @@ static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
__be16 protocol, __be16 protocol,
struct tc_cls_u32_offload *cls) struct tc_cls_u32_offload *cls)
{ {
u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
if (uhtid >= IXGBE_MAX_LINK_HANDLE)
return -EINVAL;
/* This ixgbe devices do not support hash tables at the moment /* This ixgbe devices do not support hash tables at the moment
* so abort when given hash tables. * so abort when given hash tables.
*/ */
if (cls->hnode.divisor > 0) if (cls->hnode.divisor > 0)
return -EINVAL; return -EINVAL;
set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); set_bit(uhtid - 1, &adapter->tables);
return 0; return 0;
} }
static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls) struct tc_cls_u32_offload *cls)
{ {
clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
if (uhtid >= IXGBE_MAX_LINK_HANDLE)
return -EINVAL;
clear_bit(uhtid - 1, &adapter->tables);
return 0; return 0;
} }
...@@ -8252,27 +8252,29 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, ...@@ -8252,27 +8252,29 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
#endif #endif
int i, err = 0; int i, err = 0;
u8 queue; u8 queue;
u32 handle; u32 uhtid, link_uhtid;
memset(&mask, 0, sizeof(union ixgbe_atr_input)); memset(&mask, 0, sizeof(union ixgbe_atr_input));
handle = cls->knode.handle; uhtid = TC_U32_USERHTID(cls->knode.handle);
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
/* At the moment cls_u32 jumps to transport layer and skips past /* At the moment cls_u32 jumps to network layer and skips past
* L2 headers. The canonical method to match L2 frames is to use * L2 headers. The canonical method to match L2 frames is to use
* negative values. However this is error prone at best but really * negative values. However this is error prone at best but really
* just broken because there is no way to "know" what sort of hdr * just broken because there is no way to "know" what sort of hdr
* is in front of the transport layer. Fix cls_u32 to support L2 * is in front of the network layer. Fix cls_u32 to support L2
* headers when needed. * headers when needed.
*/ */
if (protocol != htons(ETH_P_IP)) if (protocol != htons(ETH_P_IP))
return -EINVAL; return -EINVAL;
if (cls->knode.link_handle || if (link_uhtid) {
cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) {
struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle);
if (!test_bit(uhtid, &adapter->tables)) if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
return -EINVAL;
if (!test_bit(link_uhtid - 1, &adapter->tables))
return -EINVAL; return -EINVAL;
for (i = 0; nexthdr[i].jump; i++) { for (i = 0; nexthdr[i].jump; i++) {
...@@ -8288,10 +8290,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, ...@@ -8288,10 +8290,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
nexthdr->mask != cls->knode.sel->keys[0].mask) nexthdr->mask != cls->knode.sel->keys[0].mask)
return -EINVAL; return -EINVAL;
if (uhtid >= IXGBE_MAX_LINK_HANDLE) adapter->jump_tables[link_uhtid] = nexthdr->jump;
return -EINVAL;
adapter->jump_tables[uhtid] = nexthdr->jump;
} }
return 0; return 0;
} }
...@@ -8308,13 +8307,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, ...@@ -8308,13 +8307,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
* To add support for new nodes update ixgbe_model.h parse structures * To add support for new nodes update ixgbe_model.h parse structures
* this function _should_ be generic try not to hardcode values here. * this function _should_ be generic try not to hardcode values here.
*/ */
if (TC_U32_USERHTID(handle) == 0x800) { if (uhtid == 0x800) {
field_ptr = adapter->jump_tables[0]; field_ptr = adapter->jump_tables[0];
} else { } else {
if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables)) if (uhtid >= IXGBE_MAX_LINK_HANDLE)
return -EINVAL; return -EINVAL;
field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)]; field_ptr = adapter->jump_tables[uhtid];
} }
if (!field_ptr) if (!field_ptr)
...@@ -8332,8 +8331,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, ...@@ -8332,8 +8331,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
int j; int j;
for (j = 0; field_ptr[j].val; j++) { for (j = 0; field_ptr[j].val; j++) {
if (field_ptr[j].off == off && if (field_ptr[j].off == off) {
field_ptr[j].mask == m) {
field_ptr[j].val(input, &mask, val, m); field_ptr[j].val(input, &mask, val, m);
input->filter.formatted.flow_type |= input->filter.formatted.flow_type |=
field_ptr[j].type; field_ptr[j].type;
...@@ -8393,8 +8391,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, ...@@ -8393,8 +8391,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
return -EINVAL; return -EINVAL;
} }
int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc) struct tc_to_netdev *tc)
{ {
struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_adapter *adapter = netdev_priv(dev);
...@@ -8554,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, ...@@ -8554,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
{ {
struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
u16 new_port = ntohs(port);
if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return; return;
...@@ -8562,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, ...@@ -8562,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
if (sa_family == AF_INET6) if (sa_family == AF_INET6)
return; return;
if (adapter->vxlan_port == new_port) if (adapter->vxlan_port == port)
return; return;
if (adapter->vxlan_port) { if (adapter->vxlan_port) {
netdev_info(dev, netdev_info(dev,
"Hit Max num of VXLAN ports, not adding port %d\n", "Hit Max num of VXLAN ports, not adding port %d\n",
new_port); ntohs(port));
return; return;
} }
adapter->vxlan_port = new_port; adapter->vxlan_port = port;
IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port); IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
} }
/** /**
...@@ -8586,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, ...@@ -8586,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
__be16 port) __be16 port)
{ {
struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_adapter *adapter = netdev_priv(dev);
u16 new_port = ntohs(port);
if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return; return;
...@@ -8594,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, ...@@ -8594,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
if (sa_family == AF_INET6) if (sa_family == AF_INET6)
return; return;
if (adapter->vxlan_port != new_port) { if (adapter->vxlan_port != port) {
netdev_info(dev, "Port %d was not found, not deleting\n", netdev_info(dev, "Port %d was not found, not deleting\n",
new_port); ntohs(port));
return; return;
} }
...@@ -9265,17 +9261,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -9265,17 +9261,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->priv_flags |= IFF_SUPP_NOFCS;
#ifdef CONFIG_IXGBE_VXLAN
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
netdev->hw_enc_features |= NETIF_F_RXCSUM;
break;
default:
break;
}
#endif /* CONFIG_IXGBE_VXLAN */
#ifdef CONFIG_IXGBE_DCB #ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops; netdev->dcbnl_ops = &dcbnl_ops;
#endif #endif
...@@ -9329,6 +9314,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -9329,6 +9314,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_sw_init; goto err_sw_init;
} }
/* Set hw->mac.addr to permanent MAC address */
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter); ixgbe_mac_set_default_filter(adapter);
setup_timer(&adapter->service_timer, &ixgbe_service_timer, setup_timer(&adapter->service_timer, &ixgbe_service_timer,
......
...@@ -32,7 +32,6 @@ ...@@ -32,7 +32,6 @@
struct ixgbe_mat_field { struct ixgbe_mat_field {
unsigned int off; unsigned int off;
unsigned int mask;
int (*val)(struct ixgbe_fdir_filter *input, int (*val)(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask, union ixgbe_atr_input *mask,
u32 val, u32 m); u32 val, u32 m);
...@@ -58,35 +57,27 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, ...@@ -58,35 +57,27 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
} }
static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { static struct ixgbe_mat_field ixgbe_ipv4_fields[] = {
{ .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip, { .off = 12, .val = ixgbe_mat_prgm_sip,
.type = IXGBE_ATR_FLOW_TYPE_IPV4}, .type = IXGBE_ATR_FLOW_TYPE_IPV4},
{ .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip, { .off = 16, .val = ixgbe_mat_prgm_dip,
.type = IXGBE_ATR_FLOW_TYPE_IPV4}, .type = IXGBE_ATR_FLOW_TYPE_IPV4},
{ .val = NULL } /* terminal node */ { .val = NULL } /* terminal node */
}; };
static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask, union ixgbe_atr_input *mask,
u32 val, u32 m) u32 val, u32 m)
{ {
input->filter.formatted.src_port = val & 0xffff; input->filter.formatted.src_port = val & 0xffff;
mask->formatted.src_port = m & 0xffff; mask->formatted.src_port = m & 0xffff;
return 0; input->filter.formatted.dst_port = val >> 16;
}; mask->formatted.dst_port = m >> 16;
static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
{
input->filter.formatted.dst_port = val & 0xffff;
mask->formatted.dst_port = m & 0xffff;
return 0; return 0;
}; };
static struct ixgbe_mat_field ixgbe_tcp_fields[] = { static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
{.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport, {.off = 0, .val = ixgbe_mat_prgm_ports,
.type = IXGBE_ATR_FLOW_TYPE_TCPV4},
{.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport,
.type = IXGBE_ATR_FLOW_TYPE_TCPV4}, .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
{ .val = NULL } /* terminal node */ { .val = NULL } /* terminal node */
}; };
......
...@@ -355,7 +355,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) ...@@ -355,7 +355,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
break; break;
usleep_range(10, 20); udelay(10);
} }
if (ctrl) if (ctrl)
*ctrl = command; *ctrl = command;
......
...@@ -680,7 +680,7 @@ static void ixgbevf_diag_test(struct net_device *netdev, ...@@ -680,7 +680,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
if (if_running) if (if_running)
/* indicate we're in test mode */ /* indicate we're in test mode */
dev_close(netdev); ixgbevf_close(netdev);
else else
ixgbevf_reset(adapter); ixgbevf_reset(adapter);
...@@ -692,7 +692,7 @@ static void ixgbevf_diag_test(struct net_device *netdev, ...@@ -692,7 +692,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
clear_bit(__IXGBEVF_TESTING, &adapter->state); clear_bit(__IXGBEVF_TESTING, &adapter->state);
if (if_running) if (if_running)
dev_open(netdev); ixgbevf_open(netdev);
} else { } else {
hw_dbg(&adapter->hw, "online testing starting\n"); hw_dbg(&adapter->hw, "online testing starting\n");
/* Online tests */ /* Online tests */
......
...@@ -486,6 +486,8 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; ...@@ -486,6 +486,8 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const char ixgbevf_driver_name[]; extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[]; extern const char ixgbevf_driver_version[];
int ixgbevf_open(struct net_device *netdev);
int ixgbevf_close(struct net_device *netdev);
void ixgbevf_up(struct ixgbevf_adapter *adapter); void ixgbevf_up(struct ixgbevf_adapter *adapter);
void ixgbevf_down(struct ixgbevf_adapter *adapter); void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
......
...@@ -3122,7 +3122,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) ...@@ -3122,7 +3122,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started, * handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready. * and the stack is notified that the interface is ready.
**/ **/
static int ixgbevf_open(struct net_device *netdev) int ixgbevf_open(struct net_device *netdev)
{ {
struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
...@@ -3205,7 +3205,7 @@ static int ixgbevf_open(struct net_device *netdev) ...@@ -3205,7 +3205,7 @@ static int ixgbevf_open(struct net_device *netdev)
* needs to be disabled. A global MAC reset is issued to stop the * needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed. * hardware, and all transmit and receive resources are freed.
**/ **/
static int ixgbevf_close(struct net_device *netdev) int ixgbevf_close(struct net_device *netdev)
{ {
struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev);
...@@ -3692,19 +3692,23 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) ...@@ -3692,19 +3692,23 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p; struct sockaddr *addr = p;
int err;
if (!is_valid_ether_addr(addr->sa_data)) if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL; return -EADDRNOTAVAIL;
ether_addr_copy(netdev->dev_addr, addr->sa_data);
ether_addr_copy(hw->mac.addr, addr->sa_data);
spin_lock_bh(&adapter->mbx_lock); spin_lock_bh(&adapter->mbx_lock);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
spin_unlock_bh(&adapter->mbx_lock); spin_unlock_bh(&adapter->mbx_lock);
if (err)
return -EPERM;
ether_addr_copy(hw->mac.addr, addr->sa_data);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0; return 0;
} }
......
...@@ -408,8 +408,10 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, ...@@ -408,8 +408,10 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/* if nacked the address was rejected, use "perm_addr" */ /* if nacked the address was rejected, use "perm_addr" */
if (!ret_val && if (!ret_val &&
(msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
return IXGBE_ERR_MBX;
}
return ret_val; return ret_val;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment