Commit 024af151 authored by David S. Miller

Merge branch 'amd-xgbe-next'

Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2016-02-12

The following updates and fixes are included in this driver update series:

- Disable VLAN filtering in promiscuous mode
- Change from using napi_complete to napi_complete_done
- Use __napi_schedule_irqoff when running in interrupt context
- Verify ethtool speed setting is valid for the selected speedset
- Enable PFC based on the pfc_en setting
- Fix the mapping of priorities to traffic classes
- Do traffic class setup when DCB nl callbacks are invoked
- Check Rx queue fifos before stopping Rx queue DMA
- Switch from disable_irq to masking interrupts for auto-negotiation

This patch series is based on net-next.

Changes from v1:
- Removed #ifndef and #define of CRCPOLY_LE as part of the patch to
  disable VLAN filtering in promiscuous mode
- Reworked changes to xgbe_setup_tc to resolve conflicts with commit
  16e5cc64 (net: rework setup_tc ndo op to consume general tc operand)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7009212b ced3fcae

--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -6,7 +6,7 @@
  *
  * License 1: GPLv2
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  *
  * This file is free software; you may copy, redistribute and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
  *
  * License 2: Modified BSD
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -768,12 +768,16 @@
 #define MTL_Q_TQDR			0x08
 #define MTL_Q_RQOMR			0x40
 #define MTL_Q_RQMPOCR			0x44
-#define MTL_Q_RQDR			0x4c
+#define MTL_Q_RQDR			0x48
 #define MTL_Q_RQFCR			0x50
 #define MTL_Q_IER			0x70
 #define MTL_Q_ISR			0x74
 
 /* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_INDEX		16
+#define MTL_Q_RQDR_PRXQ_WIDTH		14
+#define MTL_Q_RQDR_RXQSTS_INDEX		4
+#define MTL_Q_RQDR_RXQSTS_WIDTH		2
 #define MTL_Q_RQFCR_RFA_INDEX		1
 #define MTL_Q_RQFCR_RFA_WIDTH		6
 #define MTL_Q_RQFCR_RFD_INDEX		17
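
For reference: the new MTL_Q_RQDR field definitions follow the driver's _INDEX/_WIDTH convention, with PRXQ (packets remaining in the Rx queue) in bits 16-29 and RXQSTS (Rx queue status) in bits 4-5; xgbe_prepare_rx_stop() later in this series polls both until they read zero. Field access in this driver reduces to a shift-and-mask. The sketch below shows the shape of the GET_BITS helper as best recalled from xgbe-common.h (verify against the tree before relying on it):

```c
/* Sketch only: shift-and-mask field access built from _INDEX/_WIDTH pairs.
 * The driver's XGMAC_GET_BITS() wrapper token-pastes the register prefix
 * and field name onto these macros.
 */
#define GET_BITS(_var, _index, _width)					\
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

/* e.g. XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) expands along the lines of: */
#define RQDR_PRXQ(rx_status)						\
	GET_BITS((rx_status), MTL_Q_RQDR_PRXQ_INDEX, MTL_Q_RQDR_PRXQ_WIDTH)
```
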
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -6,7 +6,7 @@
  *
  * License 1: GPLv2
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  *
  * This file is free software; you may copy, redistribute and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
  *
  * License 2: Modified BSD
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -146,6 +146,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	unsigned int i, tc_ets, tc_ets_weight;
+	u8 max_tc = 0;
 
 	tc_ets = 0;
 	tc_ets_weight = 0;
@@ -157,12 +158,9 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
 		netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
 			  ets->prio_tc[i]);
 
-		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
-		    (i >= pdata->hw_feat.tc_cnt))
-			return -EINVAL;
-
-		if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt)
-			return -EINVAL;
+		max_tc = max_t(u8, max_tc, ets->prio_tc[i]);
+		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]))
+			max_tc = max_t(u8, max_tc, i);
 
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_STRICT:
@@ -171,15 +169,28 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
 			tc_ets = 1;
 			tc_ets_weight += ets->tc_tx_bw[i];
 			break;
-
 		default:
+			netif_err(pdata, drv, netdev,
+				  "unsupported TSA algorithm (%hhu)\n",
+				  ets->tc_tsa[i]);
 			return -EINVAL;
 		}
 	}
 
+	/* Check maximum traffic class requested */
+	if (max_tc >= pdata->hw_feat.tc_cnt) {
+		netif_err(pdata, drv, netdev,
+			  "exceeded number of supported traffic classes\n");
+		return -EINVAL;
+	}
+
 	/* Weights must add up to 100% */
-	if (tc_ets && (tc_ets_weight != 100))
+	if (tc_ets && (tc_ets_weight != 100)) {
+		netif_err(pdata, drv, netdev,
+			  "sum of ETS algorithm weights is not 100 (%u)\n",
+			  tc_ets_weight);
 		return -EINVAL;
+	}
 
 	if (!pdata->ets) {
 		pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets),
@@ -188,6 +199,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
 			return -ENOMEM;
 	}
 
+	pdata->num_tcs = max_tc + 1;
 	memcpy(pdata->ets, ets, sizeof(*pdata->ets));
 
 	pdata->hw_if.config_dcb_tc(pdata);
@@ -221,6 +233,13 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
 		  "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
 		  pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
 
+	/* Check PFC for supported number of traffic classes */
+	if (pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1)) {
+		netif_err(pdata, drv, netdev,
+			  "PFC requested for unsupported traffic class\n");
+		return -EINVAL;
+	}
+
 	if (!pdata->pfc) {
 		pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
 					  GFP_KERNEL);
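
The new pfc_en check in xgbe_dcb_ieee_setpfc() is pure mask arithmetic: PFC may only be requested for traffic classes the hardware actually supports, so any bit at or above hw_feat.tc_cnt rejects the request. A standalone illustration with made-up values:

```c
#include <stdio.h>

int main(void)
{
	unsigned int tc_cnt = 4;	/* assume hardware supports TC0-TC3 */
	unsigned int pfc_en = 0x30;	/* PFC requested on priorities 4 and 5 */
	unsigned int supported = (1 << tc_cnt) - 1;	/* 0x0f */

	/* Mirrors: pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1) */
	if (pfc_en & ~supported)
		printf("rejected: en=%#x exceeds supported mask %#x\n",
		       pfc_en, supported);
	else
		printf("accepted\n");
	return 0;
}
```
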
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -6,7 +6,7 @@
  *
  * License 1: GPLv2
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  *
  * This file is free software; you may copy, redistribute and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
  *
  * License 2: Modified BSD
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -518,13 +518,45 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 
 static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
+	struct ieee_pfc *pfc = pdata->pfc;
+	struct ieee_ets *ets = pdata->ets;
 	unsigned int max_q_count, q_count;
 	unsigned int reg, reg_val;
 	unsigned int i;
 
 	/* Set MTL flow control */
-	for (i = 0; i < pdata->rx_q_count; i++)
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		unsigned int ehfc = 0;
+
+		if (pfc && ets) {
+			unsigned int prio;
+
+			for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+				unsigned int tc;
+
+				/* Does this queue handle the priority? */
+				if (pdata->prio2q_map[prio] != i)
+					continue;
+
+				/* Get the Traffic Class for this priority */
+				tc = ets->prio_tc[prio];
+
+				/* Check if flow control should be enabled */
+				if (pfc->pfc_en & (1 << tc)) {
+					ehfc = 1;
+					break;
+				}
+			}
+		} else {
+			ehfc = 1;
+		}
+
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+
+		netif_dbg(pdata, drv, pdata->netdev,
+			  "flow control %s for RXq%u\n",
+			  ehfc ? "enabled" : "disabled", i);
+	}
 
 	/* Set MAC flow control */
 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
@@ -702,6 +734,113 @@ static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
 	return 0;
 }
 
+static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+	/* Put the VLAN tag in the Rx descriptor */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
+
+	/* Don't check the VLAN type */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
+
+	/* Check only C-TAG (0x8100) packets */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
+
+	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
+
+	/* Enable VLAN tag stripping */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
+
+	return 0;
+}
+
+static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
+
+	return 0;
+}
+
+static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+	/* Enable VLAN filtering */
+	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+	/* Enable VLAN Hash Table filtering */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+	/* Disable VLAN tag inverse matching */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+	/* Only filter on the lower 12-bits of the VLAN tag */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+	/* In order for the VLAN Hash Table filtering to be effective,
+	 * the VLAN tag identifier in the VLAN Tag Register must not
+	 * be zero.  Set the VLAN tag identifier to "1" to enable the
+	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
+	 * 1 will always pass filtering.
+	 */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+
+	return 0;
+}
+
+static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+	/* Disable VLAN filtering */
+	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+
+	return 0;
+}
+
+static u32 xgbe_vid_crc32_le(__le16 vid_le)
+{
+	u32 poly = 0xedb88320;	/* CRCPOLY_LE */
+	u32 crc = ~0;
+	u32 temp = 0;
+	unsigned char *data = (unsigned char *)&vid_le;
+	unsigned char data_byte = 0;
+	int i, bits;
+
+	bits = get_bitmask_order(VLAN_VID_MASK);
+	for (i = 0; i < bits; i++) {
+		if ((i % 8) == 0)
+			data_byte = data[i / 8];
+
+		temp = ((crc & 1) ^ data_byte) & 1;
+		crc >>= 1;
+		data_byte >>= 1;
+
+		if (temp)
+			crc ^= poly;
+	}
+
+	return crc;
+}
+
+static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+{
+	u32 crc;
+	u16 vid;
+	__le16 vid_le;
+	u16 vlan_hash_table = 0;
+
+	/* Generate the VLAN Hash Table value */
+	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+		/* Get the CRC32 value of the VLAN ID */
+		vid_le = cpu_to_le16(vid);
+		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+		vlan_hash_table |= (1 << crc);
+	}
+
+	/* Set the VLAN Hash Table filtering register */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+
+	return 0;
+}
+
 static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
 				     unsigned int enable)
 {
@@ -714,6 +853,14 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
 		  enable ? "entering" : "leaving");
 
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
 
+	/* Hardware will still perform VLAN filtering in promiscuous mode */
+	if (enable) {
+		xgbe_disable_rx_vlan_filtering(pdata);
+	} else {
+		if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			xgbe_enable_rx_vlan_filtering(pdata);
+	}
+
 	return 0;
 }
@@ -875,6 +1022,7 @@ static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 			      int mmd_reg)
 {
+	unsigned long flags;
 	unsigned int mmd_address;
 	int mmd_data;
 
@@ -892,10 +1040,10 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 	 * register offsets must therefore be adjusted by left shifting the
 	 * offset 2 bits and reading 32 bits of data.
 	 */
-	mutex_lock(&pdata->xpcs_mutex);
+	spin_lock_irqsave(&pdata->xpcs_lock, flags);
 	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
 	mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
-	mutex_unlock(&pdata->xpcs_mutex);
+	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
 
 	return mmd_data;
 }
@@ -904,6 +1052,7 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 				int mmd_reg, int mmd_data)
 {
 	unsigned int mmd_address;
+	unsigned long flags;
 
 	if (mmd_reg & MII_ADDR_C45)
 		mmd_address = mmd_reg & ~MII_ADDR_C45;
@@ -919,10 +1068,10 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 	 * register offsets must therefore be adjusted by left shifting the
 	 * offset 2 bits and reading 32 bits of data.
 	 */
-	mutex_lock(&pdata->xpcs_mutex);
+	spin_lock_irqsave(&pdata->xpcs_lock, flags);
 	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
 	XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
-	mutex_unlock(&pdata->xpcs_mutex);
+	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
 }
 
 static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
@@ -944,116 +1093,6 @@ static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
 	return 0;
 }
 
-static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
-{
-	/* Put the VLAN tag in the Rx descriptor */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
-
-	/* Don't check the VLAN type */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
-
-	/* Check only C-TAG (0x8100) packets */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
-
-	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
-
-	/* Enable VLAN tag stripping */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
-
-	return 0;
-}
-
-static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
-{
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
-
-	return 0;
-}
-
-static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
-{
-	/* Enable VLAN filtering */
-	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
-
-	/* Enable VLAN Hash Table filtering */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
-
-	/* Disable VLAN tag inverse matching */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
-
-	/* Only filter on the lower 12-bits of the VLAN tag */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
-
-	/* In order for the VLAN Hash Table filtering to be effective,
-	 * the VLAN tag identifier in the VLAN Tag Register must not
-	 * be zero.  Set the VLAN tag identifier to "1" to enable the
-	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
-	 * 1 will always pass filtering.
-	 */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
-
-	return 0;
-}
-
-static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
-{
-	/* Disable VLAN filtering */
-	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
-
-	return 0;
-}
-
-#ifndef CRCPOLY_LE
-#define CRCPOLY_LE 0xedb88320
-#endif
-static u32 xgbe_vid_crc32_le(__le16 vid_le)
-{
-	u32 poly = CRCPOLY_LE;
-	u32 crc = ~0;
-	u32 temp = 0;
-	unsigned char *data = (unsigned char *)&vid_le;
-	unsigned char data_byte = 0;
-	int i, bits;
-
-	bits = get_bitmask_order(VLAN_VID_MASK);
-	for (i = 0; i < bits; i++) {
-		if ((i % 8) == 0)
-			data_byte = data[i / 8];
-
-		temp = ((crc & 1) ^ data_byte) & 1;
-		crc >>= 1;
-		data_byte >>= 1;
-
-		if (temp)
-			crc ^= poly;
-	}
-
-	return crc;
-}
-
-static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
-{
-	u32 crc;
-	u16 vid;
-	__le16 vid_le;
-	u16 vlan_hash_table = 0;
-
-	/* Generate the VLAN Hash Table value */
-	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
-		/* Get the CRC32 value of the VLAN ID */
-		vid_le = cpu_to_le16(vid);
-		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
-
-		vlan_hash_table |= (1 << crc);
-	}
-
-	/* Set the VLAN Hash Table filtering register */
-	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
-
-	return 0;
-}
-
 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
 {
 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
@@ -1288,11 +1327,42 @@ static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
 	return 0;
 }
 
+static void xgbe_config_tc(struct xgbe_prv_data *pdata)
+{
+	unsigned int offset, queue, prio;
+	u8 i;
+
+	netdev_reset_tc(pdata->netdev);
+	if (!pdata->num_tcs)
+		return;
+
+	netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
+
+	for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
+		while ((queue < pdata->tx_q_count) &&
+		       (pdata->q2tc_map[queue] == i))
+			queue++;
+
+		netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
+			  i, offset, queue - 1);
+		netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
+		offset = queue;
+	}
+
+	if (!pdata->ets)
+		return;
+
+	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+		netdev_set_prio_tc_map(pdata->netdev, prio,
+				       pdata->ets->prio_tc[prio]);
+}
+
 static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
 {
 	struct ieee_ets *ets = pdata->ets;
 	unsigned int total_weight, min_weight, weight;
-	unsigned int i;
+	unsigned int mask, reg, reg_val;
+	unsigned int i, prio;
 
 	if (!ets)
 		return;
@@ -1309,6 +1379,25 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
 	min_weight = 1;
 
 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		/* Map the priorities to the traffic class */
+		mask = 0;
+		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+			if (ets->prio_tc[prio] == i)
+				mask |= (1 << prio);
+		}
+		mask &= 0xff;
+
+		netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
+			  i, mask);
+		reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
+		reg_val = XGMAC_IOREAD(pdata, reg);
+
+		reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
+		reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
+
+		XGMAC_IOWRITE(pdata, reg, reg_val);
+
+		/* Set the traffic class algorithm */
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_STRICT:
 			netif_dbg(pdata, drv, pdata->netdev,
@@ -1329,38 +1418,12 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
 			break;
 		}
 	}
+
+	xgbe_config_tc(pdata);
 }
 
 static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
 {
-	struct ieee_pfc *pfc = pdata->pfc;
-	struct ieee_ets *ets = pdata->ets;
-	unsigned int mask, reg, reg_val;
-	unsigned int tc, prio;
-
-	if (!pfc || !ets)
-		return;
-
-	for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
-		mask = 0;
-		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
-			if ((pfc->pfc_en & (1 << prio)) &&
-			    (ets->prio_tc[prio] == tc))
-				mask |= (1 << prio);
-		}
-		mask &= 0xff;
-
-		netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
-			  tc, mask);
-		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
-		reg_val = XGMAC_IOREAD(pdata, reg);
-
-		reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
-		reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));
-
-		XGMAC_IOWRITE(pdata, reg, reg_val);
-	}
-
 	xgbe_config_flow_control(pdata);
 }
@@ -2595,6 +2658,32 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
 	}
 }
 
+static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
+				 unsigned int queue)
+{
+	unsigned int rx_status;
+	unsigned long rx_timeout;
+
+	/* The Rx engine cannot be stopped if it is actively processing
+	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
+	 * wait forever though...
+	 */
+	rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+	while (time_before(jiffies, rx_timeout)) {
+		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
+		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
+		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
+			break;
+
+		usleep_range(500, 1000);
+	}
+
+	if (!time_before(jiffies, rx_timeout))
+		netdev_info(pdata->netdev,
+			    "timed out waiting for Rx queue %u to empty\n",
+			    queue);
+}
+
 static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_channel *channel;
@@ -2633,6 +2722,10 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
 
+	/* Prepare for Rx DMA channel stop */
+	for (i = 0; i < pdata->rx_q_count; i++)
+		xgbe_prepare_rx_stop(pdata, i);
+
 	/* Disable each Rx queue */
 	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
 
@@ -2881,6 +2974,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
 
 	/* For Data Center Bridging config */
+	hw_if->config_tc = xgbe_config_tc;
 	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
 	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
 
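
A few things worth noting in this file: Rx flow control is now enabled per queue only when some priority mapped to that queue has PFC enabled; the XPCS mutex became an IRQ-safe spinlock because the MMD registers are now touched from the auto-negotiation interrupt handler in xgbe-mdio.c; and the VLAN hash code moved up unchanged apart from dropping the CRCPOLY_LE conditional define. The hash itself is a bit-serial CRC32-LE over the low 12 bits of the VID, bit-reversed and complemented, keeping the top 4 bits as the bucket. The userspace sketch below (not driver code) reproduces the bucket selection and can be used to see which VIDs share a bit in the 16-bit VLHT field:

```c
#include <stdint.h>
#include <stdio.h>

/* Software bit-reverse, standing in for the kernel's bitrev32() */
static uint32_t bitrev32_sw(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		if (x & (1u << i))
			r |= 1u << (31 - i);
	return r;
}

/* CRC32-LE over the low 12 bits of the VID (VLAN_VID_MASK is 0xfff, so
 * get_bitmask_order() yields 12). Walking the VID's bits LSB-first is
 * bitwise equivalent to the driver's walk over the little-endian bytes.
 */
static uint32_t vid_crc32_le(uint16_t vid)
{
	uint32_t poly = 0xedb88320;	/* CRCPOLY_LE */
	uint32_t crc = ~0u;
	int i;

	for (i = 0; i < 12; i++) {
		uint32_t bit = (vid >> i) & 1;

		if ((crc ^ bit) & 1)
			crc = (crc >> 1) ^ poly;
		else
			crc >>= 1;
	}
	return crc;
}

int main(void)
{
	uint16_t vids[] = { 1, 100, 4094 };	/* arbitrary sample VIDs */
	uint16_t vlan_hash_table = 0;
	size_t n;

	for (n = 0; n < sizeof(vids) / sizeof(vids[0]); n++) {
		uint32_t bucket = bitrev32_sw(~vid_crc32_le(vids[n])) >> 28;

		printf("VID %4u -> hash bit %u\n", vids[n], (unsigned)bucket);
		vlan_hash_table |= 1u << bucket;
	}
	printf("VLHT value: %#06x\n", vlan_hash_table);
	return 0;
}
```
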
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -6,7 +6,7 @@
  *
  * License 1: GPLv2
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  *
  * This file is free software; you may copy, redistribute and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
  *
  * License 2: Modified BSD
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -356,7 +356,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 				xgbe_disable_rx_tx_ints(pdata);
 
 				/* Turn on polling */
-				__napi_schedule(&pdata->napi);
+				__napi_schedule_irqoff(&pdata->napi);
 			}
 		}
 
@@ -409,7 +409,7 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
 		disable_irq_nosync(channel->dma_irq);
 
 		/* Turn on polling */
-		__napi_schedule(&channel->napi);
+		__napi_schedule_irqoff(&channel->napi);
 	}
 
 	return IRQ_HANDLED;
@@ -1630,32 +1630,18 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
 			 struct tc_to_netdev *tc_to_netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	unsigned int offset, queue;
-	u8 i, tc;
+	u8 tc;
 
 	if (handle != TC_H_ROOT || tc_to_netdev->type != TC_SETUP_MQPRIO)
 		return -EINVAL;
 
 	tc = tc_to_netdev->tc;
 
-	if (tc && (tc != pdata->hw_feat.tc_cnt))
+	if (tc > pdata->hw_feat.tc_cnt)
 		return -EINVAL;
 
-	if (tc) {
-		netdev_set_num_tc(netdev, tc);
-
-		for (i = 0, queue = 0, offset = 0; i < tc; i++) {
-			while ((queue < pdata->tx_q_count) &&
-			       (pdata->q2tc_map[queue] == i))
-				queue++;
-
-			netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
-				  i, offset, queue - 1);
-			netdev_set_tc_queue(netdev, i, queue - offset, offset);
-			offset = queue;
-		}
-	} else {
-		netdev_reset_tc(netdev);
-	}
+	pdata->num_tcs = tc;
+	pdata->hw_if.config_tc(pdata);
 
 	return 0;
 }
@@ -2068,7 +2054,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
 	/* If we processed everything, we are done */
 	if (processed < budget) {
 		/* Turn off polling */
-		napi_complete(napi);
+		napi_complete_done(napi, processed);
 
 		/* Enable Tx and Rx interrupts */
 		enable_irq(channel->dma_irq);
@@ -2110,7 +2096,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
 	/* If we processed everything, we are done */
 	if (processed < budget) {
 		/* Turn off polling */
-		napi_complete(napi);
+		napi_complete_done(napi, processed);
 
 		/* Enable Tx and Rx interrupts */
 		xgbe_enable_rx_tx_ints(pdata);
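
Two NAPI details in this file: napi_complete_done() reports how much work the poll actually did, which the core can use for its interrupt-deferral heuristics, and __napi_schedule_irqoff() skips the local_irq_save/restore that __napi_schedule() performs, which is safe here because both call sites run in hard interrupt context. A minimal sketch of the resulting pattern, using hypothetical example_* names rather than the driver's:

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_channel {
	struct napi_struct napi;
	int dma_irq;
};

/* Hypothetical helper: clean up to "budget" Rx descriptors, return count */
static int example_rx_poll(struct example_channel *ch, int budget);

static irqreturn_t example_dma_isr(int irq, void *data)
{
	struct example_channel *ch = data;

	disable_irq_nosync(ch->dma_irq);	/* quiesce the source... */
	__napi_schedule_irqoff(&ch->napi);	/* ...IRQs are already off here */
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_channel *ch =
		container_of(napi, struct example_channel, napi);
	int processed = example_rx_poll(ch, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);	/* report work done */
		enable_irq(ch->dma_irq);		/* re-arm the interrupt */
	}
	return processed;
}
```
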
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -6,7 +6,7 @@
  *
  * License 1: GPLv2
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  *
  * This file is free software; you may copy, redistribute and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
  *
  * License 2: Modified BSD
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -318,8 +318,20 @@ static int xgbe_set_settings(struct net_device *netdev,
 	if (cmd->autoneg == AUTONEG_DISABLE) {
 		switch (speed) {
 		case SPEED_10000:
+			break;
 		case SPEED_2500:
+			if (pdata->speed_set != XGBE_SPEEDSET_2500_10000) {
+				netdev_err(netdev, "unsupported speed %u\n",
+					   speed);
+				return -EINVAL;
+			}
+			break;
 		case SPEED_1000:
+			if (pdata->speed_set != XGBE_SPEEDSET_1000_10000) {
+				netdev_err(netdev, "unsupported speed %u\n",
+					   speed);
+				return -EINVAL;
+			}
 			break;
 		default:
 			netdev_err(netdev, "unsupported speed %u\n", speed);
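
The added cases tie the requested ethtool speed to the board's configured speed set: 10GbE is always valid, while the intermediate speed must match whether the device runs the 1000/10000 or 2500/10000 pairing. Reduced to its essence (stand-in enum for illustration; the driver's own XGBE_SPEEDSET_* values live in xgbe.h):

```c
enum example_speedset {
	EXAMPLE_SPEEDSET_1000_10000,
	EXAMPLE_SPEEDSET_2500_10000,
};

static int example_speed_valid(enum example_speedset set, unsigned int speed)
{
	switch (speed) {
	case 10000:
		return 1;	/* valid in either speed set */
	case 2500:
		return set == EXAMPLE_SPEEDSET_2500_10000;
	case 1000:
		return set == EXAMPLE_SPEEDSET_1000_10000;
	default:
		return 0;
	}
}
```
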
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -6,7 +6,7 @@
  *
  * License 1: GPLv2
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  *
  * This file is free software; you may copy, redistribute and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
  *
  * License 2: Modified BSD
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -363,7 +363,7 @@ static int xgbe_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, netdev);
 
 	spin_lock_init(&pdata->lock);
-	mutex_init(&pdata->xpcs_mutex);
+	spin_lock_init(&pdata->xpcs_lock);
 	mutex_init(&pdata->rss_mutex);
 	spin_lock_init(&pdata->tstamp_lock);
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -6,7 +6,7 @@
  *
  * License 1: GPLv2
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  *
  * This file is free software; you may copy, redistribute and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
  *
  * License 2: Modified BSD
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -626,10 +626,22 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
 
 	netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
 
-	/* Interrupt reason must be read and cleared outside of IRQ context */
-	disable_irq_nosync(pdata->an_irq);
+	/* Disable AN interrupts */
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+	/* Save the interrupt(s) that fired */
+	pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
 
-	queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+	if (pdata->an_int) {
+		/* Clear the interrupt(s) that fired and process them */
+		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
+
+		queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+	} else {
+		/* Enable AN interrupts */
+		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
+			    XGBE_AN_INT_MASK);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -673,34 +685,26 @@ static void xgbe_an_state_machine(struct work_struct *work)
 					       struct xgbe_prv_data,
 					       an_work);
 	enum xgbe_an cur_state = pdata->an_state;
-	unsigned int int_reg, int_mask;
 
 	mutex_lock(&pdata->an_mutex);
 
-	/* Read the interrupt */
-	int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
-	if (!int_reg)
+	if (!pdata->an_int)
 		goto out;
 
 next_int:
-	if (int_reg & XGBE_AN_PG_RCV) {
+	if (pdata->an_int & XGBE_AN_PG_RCV) {
 		pdata->an_state = XGBE_AN_PAGE_RECEIVED;
-		int_mask = XGBE_AN_PG_RCV;
-	} else if (int_reg & XGBE_AN_INC_LINK) {
+		pdata->an_int &= ~XGBE_AN_PG_RCV;
+	} else if (pdata->an_int & XGBE_AN_INC_LINK) {
 		pdata->an_state = XGBE_AN_INCOMPAT_LINK;
-		int_mask = XGBE_AN_INC_LINK;
-	} else if (int_reg & XGBE_AN_INT_CMPLT) {
+		pdata->an_int &= ~XGBE_AN_INC_LINK;
+	} else if (pdata->an_int & XGBE_AN_INT_CMPLT) {
 		pdata->an_state = XGBE_AN_COMPLETE;
-		int_mask = XGBE_AN_INT_CMPLT;
+		pdata->an_int &= ~XGBE_AN_INT_CMPLT;
 	} else {
 		pdata->an_state = XGBE_AN_ERROR;
-		int_mask = 0;
 	}
 
-	/* Clear the interrupt to be processed */
-	int_reg &= ~int_mask;
-	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
-
 	pdata->an_result = pdata->an_state;
 
 again:
@@ -740,14 +744,14 @@ static void xgbe_an_state_machine(struct work_struct *work)
 	}
 
 	if (pdata->an_state == XGBE_AN_NO_LINK) {
-		int_reg = 0;
+		pdata->an_int = 0;
 		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
 	} else if (pdata->an_state == XGBE_AN_ERROR) {
 		netdev_err(pdata->netdev,
 			   "error during auto-negotiation, state=%u\n",
 			   cur_state);
 
-		int_reg = 0;
+		pdata->an_int = 0;
 		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
 	}
 
@@ -765,11 +769,12 @@ static void xgbe_an_state_machine(struct work_struct *work)
 	if (cur_state != pdata->an_state)
 		goto again;
 
-	if (int_reg)
+	if (pdata->an_int)
 		goto next_int;
 
 out:
-	enable_irq(pdata->an_irq);
+	/* Enable AN interrupts on the way out */
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK);
 
 	mutex_unlock(&pdata->an_mutex);
 }
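
Rather than bracketing the work item with disable_irq_nosync()/enable_irq(), the ISR now masks auto-negotiation interrupts at the device via MDIO_AN_INTMASK, latches the pending causes into pdata->an_int, and the state machine unmasks on its way out. The general shape of this mask-latch-defer pattern, with placeholder names throughout (none of these registers or types are the XGBE ones):

```c
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Placeholder device and registers, for illustration only */
#define EX_INTSTAT	0x00
#define EX_INTMASK	0x04
#define EX_INT_ALL	0xff

struct ex_dev {
	void __iomem *base;
	u32 pending;
	struct workqueue_struct *wq;
	struct work_struct work;
};

static irqreturn_t ex_isr(int irq, void *data)
{
	struct ex_dev *ed = data;

	writel(0, ed->base + EX_INTMASK);		/* mask at the source */
	ed->pending = readl(ed->base + EX_INTSTAT);	/* latch the causes */
	if (ed->pending)
		queue_work(ed->wq, &ed->work);		/* defer processing */
	else
		writel(EX_INT_ALL, ed->base + EX_INTMASK);
	return IRQ_HANDLED;
}

static void ex_work(struct work_struct *work)
{
	struct ex_dev *ed = container_of(work, struct ex_dev, work);

	/* ... act on ed->pending, looping as new causes are latched ... */
	writel(EX_INT_ALL, ed->base + EX_INTMASK);	/* unmask on the way out */
}
```
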
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -6,7 +6,7 @@
  *
  * License 1: GPLv2
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  *
  * This file is free software; you may copy, redistribute and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
  *
  * License 2: Modified BSD
  *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -673,6 +673,7 @@ struct xgbe_hw_if {
 	u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
 
 	/* For Data Center Bridging config */
+	void (*config_tc)(struct xgbe_prv_data *);
 	void (*config_dcb_tc)(struct xgbe_prv_data *);
 	void (*config_dcb_pfc)(struct xgbe_prv_data *);
 
@@ -773,8 +774,8 @@ struct xgbe_prv_data {
 	/* Overall device lock */
 	spinlock_t lock;
 
-	/* XPCS indirect addressing mutex */
-	struct mutex xpcs_mutex;
+	/* XPCS indirect addressing lock */
+	spinlock_t xpcs_lock;
 
 	/* RSS addressing mutex */
 	struct mutex rss_mutex;
@@ -880,6 +881,7 @@ struct xgbe_prv_data {
 	struct ieee_pfc *pfc;
 	unsigned int q2tc_map[XGBE_MAX_QUEUES];
 	unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
+	u8 num_tcs;
 
 	/* Hardware features of the device */
 	struct xgbe_hw_features hw_feat;
@@ -925,6 +927,7 @@ struct xgbe_prv_data {
 	u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
 
 	/* Auto-negotiation state machine support */
+	unsigned int an_int;
 	struct mutex an_mutex;
 	enum xgbe_an an_result;
 	enum xgbe_an an_state;