Commit 7ff65cde authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to e1000e and ixgbe.
 ...
Alexander Duyck (5):
  ixgbe: Simplify logic for getting traffic class from user priority
  ixgbe: Cleanup unpacking code for DCB
  ixgbe: Populate the prio_tc_map in ixgbe_setup_tc
  ixgbe: Add function for obtaining FCoE TC based on FCoE user priority
  ixgbe: Merge FCoE set_num and cache_ring calls into RSS/DCB config
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ac1ae5f3 d411a936
...@@ -1677,16 +1677,18 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) ...@@ -1677,16 +1677,18 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e_dbg("ANYSTATE -> DOWN\n"); e_dbg("ANYSTATE -> DOWN\n");
} else { } else {
/* /*
* Check several times, if Sync and Config * Check several times, if SYNCH bit and CONFIG
* both are consistently 1 then simply ignore * bit both are consistently 1 then simply ignore
* the Invalid bit and restart Autoneg * the IV bit and restart Autoneg
*/ */
for (i = 0; i < AN_RETRY_COUNT; i++) { for (i = 0; i < AN_RETRY_COUNT; i++) {
udelay(10); udelay(10);
rxcw = er32(RXCW); rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_IV) && if ((rxcw & E1000_RXCW_SYNCH) &&
!((rxcw & E1000_RXCW_SYNCH) && (rxcw & E1000_RXCW_C))
(rxcw & E1000_RXCW_C))) { continue;
if (rxcw & E1000_RXCW_IV) {
mac->serdes_has_link = false; mac->serdes_has_link = false;
mac->serdes_link_state = mac->serdes_link_state =
e1000_serdes_link_down; e1000_serdes_link_down;
......
...@@ -514,6 +514,7 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); ...@@ -514,6 +514,7 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter); extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
extern void e1000e_release_hw_control(struct e1000_adapter *adapter); extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak; extern unsigned int copybreak;
......
...@@ -1897,7 +1897,6 @@ static int e1000_set_coalesce(struct net_device *netdev, ...@@ -1897,7 +1897,6 @@ static int e1000_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec) struct ethtool_coalesce *ec)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 4) && ((ec->rx_coalesce_usecs > 4) &&
...@@ -1916,9 +1915,9 @@ static int e1000_set_coalesce(struct net_device *netdev, ...@@ -1916,9 +1915,9 @@ static int e1000_set_coalesce(struct net_device *netdev,
} }
if (adapter->itr_setting != 0) if (adapter->itr_setting != 0)
ew32(ITR, 1000000000 / (adapter->itr * 256)); e1000e_write_itr(adapter, adapter->itr);
else else
ew32(ITR, 0); e1000e_write_itr(adapter, 0);
return 0; return 0;
} }
......
...@@ -2473,6 +2473,30 @@ static void e1000_set_itr(struct e1000_adapter *adapter) ...@@ -2473,6 +2473,30 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
} }
} }
/**
 * e1000e_write_itr - write the ITR value to the appropriate registers
 * @adapter: address of board private structure
 * @itr: new ITR value to program
 *
 * e1000e_write_itr determines if the adapter is in MSI-X mode
 * and, if so, writes the EITR registers with the ITR value.
 * Otherwise, it writes the ITR value into the ITR register.
 **/
void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* Convert the ITR value to the register representation; 0 disables
	 * interrupt moderation entirely.
	 */
	u32 reg_val = itr ? 1000000000 / (itr * 256) : 0;
	int i;

	if (adapter->msix_entries) {
		/* MSI-X: program the per-vector EITR registers */
		for (i = 0; i < adapter->num_vectors; i++)
			writel(reg_val, hw->hw_addr + E1000_EITR_82574(i));
	} else {
		/* legacy/MSI: a single shared ITR register */
		ew32(ITR, reg_val);
	}
}
/** /**
* e1000_alloc_queues - Allocate memory for all rings * e1000_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize * @adapter: board private structure to initialize
...@@ -3059,7 +3083,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ...@@ -3059,7 +3083,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* irq moderation */ /* irq moderation */
ew32(RADV, adapter->rx_abs_int_delay); ew32(RADV, adapter->rx_abs_int_delay);
if ((adapter->itr_setting != 0) && (adapter->itr != 0)) if ((adapter->itr_setting != 0) && (adapter->itr != 0))
ew32(ITR, 1000000000 / (adapter->itr * 256)); e1000e_write_itr(adapter, adapter->itr);
ctrl_ext = er32(CTRL_EXT); ctrl_ext = er32(CTRL_EXT);
/* Auto-Mask interrupts upon ICR access */ /* Auto-Mask interrupts upon ICR access */
...@@ -3486,14 +3510,14 @@ void e1000e_reset(struct e1000_adapter *adapter) ...@@ -3486,14 +3510,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev, dev_info(&adapter->pdev->dev,
"Interrupt Throttle Rate turned off\n"); "Interrupt Throttle Rate turned off\n");
adapter->flags2 |= FLAG2_DISABLE_AIM; adapter->flags2 |= FLAG2_DISABLE_AIM;
ew32(ITR, 0); e1000e_write_itr(adapter, 0);
} }
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) { } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
dev_info(&adapter->pdev->dev, dev_info(&adapter->pdev->dev,
"Interrupt Throttle Rate turned on\n"); "Interrupt Throttle Rate turned on\n");
adapter->flags2 &= ~FLAG2_DISABLE_AIM; adapter->flags2 &= ~FLAG2_DISABLE_AIM;
adapter->itr = 20000; adapter->itr = 20000;
ew32(ITR, 1000000000 / (adapter->itr * 256)); e1000e_write_itr(adapter, adapter->itr);
} }
} }
...@@ -4576,7 +4600,7 @@ static void e1000_watchdog_task(struct work_struct *work) ...@@ -4576,7 +4600,7 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->gorc - adapter->gotc) / 10000; adapter->gorc - adapter->gotc) / 10000;
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
ew32(ITR, 1000000000 / (itr * 256)); e1000e_write_itr(adapter, itr);
} }
/* Cause software interrupt to ensure Rx ring is cleaned */ /* Cause software interrupt to ensure Rx ring is cleaned */
......
...@@ -707,6 +707,7 @@ extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); ...@@ -707,6 +707,7 @@ extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info); struct netdev_fcoe_hbainfo *info);
extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */ #endif /* IXGBE_FCOE */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
......
...@@ -180,67 +180,79 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, ...@@ -180,67 +180,79 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
{ {
int i; struct tc_configuration *tc_config = &cfg->tc_config[0];
int tc;
*pfc_en = 0; for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) if (tc_config[tc].dcb_pfc != pfc_disabled)
*pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i; *pfc_en |= 1 << tc;
}
} }
void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
u16 *refill) u16 *refill)
{ {
struct tc_bw_alloc *p; struct tc_configuration *tc_config = &cfg->tc_config[0];
int i; int tc;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
p = &cfg->tc_config[i].path[direction]; refill[tc] = tc_config[tc].path[direction].data_credits_refill;
refill[i] = p->data_credits_refill;
}
} }
void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
{ {
int i; struct tc_configuration *tc_config = &cfg->tc_config[0];
int tc;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
max[i] = cfg->tc_config[i].desc_credits_max; max[tc] = tc_config[tc].desc_credits_max;
} }
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
u8 *bwgid) u8 *bwgid)
{ {
struct tc_bw_alloc *p; struct tc_configuration *tc_config = &cfg->tc_config[0];
int i; int tc;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
p = &cfg->tc_config[i].path[direction]; bwgid[tc] = tc_config[tc].path[direction].bwg_id;
bwgid[i] = p->bwg_id;
}
} }
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
u8 *ptype) u8 *ptype)
{ {
struct tc_bw_alloc *p; struct tc_configuration *tc_config = &cfg->tc_config[0];
int i; int tc;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
p = &cfg->tc_config[i].path[direction]; ptype[tc] = tc_config[tc].path[direction].prio_type;
ptype[i] = p->prio_type; }
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
{
struct tc_configuration *tc_config = &cfg->tc_config[0];
u8 prio_mask = 1 << up;
u8 tc;
/*
* Test for TCs 7 through 1 and report the first match we find. If
* we find no match we can assume that the TC is 0 since the TC must
* be set for all user priorities
*/
for (tc = MAX_TRAFFIC_CLASS - 1; tc; tc--) {
if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
break;
} }
return tc;
} }
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
{ {
int i, up; u8 up;
unsigned long bitmap;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { for (up = 0; up < MAX_USER_PRIORITY; up++)
bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap; map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY)
map[up] = i;
}
} }
/** /**
......
...@@ -146,6 +146,7 @@ void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); ...@@ -146,6 +146,7 @@ void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB credits calculation */ /* DCB credits calculation */
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
......
...@@ -151,34 +151,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) ...@@ -151,34 +151,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{ {
int err = 0;
u8 prio_tc[MAX_USER_PRIORITY] = {0};
int i;
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
int err = 0;
/* Fail command if not in CEE mode */ /* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1; return 1;
/* verify there is something to do, if not then exit */ /* verify there is something to do, if not then exit */
if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
goto out;
if (state > 0) {
err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
} else {
err = ixgbe_setup_tc(netdev, 0);
}
if (err)
goto out; goto out;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) err = ixgbe_setup_tc(netdev,
netdev_set_prio_tc_map(netdev, i, prio_tc[i]); state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
out: out:
return err ? 1 : 0; return !!err;
} }
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
...@@ -584,9 +571,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, ...@@ -584,9 +571,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (err) if (err)
goto err_out; goto err_out;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
err_out: err_out:
return err; return err;
......
...@@ -960,3 +960,18 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, ...@@ -960,3 +960,18 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
return 0; return 0;
} }
/**
 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter: pointer to the device adapter structure
 *
 * Looks up the traffic class for the FCoE user priority via the netdev
 * prio-to-TC map.
 *
 * Return: TC that FCoE is mapped to, or 0 when built without DCB support
 */
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
#else
	return 0;
#endif
}
...@@ -28,29 +28,7 @@ ...@@ -28,29 +28,7 @@
#include "ixgbe.h" #include "ixgbe.h"
#include "ixgbe_sriov.h" #include "ixgbe_sriov.h"
/**
* ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for RSS to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i;
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
return false;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->reg_idx = i;
return true;
}
#ifdef CONFIG_IXGBE_DCB #ifdef CONFIG_IXGBE_DCB
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ /* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
unsigned int *tx, unsigned int *rx) unsigned int *tx, unsigned int *rx)
...@@ -136,39 +114,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) ...@@ -136,39 +114,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
return true; return true;
} }
#endif
#ifdef IXGBE_FCOE #endif
/**
* ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for FCoE mode to the assigned rings.
*
*/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
int i;
u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
ixgbe_cache_ring_rss(adapter);
fcoe_rx_i = f->offset;
fcoe_tx_i = f->offset;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
adapter->rx_ring[f->offset + i]->reg_idx = fcoe_rx_i;
adapter->tx_ring[f->offset + i]->reg_idx = fcoe_tx_i;
}
return true;
}
#endif /* IXGBE_FCOE */
/** /**
* ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
* @adapter: board private structure to initialize * @adapter: board private structure to initialize
...@@ -187,6 +134,28 @@ static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) ...@@ -187,6 +134,28 @@ static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
return false; return false;
} }
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int ring;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return false;

	/* RSS uses an identity mapping: ring N owns register index N */
	for (ring = 0; ring < adapter->num_rx_queues; ring++)
		adapter->rx_ring[ring]->reg_idx = ring;

	for (ring = 0; ring < adapter->num_tx_queues; ring++)
		adapter->tx_ring[ring]->reg_idx = ring;

	return true;
}
/** /**
* ixgbe_cache_ring_register - Descriptor ring to register mapping * ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize * @adapter: board private structure to initialize
...@@ -212,13 +181,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) ...@@ -212,13 +181,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
return; return;
#endif #endif
#ifdef IXGBE_FCOE ixgbe_cache_ring_rss(adapter);
if (ixgbe_cache_ring_fcoe(adapter))
return;
#endif /* IXGBE_FCOE */
if (ixgbe_cache_ring_rss(adapter))
return;
} }
/** /**
...@@ -234,6 +197,74 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) ...@@ -234,6 +197,74 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
return false; return false;
} }
#define IXGBE_RSS_16Q_MASK 0xF
#define IXGBE_RSS_8Q_MASK 0x7
#define IXGBE_RSS_4Q_MASK 0x3
#define IXGBE_RSS_2Q_MASK 0x1
#define IXGBE_RSS_DISABLED_MASK 0x0
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_queues - Allocate queues for a DCB-enabled device
 * @adapter: board private structure to initialize
 *
 * Computes the per-traffic-class RSS queue count from the netdev TC
 * configuration and the MAC's limits, records the RSS (and, when
 * enabled, FCoE) ring-feature indices/offsets, programs the netdev
 * TC-to-queue mapping, and sets the adapter's total Tx/Rx queue counts.
 *
 * Return: true when DCB queueing was configured, false when fewer than
 * two traffic classes are in use (caller falls back to another mode).
 */
static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	/* each TC gets rss_i queues, laid out contiguously */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
#endif
/** /**
* ixgbe_set_rss_queues - Allocate queues for RSS * ixgbe_set_rss_queues - Allocate queues for RSS
* @adapter: board private structure to initialize * @adapter: board private structure to initialize
...@@ -257,7 +288,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) ...@@ -257,7 +288,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
rss_i = f->limit; rss_i = f->limit;
f->indices = rss_i; f->indices = rss_i;
f->mask = 0xF; f->mask = IXGBE_RSS_16Q_MASK;
/* /*
* Use Flow Director in addition to RSS to ensure the best * Use Flow Director in addition to RSS to ensure the best
...@@ -271,93 +302,41 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) ...@@ -271,93 +302,41 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
rss_i = max_t(u16, rss_i, f->indices); rss_i = max_t(u16, rss_i, f->indices);
} }
adapter->num_rx_queues = rss_i;
adapter->num_tx_queues = rss_i;
return true;
}
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
/** /*
* ixgbe_set_fcoe_queues - Allocate queues for Fiber Channel over Ethernet (FCoE) * FCoE can exist on the same rings as standard network traffic
* @adapter: board private structure to initialize * however it is preferred to avoid that if possible. In order
* * to get the best performance we allocate as many FCoE queues
* FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. * as we can and we place them at the end of the ring array to
* Offset is used as the index of the first rx queue used by FCoE. * avoid sharing queues with standard RSS on systems with 24 or
**/ * more CPUs.
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) */
{ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; struct net_device *dev = adapter->netdev;
u16 fcoe_i;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) f = &adapter->ring_feature[RING_F_FCOE];
return false;
f->indices = min_t(int, num_online_cpus(), f->limit); /* merge FCoE queues with RSS queues */
fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
adapter->num_rx_queues = 1; /* limit indices to rss_i if MSI-X is disabled */
adapter->num_tx_queues = 1; if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
fcoe_i = rss_i;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { /* attempt to reserve some queues for just FCoE */
e_info(probe, "FCoE enabled with RSS\n"); f->indices = min_t(u16, fcoe_i, f->limit);
ixgbe_set_rss_queues(adapter); f->offset = fcoe_i - f->indices;
rss_i = max_t(u16, fcoe_i, rss_i);
} }
/* adding FCoE rx rings to the end */
f->offset = adapter->num_rx_queues;
adapter->num_rx_queues += f->indices;
adapter->num_tx_queues += f->indices;
return true;
}
#endif /* IXGBE_FCOE */ #endif /* IXGBE_FCOE */
adapter->num_rx_queues = rss_i;
/* Artificial max queue cap per traffic class in DCB mode */ adapter->num_tx_queues = rss_i;
#define DCB_QUEUE_CAP 8
#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
int per_tc_q, q, i, offset = 0;
struct net_device *dev = adapter->netdev;
int tcs = netdev_get_num_tc(dev);
if (!tcs)
return false;
/* Map queue offset and counts onto allocated tx queues */
per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
q = min_t(int, num_online_cpus(), per_tc_q);
for (i = 0; i < tcs; i++) {
netdev_set_tc_queue(dev, i, q, offset);
offset += q;
}
adapter->num_tx_queues = q * tcs;
adapter->num_rx_queues = q * tcs;
#ifdef IXGBE_FCOE
/* FCoE enabled queues require special configuration indexed
* by feature specific indices and offset. Here we map FCoE
* indices onto the DCB queue pairs allowing FCoE to own
* configuration later.
*/
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
u8 prio_tc[MAX_USER_PRIORITY] = {0};
int tc;
struct ixgbe_ring_feature *f =
&adapter->ring_feature[RING_F_FCOE];
ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
tc = prio_tc[adapter->fcoe.up];
f->indices = dev->tc_to_txq[tc].count;
f->offset = dev->tc_to_txq[tc].offset;
}
#endif
return true; return true;
} }
#endif
/** /**
* ixgbe_set_num_queues - Allocate queues for device, feature dependent * ixgbe_set_num_queues - Allocate queues for device, feature dependent
...@@ -386,11 +365,6 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) ...@@ -386,11 +365,6 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
goto done; goto done;
#endif #endif
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
#endif /* IXGBE_FCOE */
if (ixgbe_set_rss_queues(adapter)) if (ixgbe_set_rss_queues(adapter))
goto done; goto done;
......
...@@ -3610,16 +3610,17 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) ...@@ -3610,16 +3610,17 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (hw->mac.type != ixgbe_mac_82598EB) { if (hw->mac.type != ixgbe_mac_82598EB) {
int i; int i;
u32 reg = 0; u32 reg = 0;
u8 msb = 0;
u8 rss_i = adapter->netdev->tc_to_txq[0].count - 1;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { while (rss_i) {
u8 msb = 0; msb++;
u8 cnt = adapter->netdev->tc_to_txq[i].count; rss_i >>= 1;
}
while (cnt >>= 1)
msb++;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
reg |= msb << IXGBE_RQTC_SHIFT_TC(i); reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
}
IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
} }
} }
...@@ -3646,18 +3647,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) ...@@ -3646,18 +3647,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */ /* FCoE traffic class uses FCOE jumbo frames */
if (dev->features & NETIF_F_FCOE_MTU) { if ((dev->features & NETIF_F_FCOE_MTU) &&
int fcoe_pb = 0; (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(pb == ixgbe_fcoe_get_tc(adapter)))
#ifdef CONFIG_IXGBE_DCB tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
#endif #endif
if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
}
#endif
/* Calculate delay value for device */ /* Calculate delay value for device */
switch (hw->mac.type) { switch (hw->mac.type) {
case ixgbe_mac_X540: case ixgbe_mac_X540:
...@@ -6595,6 +6590,31 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) ...@@ -6595,6 +6590,31 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
return; return;
} }
/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 */
static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
	struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
	u8 up;

	for (up = 0; up < MAX_USER_PRIORITY; up++) {
		u8 tc;

		/* CEE config takes precedence; otherwise fall back to the
		 * IEEE ETS mapping, defaulting to TC 0 when none exists.
		 */
		if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
			tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, up);
		else
			tc = ets ? ets->prio_tc[up] : 0;

		netdev_set_prio_tc_map(dev, up, tc);
	}
}
/** /**
* ixgbe_setup_tc - configure net_device for multiple traffic classes * ixgbe_setup_tc - configure net_device for multiple traffic classes
* *
...@@ -6633,6 +6653,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ...@@ -6633,6 +6653,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (tc) { if (tc) {
netdev_set_num_tc(dev, tc); netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
adapter->flags |= IXGBE_FLAG_DCB_ENABLED; adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
...@@ -6642,6 +6664,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ...@@ -6642,6 +6664,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
} }
} else { } else {
netdev_reset_tc(dev); netdev_reset_tc(dev);
if (adapter->hw.mac.type == ixgbe_mac_82598EB) if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
...@@ -7005,7 +7028,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, ...@@ -7005,7 +7028,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
#endif #endif
if (ii->mac == ixgbe_mac_82598EB) if (ii->mac == ixgbe_mac_82598EB)
#ifdef CONFIG_IXGBE_DCB
indices = min_t(unsigned int, indices, MAX_TRAFFIC_CLASS * 4);
#else
indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
#endif
else else
indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment