Commit e901acd6 authored by John Fastabend, committed by Jeff Kirsher

ixgbe: DCB use existing TX and RX queues

The number of TX and RX queues allocated depends on the device
type, the current feature set, the number of online CPUs, and
various compile flags.

To enable DCB with multiple queues and allow it to coexist with
all the currently implemented features, it has to set up a valid
queue count. This is done at init time using the FDIR and RSS
max queue counts and allowing each TC to allocate a queue per
CPU.

DCB will now use the available queues up to (8 x TCs). This is a
somewhat arbitrary cap, but it allows DCB to use up to 64 queues.
It is easy to increase this later if needed.

This is prep work to enable Flow Director with DCB. After this,
DCB can easily coexist with the existing features and no longer
needs its own DCB feature ring.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 8b1c0b24
@@ -244,7 +244,6 @@ struct ixgbe_ring {
 enum ixgbe_ring_f_enum {
 	RING_F_NONE = 0,
-	RING_F_DCB,
 	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
 	RING_F_RSS,
 	RING_F_FDIR,
@@ -255,7 +254,6 @@ enum ixgbe_ring_f_enum {
 	RING_F_ARRAY_SIZE      /* must be last in enum set */
 };
 
-#define IXGBE_MAX_DCB_INDICES  64
 #define IXGBE_MAX_RSS_INDICES  16
 #define IXGBE_MAX_VMDQ_INDICES 64
 #define IXGBE_MAX_FDIR_INDICES 64
@@ -4417,72 +4417,72 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 		return false;
 
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-#ifdef CONFIG_IXGBE_DCB
-		int tc;
-		struct net_device *dev = adapter->netdev;
-
-		tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
-		f->indices = dev->tc_to_txq[tc].count;
-		f->mask = dev->tc_to_txq[tc].offset;
-#endif
-	} else {
-		f->indices = min((int)num_online_cpus(), f->indices);
-
-		adapter->num_rx_queues = 1;
-		adapter->num_tx_queues = 1;
-
-		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-			e_info(probe, "FCoE enabled with RSS\n");
-			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
-				ixgbe_set_fdir_queues(adapter);
-			else
-				ixgbe_set_rss_queues(adapter);
-		}
-		/* adding FCoE rx rings to the end */
-		f->mask = adapter->num_rx_queues;
-		adapter->num_rx_queues += f->indices;
-		adapter->num_tx_queues += f->indices;
+	f->indices = min((int)num_online_cpus(), f->indices);
+
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+		e_info(probe, "FCoE enabled with RSS\n");
+		if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+		    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+			ixgbe_set_fdir_queues(adapter);
+		else
+			ixgbe_set_rss_queues(adapter);
 	}
+	/* adding FCoE rx rings to the end */
+	f->mask = adapter->num_rx_queues;
+	adapter->num_rx_queues += f->indices;
+	adapter->num_tx_queues += f->indices;
 
 	return true;
 }
 #endif /* IXGBE_FCOE */
 
+/* Artificial max queue cap per traffic class in DCB mode */
+#define DCB_QUEUE_CAP 8
+
 #ifdef CONFIG_IXGBE_DCB
 static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 {
-	bool ret = false;
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
-	int tcs = netdev_get_num_tc(adapter->netdev);
-	int max_q, i, q;
+	int per_tc_q, q, i, offset = 0;
+	struct net_device *dev = adapter->netdev;
+	int tcs = netdev_get_num_tc(dev);
 
-	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !tcs)
-		return ret;
-
-	max_q = adapter->netdev->num_tx_queues / tcs;
+	if (!tcs)
+		return false;
 
-	f->indices = 0;
+	/* Map queue offset and counts onto allocated tx queues */
+	per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
+	q = min((int)num_online_cpus(), per_tc_q);
+
 	for (i = 0; i < tcs; i++) {
-		q = min((int)num_online_cpus(), max_q);
-		f->indices += q;
+		netdev_set_prio_tc_map(dev, i, i);
+		netdev_set_tc_queue(dev, i, q, offset);
+		offset += q;
 	}
-	f->mask = 0x7 << 3;
-	adapter->num_rx_queues = f->indices;
-	adapter->num_tx_queues = f->indices;
-	ret = true;
+
+	adapter->num_tx_queues = q * tcs;
+	adapter->num_rx_queues = q * tcs;
 
 #ifdef IXGBE_FCOE
-	/* FCoE enabled queues require special configuration done through
-	 * configure_fcoe() and others. Here we map FCoE indices onto the
-	 * DCB queue pairs allowing FCoE to own configuration later.
+	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and mask. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
 	 */
-	ixgbe_set_fcoe_queues(adapter);
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+		int tc;
+		struct ixgbe_ring_feature *f =
+				&adapter->ring_feature[RING_F_FCOE];
+
+		tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+		f->indices = dev->tc_to_txq[tc].count;
+		f->mask = dev->tc_to_txq[tc].offset;
+	}
 #endif
-
-	return ret;
+	return true;
 }
 #endif
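For context, netdev_set_prio_tc_map() and netdev_set_tc_queue() are the stack's generic TC hooks: the loop above records a per-TC {offset, count} pair in dev->tc_to_txq[], and the FCoE block then claims the range belonging to the TC of its user priority. The user-space sketch below models that bookkeeping; the struct, TC count, per-TC queue count, and FCoE TC are hypothetical stand-ins, not the kernel's own definitions.

#include <stdio.h>

/* Minimal model of the kernel's dev->tc_to_txq[] bookkeeping */
struct tc_txq {
	unsigned int offset;
	unsigned int count;
};

int main(void)
{
	struct tc_txq tc_to_txq[8];
	unsigned int tcs = 8, q = 4, offset = 0, i;
	unsigned int fcoe_tc = 3; /* hypothetical TC for adapter->fcoe.up */

	/* Mirrors the loop above: netdev_set_tc_queue(dev, i, q, offset)
	 * records q consecutive queues per TC at the running offset.
	 */
	for (i = 0; i < tcs; i++) {
		tc_to_txq[i].offset = offset;
		tc_to_txq[i].count = q;
		offset += q;
	}

	/* The FCoE feature then owns exactly its TC's slice, which is
	 * what f->indices and f->mask capture in the patch.
	 */
	printf("FCoE owns %u queues starting at offset %u\n",
	       tc_to_txq[fcoe_tc].count, tc_to_txq[fcoe_tc].offset);
	return 0;
}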
@@ -5172,7 +5172,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
 	adapter->ring_feature[RING_F_RSS].indices = rss;
 	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
-	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
 		if (hw->device_id == IXGBE_DEV_ID_82598AT)
@@ -7213,10 +7212,8 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
  */
 int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 {
-	unsigned int q, i, offset = 0;
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	int max_q = adapter->netdev->num_tx_queues / tc;
 
 	/* If DCB is enabled do not remove traffic classes, multiple
 	 * traffic classes are required to implement DCB
@@ -7242,14 +7239,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	else
 		netdev_reset_tc(dev);
 
-	/* Partition Tx queues evenly amongst traffic classes */
-	for (i = 0; i < tc; i++) {
-		q = min((int)num_online_cpus(), max_q);
-		netdev_set_prio_tc_map(dev, i, i);
-		netdev_set_tc_queue(dev, i, q, offset);
-		offset += q;
-	}
-
 	ixgbe_init_interrupt_scheme(adapter);
 	ixgbe_validate_rtr(adapter, tc);
 	if (netif_running(dev))
@@ -7436,14 +7425,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	pci_set_master(pdev);
 	pci_save_state(pdev);
 
+#ifdef CONFIG_IXGBE_DCB
+	indices *= MAX_TRAFFIC_CLASS;
+#endif
+
 	if (ii->mac == ixgbe_mac_82598EB)
 		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
 	else
 		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-#if defined(CONFIG_DCB)
-	indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
-#elif defined(IXGBE_FCOE)
+
+#ifdef IXGBE_FCOE
 	indices += min_t(unsigned int, num_possible_cpus(),
 			 IXGBE_MAX_FCOE_INDICES);
 #endif
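Assuming MAX_TRAFFIC_CLASS is 8 and the FDIR and FCoE caps are 64 and 8, as defined elsewhere in this driver, the probe-time sizing above works out as in this small sketch; the CPU count is a hypothetical stand-in for num_possible_cpus().

#include <stdio.h>

/* Values as assumed from elsewhere in the ixgbe driver */
#define MAX_TRAFFIC_CLASS	8
#define IXGBE_MAX_FDIR_INDICES	64
#define IXGBE_MAX_FCOE_INDICES	8

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int possible_cpus = 16; /* hypothetical num_possible_cpus() */
	unsigned int indices = possible_cpus;

	indices *= MAX_TRAFFIC_CLASS;                     /* 16 * 8 = 128 */
	indices = min_u(indices, IXGBE_MAX_FDIR_INDICES); /* clamped to 64 */
	indices += min_u(possible_cpus, IXGBE_MAX_FCOE_INDICES); /* + 8 */

	printf("queues requested at probe: %u\n", indices); /* prints 72 */
	return 0;
}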