Commit e4655e4a authored by David S. Miller's avatar David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-10-13

This series contains updates to mqprio and i40e.

Amritha introduces a new hardware offload mode in tc/mqprio where the TCs,
the queue configurations and bandwidth rate limits are offloaded to the
hardware. The existing mqprio framework is extended to configure the queue
counts and layout and also added support for rate limiting. This is
achieved through new netlink attributes for the 'mode' option which takes
values such as 'dcb' (default) and 'channel' and a 'shaper' option for
QoS attributes such as bandwidth rate limits in hw mode 1.  Legacy devices
can fall back to the existing setup supporting hw mode 1 without these
additional options where only the TCs are offloaded and then the 'mode'
and 'shaper' options defaults to DCB support.  The i40e driver enables the
new mqprio hardware offload mechanism factoring the TCs, queue
configuration and bandwidth rates by creating HW channel VSIs.
In this new mode, the priority to traffic class mapping and the user
specified queue ranges are used to configure the traffic class when the
'mode' option is set to 'channel'. This is achieved by creating HW
channels(VSI). A new channel is created for each of the traffic class
configuration offloaded via mqprio framework except for the first TC (TC0)
which is for the main VSI. TC0 for the main VSI is also reconfigured as
per user provided queue parameters. Finally, bandwidth rate limits are set
on these traffic classes through the shaper attribute by sending these
rates in addition to the number of TCs and the queue configurations.

Colin Ian King makes an array of constant values "constant".

Alan fixes an issue where, on some firmware versions, we were failing to
actually fill out the phy_types which caused ethtool to not report any
link types.  Also hardened against a potentially malicious VF by not
letting the VF reset itself after requesting to change the number of
queues (via ethtool); instead, the PF resets the VF to institute the
requested changes.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents ae0783b1 17a9422d
...@@ -54,6 +54,7 @@ ...@@ -54,6 +54,7 @@
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/net_tstamp.h> #include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h> #include <linux/ptp_clock_kernel.h>
#include <net/pkt_cls.h>
#include "i40e_type.h" #include "i40e_type.h"
#include "i40e_prototype.h" #include "i40e_prototype.h"
#include "i40e_client.h" #include "i40e_client.h"
...@@ -87,6 +88,7 @@ ...@@ -87,6 +88,7 @@
#define I40E_AQ_LEN 256 #define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8 #define I40E_MAX_USER_PRIORITY 8
#define I40E_MAX_QUEUES_PER_CH 64
#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10
...@@ -126,6 +128,10 @@ ...@@ -126,6 +128,10 @@
/* default to trying for four seconds */ /* default to trying for four seconds */
#define I40E_TRY_LINK_TIMEOUT (4 * HZ) #define I40E_TRY_LINK_TIMEOUT (4 * HZ)
/* BW rate limiting */
#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */
/* driver state flags */ /* driver state flags */
enum i40e_state_t { enum i40e_state_t {
__I40E_TESTING, __I40E_TESTING,
...@@ -157,6 +163,8 @@ enum i40e_state_t { ...@@ -157,6 +163,8 @@ enum i40e_state_t {
__I40E_STATE_SIZE__, __I40E_STATE_SIZE__,
}; };
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
/* VSI state flags */ /* VSI state flags */
enum i40e_vsi_state_t { enum i40e_vsi_state_t {
__I40E_VSI_DOWN, __I40E_VSI_DOWN,
...@@ -338,6 +346,25 @@ struct i40e_flex_pit { ...@@ -338,6 +346,25 @@ struct i40e_flex_pit {
u8 pit_index; u8 pit_index;
}; };
struct i40e_channel {
struct list_head list;
bool initialized;
u8 type;
u16 vsi_number; /* Assigned VSI number from AQ 'Add VSI' response */
u16 stat_counter_idx;
u16 base_queue;
u16 num_queue_pairs; /* Requested by user */
u16 seid;
u8 enabled_tc;
struct i40e_aqc_vsi_properties_data info;
u64 max_tx_rate;
/* track this channel belongs to which VSI */
struct i40e_vsi *parent_vsi;
};
/* struct that defines the Ethernet device */ /* struct that defines the Ethernet device */
struct i40e_pf { struct i40e_pf {
struct pci_dev *pdev; struct pci_dev *pdev;
...@@ -454,6 +481,7 @@ struct i40e_pf { ...@@ -454,6 +481,7 @@ struct i40e_pf {
#define I40E_FLAG_CLIENT_RESET BIT(26) #define I40E_FLAG_CLIENT_RESET BIT(26)
#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27) #define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27)
#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28) #define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28)
#define I40E_FLAG_TC_MQPRIO BIT(29)
struct i40e_client_instance *cinst; struct i40e_client_instance *cinst;
bool stat_offsets_loaded; bool stat_offsets_loaded;
...@@ -534,6 +562,8 @@ struct i40e_pf { ...@@ -534,6 +562,8 @@ struct i40e_pf {
u32 ioremap_len; u32 ioremap_len;
u32 fd_inv; u32 fd_inv;
u16 phy_led_val; u16 phy_led_val;
u16 override_q_count;
}; };
/** /**
...@@ -677,6 +707,7 @@ struct i40e_vsi { ...@@ -677,6 +707,7 @@ struct i40e_vsi {
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
s16 vf_id; /* Virtual function ID for SRIOV VSIs */ s16 vf_id; /* Virtual function ID for SRIOV VSIs */
struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
struct i40e_tc_configuration tc_config; struct i40e_tc_configuration tc_config;
struct i40e_aqc_vsi_properties_data info; struct i40e_aqc_vsi_properties_data info;
...@@ -698,6 +729,16 @@ struct i40e_vsi { ...@@ -698,6 +729,16 @@ struct i40e_vsi {
bool current_isup; /* Sync 'link up' logging */ bool current_isup; /* Sync 'link up' logging */
enum i40e_aq_link_speed current_speed; /* Sync link speed logging */ enum i40e_aq_link_speed current_speed; /* Sync link speed logging */
/* channel specific fields */
u16 cnt_q_avail; /* num of queues available for channel usage */
u16 orig_rss_size;
u16 current_rss_size;
bool reconfig_rss;
u16 next_base_queue; /* next queue to be used for channel setup */
struct list_head ch_list;
void *priv; /* client driver data reference. */ void *priv; /* client driver data reference. */
/* VSI specific handlers */ /* VSI specific handlers */
...@@ -1002,4 +1043,7 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) ...@@ -1002,4 +1043,7 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{ {
return !!vsi->xdp_prog; return !!vsi->xdp_prog;
} }
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
#endif /* _I40E_H_ */ #endif /* _I40E_H_ */
...@@ -1611,8 +1611,13 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, ...@@ -1611,8 +1611,13 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (report_init) { if (report_init) {
if (hw->mac.type == I40E_MAC_XL710 && if (hw->mac.type == I40E_MAC_XL710 &&
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
status = i40e_aq_get_link_info(hw, true, NULL, NULL); status = i40e_aq_get_link_info(hw, true, NULL, NULL);
} else {
hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
hw->phy.phy_types |=
((u64)abilities->phy_type_ext << 32);
}
} }
return status; return status;
......
...@@ -798,8 +798,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, ...@@ -798,8 +798,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
*/ */
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
BIT_ULL(__I40E_PF_RESET_REQUESTED));
} }
vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
......
...@@ -36,7 +36,9 @@ ...@@ -36,7 +36,9 @@
static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
u32 reg, u32 mask) u32 reg, u32 mask)
{ {
const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; static const u32 patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
u32 pat, val, orig_val; u32 pat, val, orig_val;
int i; int i;
......
...@@ -2652,7 +2652,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, ...@@ -2652,7 +2652,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) { switch (cmd->cmd) {
case ETHTOOL_GRXRINGS: case ETHTOOL_GRXRINGS:
cmd->data = vsi->num_queue_pairs; cmd->data = vsi->rss_size;
ret = 0; ret = 0;
break; break;
case ETHTOOL_GRXFH: case ETHTOOL_GRXFH:
...@@ -3897,6 +3897,12 @@ static int i40e_set_channels(struct net_device *dev, ...@@ -3897,6 +3897,12 @@ static int i40e_set_channels(struct net_device *dev,
if (vsi->type != I40E_VSI_MAIN) if (vsi->type != I40E_VSI_MAIN)
return -EINVAL; return -EINVAL;
/* We do not support setting channels via ethtool when TCs are
* configured through mqprio
*/
if (pf->flags & I40E_FLAG_TC_MQPRIO)
return -EINVAL;
/* verify they are not requesting separate vectors */ /* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count) if (!count || ch->rx_count || ch->tx_count)
return -EINVAL; return -EINVAL;
......
...@@ -1588,6 +1588,170 @@ static int i40e_set_mac(struct net_device *netdev, void *p) ...@@ -1588,6 +1588,170 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return 0; return 0;
} }
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed, or NULL to leave the key unchanged
 * @lut: RSS lookup table, or NULL to leave the LUT unchanged
 * @lut_size: size of @lut in bytes
 *
 * Programs the RSS hash key and/or lookup table for @vsi through the
 * admin queue. Returns 0 on success or the AQ status on failure.
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_set_rss_key(hw, vsi->id,
					  (struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		/* The main VSI programs the PF-wide LUT; others use a VSI LUT */
		bool pf_lut = (vsi->type == I40E_VSI_MAIN);

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 *
 * Derives an RSS size from the PF allocation if none is set, then
 * programs the hash key and LUT (user-supplied values take precedence
 * over defaults) via the admin queue. No-op when the HW lacks AQ RSS.
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;

	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Prefer a user-configured lookup table; otherwise build default */
	if (!vsi->rss_lut_user)
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	else
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);

	/* Prefer a user-configured hash key; otherwise use a random one */
	if (!vsi->rss_hkey_user)
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	else
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);

	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: bitmap of enabled traffic classes (BIT(i) per TC; a zero
 *              value falls back to TC0 only)
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 * Only TC0's queues belong to the main VSI here; the remaining TCs are
 * handled by separate channel VSIs, so only TC[0].qmap is programmed into
 * the VSI context. Also reconfigures RSS sized to the largest per-TC queue
 * count, and records how many queues remain for channel VSIs.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	/* mqprio channel setup is only supported on the main (LAN) VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* TC0's queue count sizes the main VSI's contiguous queue map */
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	/* qmap encodes TC0's offset (0) and log2 queue count */
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			/* track the largest TC queue count for RSS sizing */
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs: offset/qcount still hold the values
	 * of the last enabled TC, i.e. the end of the used queue range.
	 */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	/* flag so the RSS sizing is restored when the qdisc is destroyed */
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
/** /**
* i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
* @vsi: the VSI being setup * @vsi: the VSI being setup
...@@ -1626,7 +1790,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, ...@@ -1626,7 +1790,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
numtc = 1; numtc = 1;
} }
} else { } else {
/* At least TC0 is enabled in case of non-DCB case */ /* At least TC0 is enabled in non-DCB, non-MQPRIO case */
numtc = 1; numtc = 1;
} }
...@@ -2881,7 +3045,7 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) ...@@ -2881,7 +3045,7 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{ {
int cpu; int cpu;
if (!ring->q_vector || !ring->netdev) if (!ring->q_vector || !ring->netdev || ring->ch)
return; return;
/* We only initialize XPS once, so as not to overwrite user settings */ /* We only initialize XPS once, so as not to overwrite user settings */
...@@ -2944,7 +3108,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) ...@@ -2944,7 +3108,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
* initialization. This has to be done regardless of * initialization. This has to be done regardless of
* DCB as by default everything is mapped to TC0. * DCB as by default everything is mapped to TC0.
*/ */
tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
if (ring->ch)
tx_ctx.rdylist =
le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
else
tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
tx_ctx.rdylist_act = 0; tx_ctx.rdylist_act = 0;
/* clear the context in the HMC */ /* clear the context in the HMC */
...@@ -2966,12 +3137,23 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) ...@@ -2966,12 +3137,23 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
} }
/* Now associate this queue with this PCI function */ /* Now associate this queue with this PCI function */
if (vsi->type == I40E_VSI_VMDQ2) { if (ring->ch) {
qtx_ctl = I40E_QTX_CTL_VM_QUEUE; if (ring->ch->type == I40E_VSI_VMDQ2)
qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
I40E_QTX_CTL_VFVM_INDX_MASK; else
return -EINVAL;
qtx_ctl |= (ring->ch->vsi_number <<
I40E_QTX_CTL_VFVM_INDX_SHIFT) &
I40E_QTX_CTL_VFVM_INDX_MASK;
} else { } else {
qtx_ctl = I40E_QTX_CTL_PF_QUEUE; if (vsi->type == I40E_VSI_VMDQ2) {
qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
I40E_QTX_CTL_VFVM_INDX_MASK;
} else {
qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
}
} }
qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
...@@ -3140,6 +3322,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) ...@@ -3140,6 +3322,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
rx_ring->dcb_tc = 0; rx_ring->dcb_tc = 0;
tx_ring->dcb_tc = 0; tx_ring->dcb_tc = 0;
} }
return;
} }
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
...@@ -4855,6 +5038,24 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) ...@@ -4855,6 +5038,24 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
return enabled_tc; return enabled_tc;
} }
/**
 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
 * @pf: PF being queried
 *
 * Build the enabled-TC bitmap from the current MQPRIO configuration on
 * the LAN VSI. TC0 is always set; TCs 1..num_tc-1 are set in addition.
 **/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
	u8 enabled_tc = 1;
	u8 tc = 1;

	while (tc < num_tc)
		enabled_tc |= BIT(tc++);

	return enabled_tc;
}
/** /**
* i40e_pf_get_num_tc - Get enabled traffic classes for PF * i40e_pf_get_num_tc - Get enabled traffic classes for PF
* @pf: PF being queried * @pf: PF being queried
...@@ -4868,7 +5069,10 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) ...@@ -4868,7 +5069,10 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
u8 num_tc = 0; u8 num_tc = 0;
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
/* If DCB is not enabled then always in single TC */ if (pf->flags & I40E_FLAG_TC_MQPRIO)
return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return 1; return 1;
...@@ -4897,7 +5101,12 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) ...@@ -4897,7 +5101,12 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
**/ **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{ {
/* If DCB is not enabled for this PF then just return default TC */ if (pf->flags & I40E_FLAG_TC_MQPRIO)
return i40e_mqprio_get_enabled_tc(pf);
/* If neither MQPRIO nor DCB is enabled for this PF then just return
* default TC
*/
if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return I40E_DEFAULT_TRAFFIC_CLASS; return I40E_DEFAULT_TRAFFIC_CLASS;
...@@ -4987,6 +5196,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, ...@@ -4987,6 +5196,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
i40e_status ret; i40e_status ret;
int i; int i;
if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
return 0;
if (!vsi->mqprio_qopt.qopt.hw) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
dev_info(&vsi->back->pdev->dev,
"Failed to reset tx rate for vsi->seid %u\n",
vsi->seid);
return ret;
}
bw_data.tc_valid_bits = enabled_tc; bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i]; bw_data.tc_bw_credits[i] = bw_share[i];
...@@ -5049,6 +5268,9 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) ...@@ -5049,6 +5268,9 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
vsi->tc_config.tc_info[i].qoffset); vsi->tc_config.tc_info[i].qoffset);
} }
if (pf->flags & I40E_FLAG_TC_MQPRIO)
return;
/* Assign UP2TC map for the VSI */ /* Assign UP2TC map for the VSI */
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
/* Get the actual TC# for the UP */ /* Get the actual TC# for the UP */
...@@ -5099,7 +5321,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ...@@ -5099,7 +5321,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
int i; int i;
/* Check if enabled_tc is same as existing or new TCs */ /* Check if enabled_tc is same as existing or new TCs */
if (vsi->tc_config.enabled_tc == enabled_tc) if (vsi->tc_config.enabled_tc == enabled_tc &&
vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
return ret; return ret;
/* Enable ETS TCs with equal BW Share for now across all VSIs */ /* Enable ETS TCs with equal BW Share for now across all VSIs */
...@@ -5122,15 +5345,37 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ...@@ -5122,15 +5345,37 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.vf_num = 0; ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid; ctxt.uplink_seid = vsi->uplink_seid;
ctxt.info = vsi->info; ctxt.info = vsi->info;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
if (ret)
goto out;
} else {
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
}
/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
* queues changed.
*/
if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
vsi->num_queue_pairs);
ret = i40e_vsi_config_rss(vsi);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"Failed to reconfig rss for num_queues\n");
return ret;
}
vsi->reconfig_rss = false;
}
if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
ctxt.info.valid_sections |= ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
} }
/* Update the VSI after updating the VSI queue-mapping information */ /* Update the VSI after updating the VSI queue-mapping
* information
*/
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) { if (ret) {
dev_info(&vsi->back->pdev->dev, dev_info(&vsi->back->pdev->dev,
...@@ -5162,87 +5407,812 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ...@@ -5162,87 +5407,812 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
} }
/** /**
* i40e_veb_config_tc - Configure TCs for given VEB * i40e_get_link_speed - Returns link speed for the interface
* @veb: given VEB * @vsi: VSI to be configured
* @enabled_tc: TC bitmap
* *
* Configures given TC bitmap for VEB (switching) element
**/ **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) int i40e_get_link_speed(struct i40e_vsi *vsi)
{ {
struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; struct i40e_pf *pf = vsi->back;
struct i40e_pf *pf = veb->pf;
int ret = 0;
int i;
/* No TCs or already enabled TCs just return */
if (!enabled_tc || veb->enabled_tc == enabled_tc)
return ret;
bw_data.tc_valid_bits = enabled_tc;
/* bw_data.absolute_credits is not set (relative) */
/* Enable ETS TCs with equal BW Share for now */ switch (pf->hw.phy.link_info.link_speed) {
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { case I40E_LINK_SPEED_40GB:
if (enabled_tc & BIT(i)) return 40000;
bw_data.tc_bw_share_credits[i] = 1; case I40E_LINK_SPEED_25GB:
return 25000;
case I40E_LINK_SPEED_20GB:
return 20000;
case I40E_LINK_SPEED_10GB:
return 10000;
case I40E_LINK_SPEED_1GB:
return 1000;
default:
return -EINVAL;
} }
}
ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, /**
&bw_data, NULL); * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
if (ret) { * @vsi: VSI to be configured
dev_info(&pf->pdev->dev, * @seid: seid of the channel/VSI
"VEB bw config failed, err %s aq_err %s\n", * @max_tx_rate: max TX rate to be configured as BW limit
i40e_stat_str(&pf->hw, ret), *
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); * Helper function to set BW limit for a given VSI
goto out; **/
} int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
{
struct i40e_pf *pf = vsi->back;
int speed = 0;
int ret = 0;
/* Update the BW information */ speed = i40e_get_link_speed(vsi);
ret = i40e_veb_get_bw_info(veb); if (max_tx_rate > speed) {
if (ret) { dev_err(&pf->pdev->dev,
dev_info(&pf->pdev->dev, "Invalid max tx rate %llu specified for VSI seid %d.",
"Failed getting veb bw config, err %s aq_err %s\n", max_tx_rate, seid);
i40e_stat_str(&pf->hw, ret), return -EINVAL;
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); }
if (max_tx_rate && max_tx_rate < 50) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
max_tx_rate = 50;
} }
out: /* Tx rate credits are in values of 50Mbps, 0 is disabled */
ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid,
max_tx_rate / I40E_BW_CREDIT_DIVISOR,
I40E_MAX_BW_INACTIVE_ACCUM, NULL);
if (ret)
dev_err(&pf->pdev->dev,
"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret; return ret;
} }
#ifdef CONFIG_I40E_DCB
/** /**
* i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs * i40e_remove_queue_channels - Remove queue channels for the TCs
* @pf: PF struct * @vsi: VSI to be configured
* *
* Reconfigure VEB/VSIs on a given PF; it is assumed that * Remove queue channels for the TCs
* the caller would've quiesce all the VSIs before calling
* this function
**/ **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf) static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{ {
u8 tc_map = 0; struct i40e_channel *ch, *ch_tmp;
int ret; int ret, i;
u8 v;
/* Enable the TCs available on PF to all VEBs */ /* Reset rss size that was stored when reconfiguring rss for
tc_map = i40e_pf_get_tc_map(pf); * channel VSIs with non-power-of-2 queue count.
for (v = 0; v < I40E_MAX_VEB; v++) { */
if (!pf->veb[v]) vsi->current_rss_size = 0;
/* perform cleanup for channels if they exist */
if (list_empty(&vsi->ch_list))
return;
list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
struct i40e_vsi *p_vsi;
list_del(&ch->list);
p_vsi = ch->parent_vsi;
if (!p_vsi || !ch->initialized) {
kfree(ch);
continue; continue;
ret = i40e_veb_config_tc(pf->veb[v], tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VEB seid=%d\n",
pf->veb[v]->seid);
/* Will try to configure as many components */
} }
} /* Reset queue contexts */
for (i = 0; i < ch->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
u16 pf_q;
/* Update each VSI */ pf_q = ch->base_queue + i;
for (v = 0; v < pf->num_alloc_vsi; v++) { tx_ring = vsi->tx_rings[pf_q];
if (!pf->vsi[v]) tx_ring->ch = NULL;
rx_ring = vsi->rx_rings[pf_q];
rx_ring->ch = NULL;
}
/* Reset BW configured for this VSI via mqprio */
ret = i40e_set_bw_limit(vsi, ch->seid, 0);
if (ret)
dev_info(&vsi->back->pdev->dev,
"Failed to reset tx rate for ch->seid %u\n",
ch->seid);
/* delete VSI from FW */
ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
NULL);
if (ret)
dev_err(&vsi->back->pdev->dev,
"unable to remove channel (%d) for parent VSI(%d)\n",
ch->seid, p_vsi->seid);
kfree(ch);
}
INIT_LIST_HEAD(&vsi->ch_list);
}
/**
 * i40e_is_any_channel - channel exist or not
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Returns true or false if channel(s) exist for associated VSI or not
 **/
static bool i40e_is_any_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;

	/* No entries are removed here, so the plain (non-_safe) list
	 * iterator is sufficient and avoids an unused temporary.
	 */
	list_for_each_entry(ch, &vsi->ch_list, list) {
		if (ch->initialized)
			return true;
	}

	return false;
}
/**
 * i40e_get_max_queues_for_channel
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Helper function which returns max value among the queue counts set on the
 * channels/TCs created. Uninitialized channels are skipped; returns 0 when
 * no initialized channel exists.
 **/
static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;
	int max = 0;

	/* Read-only walk: no entries are deleted, so the non-_safe
	 * iterator is the correct choice here.
	 */
	list_for_each_entry(ch, &vsi->ch_list, list) {
		if (ch->initialized && ch->num_queue_pairs > max)
			max = ch->num_queue_pairs;
	}

	return max;
}
/**
 * i40e_validate_num_queues - validate num_queues w.r.t channel
 * @pf: ptr to PF device
 * @num_queues: number of queues
 * @vsi: the parent VSI
 * @reconfig_rss: indicates should the RSS be reconfigured or not
 *
 * This function validates number of queues in the context of new channel
 * which is being established and determines if RSS should be reconfigured
 * or not for parent VSI. Returns 0 on success, -EINVAL on an invalid
 * queue count; *@reconfig_rss is set true when the parent VSI's RSS must
 * be reprogrammed for a non-power-of-2 count.
 **/
static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
				    struct i40e_vsi *vsi, bool *reconfig_rss)
{
	int max_ch_queues;

	if (!reconfig_rss)
		return -EINVAL;

	*reconfig_rss = false;

	if (num_queues > I40E_MAX_QUEUES_PER_CH) {
		/* Message fixed to name the macro actually checked
		 * (I40E_MAX_QUEUES_PER_CH, not I40E_MAX_QUEUES_PER_VSI).
		 */
		dev_err(&pf->pdev->dev,
			"Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_CH (%u)\n",
			num_queues, I40E_MAX_QUEUES_PER_CH);
		return -EINVAL;
	}

	if (vsi->current_rss_size) {
		if (num_queues > vsi->current_rss_size) {
			/* a channel may never use more queues than the
			 * parent's current RSS spread
			 */
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) > vsi's current_size(%d)\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		} else if ((num_queues < vsi->current_rss_size) &&
			   (!is_power_of_2(num_queues))) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		}
	}

	if (!is_power_of_2(num_queues)) {
		/* Find the max num_queues configured for channel if channel
		 * exist.
		 * if channel exist, then enforce 'num_queues' to be more than
		 * max ever queues configured for channel.
		 */
		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
		if (num_queues < max_ch_queues) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
				num_queues, max_ch_queues);
			return -EINVAL;
		}
		*reconfig_rss = true;
	}

	return 0;
}
/**
 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
 * @vsi: the VSI being setup
 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
 *
 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'.
 * The first call records the original rss_size in vsi->orig_rss_size so
 * it can be restored later; vsi->current_rss_size tracks the active size.
 * Returns 0 on success or a negative/AQ error code.
 **/
static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_hw *hw = &pf->hw;
	int local_rss_size;
	u8 *lut;
	int ret;

	if (!vsi->rss_size)
		return -EINVAL;

	if (rss_size > vsi->rss_size)
		return -EINVAL;

	local_rss_size = min_t(int, vsi->rss_size, rss_size);
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Ignoring user configured lut if there is one */
	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	/* lut is no longer needed on either path; single cleanup point
	 * replaces the duplicated kfree in the error and success branches.
	 */
	kfree(lut);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	/* Do the update w.r.t. storing rss_size */
	if (!vsi->orig_rss_size)
		vsi->orig_rss_size = vsi->rss_size;
	vsi->current_rss_size = local_rss_size;

	return ret;
}
/**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
 * @ctxt: VSI context structure being filled in
 * @ch: ptr to channel structure
 *
 * Setup the TC0-only queue map in @ctxt for the channel's VSI: clamps the
 * channel's queue count to the available LAN MSI-X vectors and programs a
 * contiguous mapping starting at the channel's base queue.
 **/
static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
					 struct i40e_vsi_context *ctxt,
					 struct i40e_channel *ch)
{
	u16 qcount, qmap, sections = 0;
	u8 offset = 0;
	int pow;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

	/* cap the channel's queue pairs by the MSI-X vectors available */
	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
	ch->num_queue_pairs = qcount;

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(qcount);
	if (!is_power_of_2(qcount))
		pow++;

	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
/**
 * i40e_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map.  On success the
 * firmware-assigned identifiers (seid, vsi_number, stat counter index)
 * and the touched context sections are cached in @ch.
 **/
static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
			    struct i40e_channel *ch)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int ret;

	/* only VMDq2-backed channels are supported here */
	if (ch->type != I40E_VSI_VMDQ2) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = uplink_seid;
	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	if (ch->type == I40E_VSI_VMDQ2)
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
		ctxt.info.valid_sections |=
			cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		ctxt.info.switch_id =
			cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
	}

	/* Set queue map for a given VSI context */
	i40e_channel_setup_queue_map(pf, &ctxt, ch);

	/* Now time to create VSI */
	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		return -ENOENT;
	}

	/* Success, update channel */
	ch->enabled_tc = enabled_tc;
	ch->seid = ctxt.seid;
	ch->vsi_number = ctxt.vsi_number;
	/* stat_counter_idx is returned little-endian by the firmware;
	 * convert to CPU order (was cpu_to_le16, which is the wrong
	 * direction even though both are no-ops on LE hosts).
	 */
	ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);

	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	ch->info.mapping_flags = ctxt.info.mapping_flags;
	memcpy(&ch->info.queue_mapping,
	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
	       sizeof(ctxt.info.tc_mapping));

	return 0;
}
/**
 * i40e_channel_config_bw - configure BW for the channel's VSI
 * @vsi: the parent VSI (used for AQ/device access)
 * @ch: channel (VSI) whose per-TC BW is being configured
 * @bw_share: per-TC relative bandwidth share credits
 *
 * Sends the per-TC BW credit configuration for the channel's VSI to the
 * firmware and, on success, caches the returned qset handles in @ch->info.
 **/
static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
				  u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	/* only TCs enabled on the channel are valid */
	bw_data.tc_valid_bits = ch->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
				       &bw_data, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
			 vsi->back->hw.aq.asq_last_status, ch->seid);
		return -EINVAL;
	}

	/* cache the firmware-assigned qset handles */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		ch->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
/**
 * i40e_channel_config_tx_ring - config TX ring associated with new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure the rings associated with the channel (VSI), since the queues
 * are being taken from the parent VSI.  Sets equal BW shares across the
 * channel's enabled TCs and points each ring at the new channel.
 **/
static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
				       struct i40e_vsi *vsi,
				       struct i40e_channel *ch)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	i40e_status ret;
	int i;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (ch->enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	/* configure BW for new VSI */
	ret = i40e_channel_config_bw(vsi, ch, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for channel (seid %u)\n",
			 ch->enabled_tc, ch->seid);
		return ret;
	}

	/* Associate each of the channel's queue pairs (taken from the
	 * parent VSI) with the channel so the queue contexts can be
	 * re-set-up against it.
	 */
	for (i = 0; i < ch->num_queue_pairs; i++) {
		u16 qid = ch->base_queue + i;

		vsi->tx_rings[qid]->ch = ch;
		vsi->rx_rings[qid]->ch = ch;
	}

	return 0;
}
/**
 * i40e_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Creates a new channel (VSI) of the requested @type under @uplink_seid,
 * configures its TX rings, and advances the parent VSI's next base queue.
 * @ch->initialized reflects whether the VSI itself was created.
 **/
static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
					struct i40e_vsi *vsi,
					struct i40e_channel *ch,
					u16 uplink_seid, u8 type)
{
	int err;

	ch->initialized = false;
	ch->type = type;
	ch->base_queue = vsi->next_base_queue;

	/* Proceed with creation of channel (VMDq2) VSI */
	err = i40e_add_channel(pf, uplink_seid, ch);
	if (err) {
		dev_info(&pf->pdev->dev,
			 "failed to add_channel using uplink_seid %u\n",
			 uplink_seid);
		return err;
	}

	/* Mark the successful creation of channel */
	ch->initialized = true;

	/* Reconfigure TX queues using QTX_CTL register */
	err = i40e_channel_config_tx_ring(pf, vsi, ch);
	if (err) {
		dev_info(&pf->pdev->dev,
			 "failed to configure TX rings for channel %u\n",
			 ch->seid);
		return err;
	}

	/* update 'next_base_queue' */
	vsi->next_base_queue += ch->num_queue_pairs;
	dev_dbg(&pf->pdev->dev,
		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
		ch->seid, ch->vsi_number, ch->stat_counter_idx,
		ch->num_queue_pairs,
		vsi->next_base_queue);

	return err;
}
/**
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: parent VSI the channel is created under (must be the main VSI)
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) as VMDq2 under the main VSI's uplink switching
 * element, and configure TX rings accordingly.
 *
 * Returns true if the channel VSI was created, false otherwise.
 **/
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
			       struct i40e_channel *ch)
{
	u8 vsi_type;
	u16 seid;
	int ret;

	/* only the main VSI may parent a channel */
	if (vsi->type == I40E_VSI_MAIN) {
		vsi_type = I40E_VSI_VMDQ2;
	} else {
		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
			vsi->type);
		return false;
	}

	/* underlying switching element */
	seid = pf->vsi[pf->lan_vsi]->uplink_seid;

	/* create channel (VSI), configure TX rings */
	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
	if (ret) {
		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
		return false;
	}

	return ch->initialized;
}
/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates channel (VSI) using num_queues specified by user,
 * reconfigs RSS if needed.  May trigger a PF reset when switching the
 * device from VEPA to VEB mode for the first channel.  On success the
 * requested queues are deducted from @vsi->cnt_q_avail.
 **/
int i40e_create_queue_channel(struct i40e_vsi *vsi,
			      struct i40e_channel *ch)
{
	struct i40e_pf *pf = vsi->back;
	bool reconfig_rss;
	int err;

	if (!ch)
		return -EINVAL;

	if (!ch->num_queue_pairs) {
		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
			ch->num_queue_pairs);
		return -EINVAL;
	}

	/* validate user requested num_queues for channel */
	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
				       &reconfig_rss);
	if (err) {
		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
			 ch->num_queue_pairs);
		return -EINVAL;
	}

	/* By default we are in VEPA mode, if this is the first VF/VMDq
	 * VSI to be added switch to VEB mode.
	 */
	if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
	    (!i40e_is_any_channel(vsi))) {
		/* TC0's queue count must be a power of 2 before carving
		 * off channels
		 */
		if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
			dev_dbg(&pf->pdev->dev,
				"Failed to create channel. Override queues (%u) not power of 2\n",
				vsi->tc_config.tc_info[0].qcount);
			return -EINVAL;
		}

		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;

			if (vsi->type == I40E_VSI_MAIN) {
				/* a PF reset is required to move the main
				 * VSI into VEB mode; mqprio mode performs
				 * it directly, otherwise use the safe
				 * (quiescing) variant
				 */
				if (pf->flags & I40E_FLAG_TC_MQPRIO)
					i40e_do_reset(pf, I40E_PF_RESET_FLAG,
						      true);
				else
					i40e_do_reset_safe(pf,
							   I40E_PF_RESET_FLAG);
			}
		}
		/* now onwards for main VSI, number of queues will be value
		 * of TC0's queue count
		 */
	}

	/* By this time, vsi->cnt_q_avail shall be set to non-zero and
	 * it should be more than num_queues
	 */
	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
		dev_dbg(&pf->pdev->dev,
			"Error: cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_queue_pairs);
		return -EINVAL;
	}

	/* reconfig_rss only if vsi type is MAIN_VSI */
	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "Error: unable to reconfig rss for num_queues (%u)\n",
				 ch->num_queue_pairs);
			return -EINVAL;
		}
	}

	if (!i40e_setup_channel(pf, vsi, ch)) {
		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
		return -EINVAL;
	}

	dev_info(&pf->pdev->dev,
		 "Setup channel (id:%u) utilizing num_queues %d\n",
		 ch->seid, ch->num_queue_pairs);

	/* configure VSI for BW limit */
	if (ch->max_tx_rate) {
		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
			return -EINVAL;

		dev_dbg(&pf->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			ch->max_tx_rate,
			ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR, ch->seid);
	}

	/* in case of VF, this will be main SRIOV VSI */
	ch->parent_vsi = vsi;

	/* and update main_vsi's count for queue_available to use */
	vsi->cnt_q_avail -= ch->num_queue_pairs;

	return 0;
}
/**
 * i40e_configure_queue_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs.  On any failure all
 * channels created so far are torn down again.
 **/
static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;
	int err = 0, i;

	/* TC0 runs on the main VSI; create a channel VSI per remaining TC */
	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (!(vsi->tc_config.enabled_tc & BIT(i)))
			continue;

		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			err = -ENOMEM;
			goto err_free;
		}

		INIT_LIST_HEAD(&ch->list);
		ch->num_queue_pairs = vsi->tc_config.tc_info[i].qcount;
		ch->base_queue = vsi->tc_config.tc_info[i].qoffset;

		/* Bandwidth limit through tc interface is in bytes/s,
		 * change to Mbit/s
		 */
		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i] / (1000000 / 8);

		/* link the channel in before creation so teardown can
		 * find it even on a partial failure
		 */
		list_add_tail(&ch->list, &vsi->ch_list);

		err = i40e_create_queue_channel(vsi, ch);
		if (err) {
			dev_err(&vsi->back->pdev->dev,
				"Failed creating queue channel with TC%d: queues %d\n",
				i, ch->num_queue_pairs);
			goto err_free;
		}
	}

	return err;

err_free:
	i40e_remove_queue_channels(vsi);
	return err;
}
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element, using equal
 * relative BW shares for every enabled TC, then refreshes the cached BW
 * information.
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int tc;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	/* bw_data.absolute_credits is not set (relative) */
	bw_data.tc_valid_bits = enabled_tc;

	/* Enable ETS TCs with equal BW Share for now */
	for (tc = 0; tc < I40E_MAX_TRAFFIC_CLASS; tc++) {
		if (enabled_tc & BIT(tc))
			bw_data.tc_bw_share_credits[tc] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	return ret;
}
#ifdef CONFIG_I40E_DCB
/**
* i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
* @pf: PF struct
*
* Reconfigure VEB/VSIs on a given PF; it is assumed that
* the caller would've quiesce all the VSIs before calling
* this function
**/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
u8 tc_map = 0;
int ret;
u8 v;
/* Enable the TCs available on PF to all VEBs */
tc_map = i40e_pf_get_tc_map(pf);
for (v = 0; v < I40E_MAX_VEB; v++) {
if (!pf->veb[v])
continue;
ret = i40e_veb_config_tc(pf->veb[v], tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VEB seid=%d\n",
pf->veb[v]->seid);
/* Will try to configure as many components */
}
}
/* Update each VSI */
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v])
continue; continue;
/* - Enable all TCs for the LAN VSI /* - Enable all TCs for the LAN VSI
...@@ -5561,75 +6531,215 @@ void i40e_down(struct i40e_vsi *vsi) ...@@ -5561,75 +6531,215 @@ void i40e_down(struct i40e_vsi *vsi)
} }
/**
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
 *
 * Validates that the per-TC queue offsets/counts form one contiguous
 * range starting at 0, that no (unsupported) min rates are requested,
 * and that the sum of the max rates fits within the link speed.
 **/
static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
				     struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0;
	int i;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
		return -EINVAL;
	for (i = 0; ; i++) {
		if (!mqprio_qopt->qopt.count[i])
			return -EINVAL;
		/* min tx rates are not offloaded by this driver */
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&vsi->back->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		/* max_rate is in bytes/s; accumulate in Mbit/s */
		sum_max_rate += (mqprio_qopt->max_rate[i] / (1000000 / 8));

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		/* each TC's queue range must start where the previous ends */
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
			return -EINVAL;
	}
	/* the last TC's range must fit within the VSI's queue pairs */
	if (vsi->num_queue_pairs <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
		return -EINVAL;
	}
	if (sum_max_rate > i40e_get_link_speed(vsi)) {
		dev_err(&vsi->back->pdev->dev,
			"Invalid max tx rate specified\n");
		return -EINVAL;
	}
	return 0;
}
/**
 * i40e_vsi_set_default_tc_config - set default values for tc configuration
 * @vsi: the VSI being configured
 *
 * Resets the VSI's TC configuration to the default: only TC0 enabled,
 * holding all the queues (capped by the per-TC maximum).
 **/
static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
{
	u16 qcount;
	int i;

	/* Only TC0 is enabled */
	vsi->tc_config.numtc = 1;
	vsi->tc_config.enabled_tc = 1;
	qcount = min_t(int, vsi->alloc_queue_pairs,
		       i40e_pf_get_max_q_per_tc(vsi->back));
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* For the TCs that are not enabled set the offset to the
		 * default queue and allocate one queue for the given TC.
		 */
		vsi->tc_config.tc_info[i].qoffset = 0;
		if (i == 0)
			vsi->tc_config.tc_info[i].qcount = qcount;
		else
			vsi->tc_config.tc_info[i].qcount = 1;
		vsi->tc_config.tc_info[i].netdev_tc = 0;
	}
}
/** /**
* i40e_setup_tc - configure multiple traffic classes * i40e_setup_tc - configure multiple traffic classes
* @netdev: net device to configure * @netdev: net device to configure
* @tc: number of traffic classes to enable * @type_data: tc offload data
**/ **/
static int i40e_setup_tc(struct net_device *netdev, u8 tc) static int i40e_setup_tc(struct net_device *netdev, void *type_data)
{ {
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
u8 enabled_tc = 0; u8 enabled_tc = 0, num_tc, hw;
bool need_reset = false;
int ret = -EINVAL; int ret = -EINVAL;
u16 mode;
int i; int i;
/* Check if DCB enabled to continue */ num_tc = mqprio_qopt->qopt.num_tc;
if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { hw = mqprio_qopt->qopt.hw;
netdev_info(netdev, "DCB is not enabled for adapter\n"); mode = mqprio_qopt->mode;
goto exit; if (!hw) {
pf->flags &= ~I40E_FLAG_TC_MQPRIO;
memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
goto config_tc;
} }
/* Check if MFP enabled */ /* Check if MFP enabled */
if (pf->flags & I40E_FLAG_MFP_ENABLED) { if (pf->flags & I40E_FLAG_MFP_ENABLED) {
netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); netdev_info(netdev,
goto exit; "Configuring TC not supported in MFP mode\n");
return ret;
} }
switch (mode) {
case TC_MQPRIO_MODE_DCB:
pf->flags &= ~I40E_FLAG_TC_MQPRIO;
/* Check whether tc count is within enabled limit */ /* Check if DCB enabled to continue */
if (tc > i40e_pf_get_num_tc(pf)) { if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); netdev_info(netdev,
goto exit; "DCB is not enabled for adapter\n");
return ret;
}
/* Check whether tc count is within enabled limit */
if (num_tc > i40e_pf_get_num_tc(pf)) {
netdev_info(netdev,
"TC count greater than enabled on link for adapter\n");
return ret;
}
break;
case TC_MQPRIO_MODE_CHANNEL:
if (pf->flags & I40E_FLAG_DCB_ENABLED) {
netdev_info(netdev,
"Full offload of TC Mqprio options is not supported when DCB is enabled\n");
return ret;
}
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
return ret;
ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
if (ret)
return ret;
memcpy(&vsi->mqprio_qopt, mqprio_qopt,
sizeof(*mqprio_qopt));
pf->flags |= I40E_FLAG_TC_MQPRIO;
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
break;
default:
return -EINVAL;
} }
config_tc:
/* Generate TC map for number of tc requested */ /* Generate TC map for number of tc requested */
for (i = 0; i < tc; i++) for (i = 0; i < num_tc; i++)
enabled_tc |= BIT(i); enabled_tc |= BIT(i);
/* Requesting same TC configuration as already enabled */ /* Requesting same TC configuration as already enabled */
if (enabled_tc == vsi->tc_config.enabled_tc) if (enabled_tc == vsi->tc_config.enabled_tc &&
mode != TC_MQPRIO_MODE_CHANNEL)
return 0; return 0;
/* Quiesce VSI queues */ /* Quiesce VSI queues */
i40e_quiesce_vsi(vsi); i40e_quiesce_vsi(vsi);
if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
i40e_remove_queue_channels(vsi);
/* Configure VSI for enabled TCs */ /* Configure VSI for enabled TCs */
ret = i40e_vsi_config_tc(vsi, enabled_tc); ret = i40e_vsi_config_tc(vsi, enabled_tc);
if (ret) { if (ret) {
netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
vsi->seid); vsi->seid);
need_reset = true;
goto exit; goto exit;
} }
/* Unquiesce VSI */ if (pf->flags & I40E_FLAG_TC_MQPRIO) {
i40e_unquiesce_vsi(vsi); if (vsi->mqprio_qopt.max_rate[0]) {
u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] /
(1000000 / 8);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
dev_dbg(&vsi->back->pdev->dev,
"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
max_tx_rate,
max_tx_rate / I40E_BW_CREDIT_DIVISOR,
vsi->seid);
} else {
need_reset = true;
goto exit;
}
}
ret = i40e_configure_queue_channels(vsi);
if (ret) {
netdev_info(netdev,
"Failed configuring queue channels\n");
need_reset = true;
goto exit;
}
}
exit: exit:
/* Reset the configuration data to defaults, only TC0 is enabled */
if (need_reset) {
i40e_vsi_set_default_tc_config(vsi);
need_reset = false;
}
/* Unquiesce VSI */
i40e_unquiesce_vsi(vsi);
return ret; return ret;
} }
static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data) void *type_data)
{ {
struct tc_mqprio_qopt *mqprio = type_data;
if (type != TC_SETUP_MQPRIO) if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP; return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; return i40e_setup_tc(netdev, type_data);
return i40e_setup_tc(netdev, mqprio->num_tc);
} }
/** /**
...@@ -5747,7 +6857,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi) ...@@ -5747,7 +6857,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
err_setup_tx: err_setup_tx:
i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_tx_resources(vsi);
if (vsi == pf->vsi[pf->lan_vsi]) if (vsi == pf->vsi[pf->lan_vsi])
i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true); i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
return err; return err;
} }
...@@ -5875,7 +6985,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) ...@@ -5875,7 +6985,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val); wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw); i40e_flush(&pf->hw);
} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { } else if (reset_flags & I40E_PF_RESET_FLAG) {
/* Request a PF Reset /* Request a PF Reset
* *
...@@ -7030,6 +8140,46 @@ static void i40e_fdir_teardown(struct i40e_pf *pf) ...@@ -7030,6 +8140,46 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
i40e_vsi_release(vsi); i40e_vsi_release(vsi);
} }
/**
 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main vsi
 *
 * Rebuilds channel VSIs if they existed before reset, re-applying any
 * per-channel TX rate limit that was configured.
 **/
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	i40e_status ret;

	if (list_empty(&vsi->ch_list))
		return 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		/* channels are appended in creation order, so the first
		 * uninitialized entry means nothing after it was ever
		 * brought up — stop here
		 */
		if (!ch->initialized)
			break;
		/* Proceed with creation of channel (VMDq2) VSI */
		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to rebuild channels using uplink_seid %u\n",
				 vsi->uplink_seid);
			return ret;
		}
		/* re-apply the channel's TX bandwidth cap, if any */
		if (ch->max_tx_rate) {
			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

			dev_dbg(&vsi->back->pdev->dev,
				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
				ch->max_tx_rate,
				ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR,
				ch->seid);
		}
	}
	return 0;
}
/** /**
* i40e_prep_for_reset - prep for the core to reset * i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure * @pf: board private structure
...@@ -7166,6 +8316,7 @@ static int i40e_reset(struct i40e_pf *pf) ...@@ -7166,6 +8316,7 @@ static int i40e_reset(struct i40e_pf *pf)
**/ **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{ {
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
u8 set_fc_aq_fail = 0; u8 set_fc_aq_fail = 0;
i40e_status ret; i40e_status ret;
...@@ -7248,7 +8399,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) ...@@ -7248,7 +8399,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* If there were VEBs but the reconstitution failed, we'll try * If there were VEBs but the reconstitution failed, we'll try
* try to recover minimal use by getting the basic PF VSI working. * try to recover minimal use by getting the basic PF VSI working.
*/ */
if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { if (vsi->uplink_seid != pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
/* find the one VEB connected to the MAC, and find orphans */ /* find the one VEB connected to the MAC, and find orphans */
for (v = 0; v < I40E_MAX_VEB; v++) { for (v = 0; v < I40E_MAX_VEB; v++) {
...@@ -7272,8 +8423,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) ...@@ -7272,8 +8423,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"rebuild of switch failed: %d, will try to set up simple PF connection\n", "rebuild of switch failed: %d, will try to set up simple PF connection\n",
ret); ret);
pf->vsi[pf->lan_vsi]->uplink_seid vsi->uplink_seid = pf->mac_seid;
= pf->mac_seid;
break; break;
} else if (pf->veb[v]->uplink_seid == 0) { } else if (pf->veb[v]->uplink_seid == 0) {
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
...@@ -7284,10 +8434,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) ...@@ -7284,10 +8434,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
} }
} }
if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { if (vsi->uplink_seid == pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
/* no VEB, so rebuild only the Main VSI */ /* no VEB, so rebuild only the Main VSI */
ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); ret = i40e_add_vsi(vsi);
if (ret) { if (ret) {
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"rebuild of Main VSI failed: %d\n", ret); "rebuild of Main VSI failed: %d\n", ret);
...@@ -7295,6 +8445,27 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) ...@@ -7295,6 +8445,27 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
} }
} }
if (vsi->mqprio_qopt.max_rate[0]) {
u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] / (1000000 / 8);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret)
dev_dbg(&vsi->back->pdev->dev,
"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
max_tx_rate,
max_tx_rate / I40E_BW_CREDIT_DIVISOR,
vsi->seid);
else
goto end_unlock;
}
/* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
* for this main VSI if they exist
*/
ret = i40e_rebuild_channels(vsi);
if (ret)
goto end_unlock;
/* Reconfigure hardware for allowing smaller MSS in the case /* Reconfigure hardware for allowing smaller MSS in the case
* of TSO, so that we avoid the MDD being fired and causing * of TSO, so that we avoid the MDD being fired and causing
* a reset in the case of small MSS+TSO. * a reset in the case of small MSS+TSO.
...@@ -8454,45 +9625,6 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) ...@@ -8454,45 +9625,6 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
return err; return err;
} }
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 *
 * Programs the RSS hash key and/or lookup table via AQ commands.
 * Either @seed or @lut may be NULL to skip that part.
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		/* only the main VSI uses the PF-wide LUT */
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}
/** /**
* i40e_get_rss_aq - Get RSS keys and lut by using AQ commands * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
* @vsi: Pointer to vsi structure * @vsi: Pointer to vsi structure
...@@ -8539,46 +9671,6 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, ...@@ -8539,46 +9671,6 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
return ret; return ret;
} }
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 *
 * Programs the VSI's RSS key and LUT via AQ, preferring any user-supplied
 * key/LUT over the defaults.  No-op if the HW lacks AQ RSS support.
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *table;
	int err;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;

	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;

	table = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Prefer the user-configured LUT; otherwise fill a default one */
	if (vsi->rss_lut_user)
		memcpy(table, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, table, vsi->rss_table_size,
				  vsi->rss_size);

	/* Prefer the user-configured hash key; otherwise use a random one */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	err = i40e_config_rss_aq(vsi, seed, table, vsi->rss_table_size);
	kfree(table);

	return err;
}
/** /**
* i40e_config_rss_reg - Configure RSS keys and lut by writing registers * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
* @vsi: Pointer to vsi structure * @vsi: Pointer to vsi structure
...@@ -9223,7 +10315,7 @@ static int i40e_set_features(struct net_device *netdev, ...@@ -9223,7 +10315,7 @@ static int i40e_set_features(struct net_device *netdev,
need_reset = i40e_set_ntuple(pf, features); need_reset = i40e_set_ntuple(pf, features);
if (need_reset) if (need_reset)
i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true); i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
return 0; return 0;
} }
...@@ -9475,8 +10567,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, ...@@ -9475,8 +10567,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
else else
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
true);
break; break;
} }
} }
...@@ -11609,6 +12700,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11609,6 +12700,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis; goto err_vsis;
} }
INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
/* Make sure flow control is set according to current settings */ /* Make sure flow control is set according to current settings */
err = i40e_set_fc(hw, &set_fc_aq_fail, true); err = i40e_set_fc(hw, &set_fc_aq_fail, true);
......
...@@ -426,6 +426,8 @@ struct i40e_ring { ...@@ -426,6 +426,8 @@ struct i40e_ring {
* i40e_clean_rx_ring_irq() is called * i40e_clean_rx_ring_irq() is called
* for this ring. * for this ring.
*/ */
struct i40e_channel *ch;
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring) static inline bool ring_uses_build_skb(struct i40e_ring *ring)
......
...@@ -1425,8 +1425,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) ...@@ -1425,8 +1425,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) { if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
BIT_ULL(__I40E_PF_RESET_REQUESTED));
} }
return i40e_pci_sriov_enable(pdev, num_vfs); return i40e_pci_sriov_enable(pdev, num_vfs);
} }
...@@ -1434,7 +1433,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) ...@@ -1434,7 +1433,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) { if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf); i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
} else { } else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
return -EINVAL; return -EINVAL;
...@@ -2046,8 +2045,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2046,8 +2045,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
* @msglen: msg length * @msglen: msg length
* *
* VFs get a default number of queues but can use this message to request a * VFs get a default number of queues but can use this message to request a
* different number. Will respond with either the number requested or the * different number. If the request is successful, PF will reset the VF and
* maximum we can support. * return 0. If unsuccessful, PF will send message informing VF of number of
* available queues and return result of sending VF a message.
**/ **/
static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen) static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
{ {
...@@ -2078,7 +2078,11 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen) ...@@ -2078,7 +2078,11 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
pf->queues_left); pf->queues_left);
vfres->num_queue_pairs = pf->queues_left + cur_pairs; vfres->num_queue_pairs = pf->queues_left + cur_pairs;
} else { } else {
/* successful request */
vf->num_req_queues = req_pairs; vf->num_req_queues = req_pairs;
i40e_vc_notify_vf_reset(vf);
i40e_reset_vf(vf, false);
return 0;
} }
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
...@@ -3118,8 +3122,6 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, ...@@ -3118,8 +3122,6 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
return ret; return ret;
} }
#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
/** /**
* i40e_ndo_set_vf_bw * i40e_ndo_set_vf_bw
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -3135,7 +3137,6 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, ...@@ -3135,7 +3137,6 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
struct i40e_pf *pf = np->vsi->back; struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
struct i40e_vf *vf; struct i40e_vf *vf;
int speed = 0;
int ret = 0; int ret = 0;
/* validate the request */ /* validate the request */
...@@ -3160,48 +3161,10 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, ...@@ -3160,48 +3161,10 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
goto error; goto error;
} }
switch (pf->hw.phy.link_info.link_speed) { ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
case I40E_LINK_SPEED_40GB: if (ret)
speed = 40000;
break;
case I40E_LINK_SPEED_25GB:
speed = 25000;
break;
case I40E_LINK_SPEED_20GB:
speed = 20000;
break;
case I40E_LINK_SPEED_10GB:
speed = 10000;
break;
case I40E_LINK_SPEED_1GB:
speed = 1000;
break;
default:
break;
}
if (max_tx_rate > speed) {
dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
max_tx_rate, vf->vf_id);
ret = -EINVAL;
goto error; goto error;
}
if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
max_tx_rate = 50;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled*/
ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
max_tx_rate / I40E_BW_CREDIT_DIVISOR,
I40E_MAX_BW_INACTIVE_ACCUM, NULL);
if (ret) {
dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
ret);
ret = -EIO;
goto error;
}
vf->tx_rate = max_tx_rate; vf->tx_rate = max_tx_rate;
error: error:
return ret; return ret;
......
...@@ -407,6 +407,7 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num) ...@@ -407,6 +407,7 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
vfres.num_queue_pairs = num; vfres.num_queue_pairs = num;
adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
(u8 *)&vfres, sizeof(vfres)); (u8 *)&vfres, sizeof(vfres));
} }
...@@ -1098,15 +1099,13 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ...@@ -1098,15 +1099,13 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case VIRTCHNL_OP_REQUEST_QUEUES: { case VIRTCHNL_OP_REQUEST_QUEUES: {
struct virtchnl_vf_res_request *vfres = struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg; (struct virtchnl_vf_res_request *)msg;
if (vfres->num_queue_pairs == adapter->num_req_queues) { if (vfres->num_queue_pairs != adapter->num_req_queues) {
adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
i40evf_schedule_reset(adapter);
} else {
dev_info(&adapter->pdev->dev, dev_info(&adapter->pdev->dev,
"Requested %d queues, PF can support %d\n", "Requested %d queues, PF can support %d\n",
adapter->num_req_queues, adapter->num_req_queues,
vfres->num_queue_pairs); vfres->num_queue_pairs);
adapter->num_req_queues = 0; adapter->num_req_queues = 0;
adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
} }
} }
break; break;
......
...@@ -333,8 +333,8 @@ struct virtchnl_vsi_queue_config_info { ...@@ -333,8 +333,8 @@ struct virtchnl_vsi_queue_config_info {
* additional queues must be negotiated. This is a best effort request as it * additional queues must be negotiated. This is a best effort request as it
* is possible the PF does not have enough queues left to support the request. * is possible the PF does not have enough queues left to support the request.
* If the PF cannot support the number requested it will respond with the * If the PF cannot support the number requested it will respond with the
* maximum number it is able to support; otherwise it will respond with the * maximum number it is able to support. If the request is successful, PF will
* number requested. * then reset the VF to institute required changes.
*/ */
/* VF resource request */ /* VF resource request */
......
...@@ -546,6 +546,15 @@ struct tc_cls_bpf_offload { ...@@ -546,6 +546,15 @@ struct tc_cls_bpf_offload {
u32 gen_flags; u32 gen_flags;
}; };
struct tc_mqprio_qopt_offload {
/* struct tc_mqprio_qopt must always be the first element */
struct tc_mqprio_qopt qopt;
u16 mode;
u16 shaper;
u32 flags;
u64 min_rate[TC_QOPT_MAX_QUEUE];
u64 max_rate[TC_QOPT_MAX_QUEUE];
};
/* This structure holds cookie structure that is passed from user /* This structure holds cookie structure that is passed from user
* to the kernel for actions and classifiers * to the kernel for actions and classifiers
......
...@@ -625,6 +625,22 @@ enum { ...@@ -625,6 +625,22 @@ enum {
#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1) #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
enum {
TC_MQPRIO_MODE_DCB,
TC_MQPRIO_MODE_CHANNEL,
__TC_MQPRIO_MODE_MAX
};
#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
enum {
TC_MQPRIO_SHAPER_DCB,
TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */
__TC_MQPRIO_SHAPER_MAX
};
#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
struct tc_mqprio_qopt { struct tc_mqprio_qopt {
__u8 num_tc; __u8 num_tc;
__u8 prio_tc_map[TC_QOPT_BITMASK + 1]; __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
...@@ -633,6 +649,22 @@ struct tc_mqprio_qopt { ...@@ -633,6 +649,22 @@ struct tc_mqprio_qopt {
__u16 offset[TC_QOPT_MAX_QUEUE]; __u16 offset[TC_QOPT_MAX_QUEUE];
}; };
#define TC_MQPRIO_F_MODE 0x1
#define TC_MQPRIO_F_SHAPER 0x2
#define TC_MQPRIO_F_MIN_RATE 0x4
#define TC_MQPRIO_F_MAX_RATE 0x8
enum {
TCA_MQPRIO_UNSPEC,
TCA_MQPRIO_MODE,
TCA_MQPRIO_SHAPER,
TCA_MQPRIO_MIN_RATE64,
TCA_MQPRIO_MAX_RATE64,
__TCA_MQPRIO_MAX,
};
#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
/* SFB */ /* SFB */
enum { enum {
......
...@@ -18,10 +18,16 @@ ...@@ -18,10 +18,16 @@
#include <net/netlink.h> #include <net/netlink.h>
#include <net/pkt_sched.h> #include <net/pkt_sched.h>
#include <net/sch_generic.h> #include <net/sch_generic.h>
#include <net/pkt_cls.h>
struct mqprio_sched { struct mqprio_sched {
struct Qdisc **qdiscs; struct Qdisc **qdiscs;
u16 mode;
u16 shaper;
int hw_offload; int hw_offload;
u32 flags;
u64 min_rate[TC_QOPT_MAX_QUEUE];
u64 max_rate[TC_QOPT_MAX_QUEUE];
}; };
static void mqprio_destroy(struct Qdisc *sch) static void mqprio_destroy(struct Qdisc *sch)
...@@ -39,9 +45,17 @@ static void mqprio_destroy(struct Qdisc *sch) ...@@ -39,9 +45,17 @@ static void mqprio_destroy(struct Qdisc *sch)
} }
if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) { if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
struct tc_mqprio_qopt mqprio = {}; struct tc_mqprio_qopt_offload mqprio = { { 0 } };
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &mqprio); switch (priv->mode) {
case TC_MQPRIO_MODE_DCB:
case TC_MQPRIO_MODE_CHANNEL:
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO,
&mqprio);
break;
default:
return;
}
} else { } else {
netdev_set_num_tc(dev, 0); netdev_set_num_tc(dev, 0);
} }
...@@ -97,6 +111,26 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt) ...@@ -97,6 +111,26 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
return 0; return 0;
} }
static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
[TCA_MQPRIO_MODE] = { .len = sizeof(u16) },
[TCA_MQPRIO_SHAPER] = { .len = sizeof(u16) },
[TCA_MQPRIO_MIN_RATE64] = { .type = NLA_NESTED },
[TCA_MQPRIO_MAX_RATE64] = { .type = NLA_NESTED },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
const struct nla_policy *policy, int len)
{
int nested_len = nla_len(nla) - NLA_ALIGN(len);
if (nested_len >= nla_attr_size(0))
return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
nested_len, policy, NULL);
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
return 0;
}
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{ {
struct net_device *dev = qdisc_dev(sch); struct net_device *dev = qdisc_dev(sch);
...@@ -105,6 +139,10 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) ...@@ -105,6 +139,10 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
struct Qdisc *qdisc; struct Qdisc *qdisc;
int i, err = -EOPNOTSUPP; int i, err = -EOPNOTSUPP;
struct tc_mqprio_qopt *qopt = NULL; struct tc_mqprio_qopt *qopt = NULL;
struct nlattr *tb[TCA_MQPRIO_MAX + 1];
struct nlattr *attr;
int rem;
int len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE); BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK); BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
...@@ -122,6 +160,58 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) ...@@ -122,6 +160,58 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
if (mqprio_parse_opt(dev, qopt)) if (mqprio_parse_opt(dev, qopt))
return -EINVAL; return -EINVAL;
if (len > 0) {
err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
sizeof(*qopt));
if (err < 0)
return err;
if (!qopt->hw)
return -EINVAL;
if (tb[TCA_MQPRIO_MODE]) {
priv->flags |= TC_MQPRIO_F_MODE;
priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
}
if (tb[TCA_MQPRIO_SHAPER]) {
priv->flags |= TC_MQPRIO_F_SHAPER;
priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
}
if (tb[TCA_MQPRIO_MIN_RATE64]) {
if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
return -EINVAL;
i = 0;
nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
rem) {
if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
return -EINVAL;
if (i >= qopt->num_tc)
break;
priv->min_rate[i] = *(u64 *)nla_data(attr);
i++;
}
priv->flags |= TC_MQPRIO_F_MIN_RATE;
}
if (tb[TCA_MQPRIO_MAX_RATE64]) {
if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
return -EINVAL;
i = 0;
nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
rem) {
if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
return -EINVAL;
if (i >= qopt->num_tc)
break;
priv->max_rate[i] = *(u64 *)nla_data(attr);
i++;
}
priv->flags |= TC_MQPRIO_F_MAX_RATE;
}
}
/* pre-allocate qdisc, attachment can't fail */ /* pre-allocate qdisc, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL); GFP_KERNEL);
...@@ -146,14 +236,36 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) ...@@ -146,14 +236,36 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
* supplied and verified mapping * supplied and verified mapping
*/ */
if (qopt->hw) { if (qopt->hw) {
struct tc_mqprio_qopt mqprio = *qopt; struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, switch (priv->mode) {
case TC_MQPRIO_MODE_DCB:
if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
return -EINVAL;
break;
case TC_MQPRIO_MODE_CHANNEL:
mqprio.flags = priv->flags;
if (priv->flags & TC_MQPRIO_F_MODE)
mqprio.mode = priv->mode;
if (priv->flags & TC_MQPRIO_F_SHAPER)
mqprio.shaper = priv->shaper;
if (priv->flags & TC_MQPRIO_F_MIN_RATE)
for (i = 0; i < mqprio.qopt.num_tc; i++)
mqprio.min_rate[i] = priv->min_rate[i];
if (priv->flags & TC_MQPRIO_F_MAX_RATE)
for (i = 0; i < mqprio.qopt.num_tc; i++)
mqprio.max_rate[i] = priv->max_rate[i];
break;
default:
return -EINVAL;
}
err = dev->netdev_ops->ndo_setup_tc(dev,
TC_SETUP_MQPRIO,
&mqprio); &mqprio);
if (err) if (err)
return err; return err;
priv->hw_offload = mqprio.hw; priv->hw_offload = mqprio.qopt.hw;
} else { } else {
netdev_set_num_tc(dev, qopt->num_tc); netdev_set_num_tc(dev, qopt->num_tc);
for (i = 0; i < qopt->num_tc; i++) for (i = 0; i < qopt->num_tc; i++)
...@@ -223,11 +335,51 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, ...@@ -223,11 +335,51 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
return 0; return 0;
} }
static int dump_rates(struct mqprio_sched *priv,
struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
struct nlattr *nest;
int i;
if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
nest = nla_nest_start(skb, TCA_MQPRIO_MIN_RATE64);
if (!nest)
goto nla_put_failure;
for (i = 0; i < opt->num_tc; i++) {
if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
sizeof(priv->min_rate[i]),
&priv->min_rate[i]))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
nest = nla_nest_start(skb, TCA_MQPRIO_MAX_RATE64);
if (!nest)
goto nla_put_failure;
for (i = 0; i < opt->num_tc; i++) {
if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
sizeof(priv->max_rate[i]),
&priv->max_rate[i]))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
return 0;
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{ {
struct net_device *dev = qdisc_dev(sch); struct net_device *dev = qdisc_dev(sch);
struct mqprio_sched *priv = qdisc_priv(sch); struct mqprio_sched *priv = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb); struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
struct tc_mqprio_qopt opt = { 0 }; struct tc_mqprio_qopt opt = { 0 };
struct Qdisc *qdisc; struct Qdisc *qdisc;
unsigned int i; unsigned int i;
...@@ -258,12 +410,25 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -258,12 +410,25 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.offset[i] = dev->tc_to_txq[i].offset; opt.offset[i] = dev->tc_to_txq[i].offset;
} }
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
goto nla_put_failure;
if ((priv->flags & TC_MQPRIO_F_MODE) &&
nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
goto nla_put_failure;
if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
goto nla_put_failure;
if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
priv->flags & TC_MQPRIO_F_MAX_RATE) &&
(dump_rates(priv, &opt, skb) != 0))
goto nla_put_failure; goto nla_put_failure;
return skb->len; return nla_nest_end(skb, nla);
nla_put_failure: nla_put_failure:
nlmsg_trim(skb, b); nlmsg_trim(skb, nla);
return -1; return -1;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment