Commit 18af9626 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-05-02

This series contains updates to the ice driver only.

Anirudh introduces the framework to store queue specific information in
the VSI queue contexts.  This will allow future changes to extend the
structure with additional queue specific information.
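
At the heart of that framework is a small per-queue context stored per
VSI and per traffic class; a minimal sketch of the idea (the full
definitions appear in the switch-header hunk near the end of the diff
below):

	/* per-queue context, kept per VSI and per TC; later patches can
	 * grow this structure with more per-queue state
	 */
	struct ice_q_ctx {
		u16 q_handle;	/* software queue handle */
	};

	/* inside struct ice_vsi_ctx */
	u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
	struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];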

Akeem adds an additional check so that if there is no queue to disable
when attempting to disable a queue, we return a configuration error
without acquiring the lock.  Also fixed an issue where non-trusted VFs
were able to add more than the permitted number of VLANs.

Bruce removes unreachable code and updates the function to return void,
since it could never return anything but success.

Brett provides most of the changes in the series, starting by reducing
the scope of the error variable used and improving the debug message if
we fail to configure a receive queue.  Updates the driver to use a
macro instead of repeating the same 'for' loop throughout the driver,
which helps with readability (see the sketch after this paragraph).
Fixed an issue where users were led to believe they could set the
rx-usecs-high value, yet changes to this value would not stick because
the code to apply them was not yet implemented, so implement the
missing code.  Found an unnecessary wait when disabling queues, so
remove it.  Improved a wasteful addition operation in our hot path by
adding a member to the ice_q_vector structure, which stores the
calculated vector hardware index, along with the changes needed to use
the new member.  Refactored the link event flow to make it cleaner and
clearer.
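
The replaced loop pattern is visible throughout the diff below:
for (i = 0; i < vsi->num_q_vectors; i++) becomes
ice_for_each_q_vector(vsi, i).  The macro's definition is not part of
this diff; presumably it is just a thin wrapper, along these lines:

	/* hypothetical sketch -- the real definition lives in the
	 * driver's headers and is not shown in this diff
	 */
	#define ice_for_each_q_vector(vsi, i) \
		for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)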

Maciej updates the array index when stopping transmit rings, so that we
process every ring in the VSI, not just the rings in a given traffic
class.

Paul adds support for setting 52 byte RSS hash keys.
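
The two key-size constants already in the driver account for exactly
that, which the new ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE macro in the
diff below spells out as their sum:

	40 (0x28, standard RSS key) + 12 (0xC, extended hash key) = 52 bytes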

Md Fahad cleans up a runtime change to the PFINT_OICR_ENA register,
since the interrupt handlers take care of resetting the bit, if
necessary.
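
The mechanism, as seen in the diff below, is to consume a pending-event
bit atomically with test_and_clear_bit() instead of testing it,
clearing it later, and rewriting PFINT_OICR_ENA by hand:

	/* before: test now, clear and re-arm the cause register later */
	if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;

	/* after: consume the event in one atomic step; the interrupt
	 * handler re-arms the cause if it fires again
	 */
	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;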

Tony adds a missing PHY type, which was causing a warning message about
an unrecognized PHY.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 70bb13a5 20ce2a1a
@@ -297,6 +297,7 @@ struct ice_q_vector {
 	struct ice_vsi *vsi;
 	u16 v_idx;		/* index in the vsi->q_vector array. */
+	u16 reg_idx;
 	u8 num_ring_rx;		/* total number of Rx rings in vector */
 	u8 num_ring_tx;		/* total number of Tx rings in vector */
 	u8 itr_countdown;	/* when 0 should adjust adaptive ITR */
@@ -403,7 +404,7 @@ static inline void
 ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
 		    struct ice_q_vector *q_vector)
 {
-	u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
+	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
 				((struct ice_pf *)hw->back)->hw_oicr_idx;
 	int itr = ICE_ITR_NONE;
 	u32 val;
@@ -419,6 +420,26 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
 	wr32(hw, GLINT_DYN_CTL(vector), val);
 }
 
+/**
+ * ice_find_vsi_by_type - Find and return VSI of a given type
+ * @pf: PF to search for VSI
+ * @type: Value indicating type of VSI we are looking for
+ */
+static inline struct ice_vsi *
+ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
+{
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
+		struct ice_vsi *vsi = pf->vsi[i];
+
+		if (vsi && vsi->type == type)
+			return vsi;
+	}
+
+	return NULL;
+}
+
 void ice_set_ethtool_ops(struct net_device *netdev);
 int ice_up(struct ice_vsi *vsi);
 int ice_down(struct ice_vsi *vsi);
...
@@ -1291,6 +1291,9 @@ struct ice_aqc_get_set_rss_key {
 #define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE	0x28
 #define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE	0xC
+#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \
+	(ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \
+	 ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)
 
 struct ice_aqc_get_set_rss_keys {
 	u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
...
@@ -647,7 +647,7 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
  * Determines the itr/intrl granularities based on the maximum aggregate
  * bandwidth according to the device's configuration during power-on.
  */
-static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
+static void ice_get_itr_intrl_gran(struct ice_hw *hw)
 {
 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
@@ -664,13 +664,7 @@ static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
 		break;
-	default:
-		ice_debug(hw, ICE_DBG_INIT,
-			  "Failed to determine itr/intrl granularity\n");
-		return ICE_ERR_CFG;
 	}
-
-	return 0;
 }
 
 /**
@@ -697,9 +691,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	if (status)
 		return status;
 
-	status = ice_get_itr_intrl_gran(hw);
-	if (status)
-		return status;
+	ice_get_itr_intrl_gran(hw);
 
 	status = ice_init_all_ctrlq(hw);
 	if (status)
@@ -2790,11 +2782,36 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 	return 0;
 }
 
+/**
+ * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @q_handle: software queue handle
+ */
+static struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
+{
+	struct ice_vsi_ctx *vsi;
+	struct ice_q_ctx *q_ctx;
+
+	vsi = ice_get_vsi_ctx(hw, vsi_handle);
+	if (!vsi)
+		return NULL;
+	if (q_handle >= vsi->num_lan_q_entries[tc])
+		return NULL;
+	if (!vsi->lan_q_ctx[tc])
+		return NULL;
+	q_ctx = vsi->lan_q_ctx[tc];
+	return &q_ctx[q_handle];
+}
+
 /**
  * ice_ena_vsi_txq
  * @pi: port information structure
  * @vsi_handle: software VSI handle
  * @tc: TC number
+ * @q_handle: software queue handle
  * @num_qgrps: Number of added queue groups
  * @buf: list of queue groups to be added
  * @buf_size: size of buffer for indirect command
@@ -2803,12 +2820,13 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
  * This function adds one LAN queue
  */
 enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
-		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
 		struct ice_sq_cd *cd)
 {
 	struct ice_aqc_txsched_elem_data node = { 0 };
 	struct ice_sched_node *parent;
+	struct ice_q_ctx *q_ctx;
 	enum ice_status status;
 	struct ice_hw *hw;
 
@@ -2825,6 +2843,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
 	mutex_lock(&pi->sched_lock);
 
+	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
+	if (!q_ctx) {
+		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
+			  q_handle);
+		status = ICE_ERR_PARAM;
+		goto ena_txq_exit;
+	}
+
 	/* find a parent node */
 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
 					    ICE_SCHED_NODE_OWNER_LAN);
@@ -2851,7 +2877,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
 	/* add the LAN queue */
 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
 	if (status) {
-		ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n",
+		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
 			  le16_to_cpu(buf->txqs[0].txq_id),
 			  hw->adminq.sq_last_status);
 		goto ena_txq_exit;
@@ -2859,6 +2885,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
 
 	node.node_teid = buf->txqs[0].q_teid;
 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+	q_ctx->q_handle = q_handle;
 
 	/* add a leaf node into schduler tree queue layer */
 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
@@ -2871,7 +2898,10 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
 /**
  * ice_dis_vsi_txq
  * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
  * @num_queues: number of queues
+ * @q_handles: pointer to software queue handle array
  * @q_ids: pointer to the q_id array
  * @q_teids: pointer to queue node teids
  * @rst_src: if called due to reset, specifies the reset source
@@ -2881,25 +2911,30 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
  * This function removes queues and their corresponding nodes in SW DB
  */
 enum ice_status
-ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
-		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+		u16 *q_handles, u16 *q_ids, u32 *q_teids,
+		enum ice_disq_rst_src rst_src, u16 vmvf_num,
 		struct ice_sq_cd *cd)
 {
 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
 	struct ice_aqc_dis_txq_item qg_list;
+	struct ice_q_ctx *q_ctx;
 	u16 i;
 
 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
 		return ICE_ERR_CFG;
 
-	/* if queue is disabled already yet the disable queue command has to be
-	 * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
-	 * any queue information
-	 */
-	if (!num_queues && rst_src)
-		return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
-					  NULL);
+	if (!num_queues) {
+		/* if queue is disabled already yet the disable queue command
+		 * has to be sent to complete the VF reset, then call
+		 * ice_aq_dis_lan_txq without any queue information
+		 */
+		if (rst_src)
+			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
+		return ICE_ERR_CFG;
+	}
 
 	mutex_lock(&pi->sched_lock);
 
@@ -2909,6 +2944,17 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
 		if (!node)
 			continue;
+		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+		if (!q_ctx) {
+			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
+				  q_handles[i]);
+			continue;
+		}
+		if (q_ctx->q_handle != q_handles[i]) {
+			ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+				  q_ctx->q_handle, q_handles[i]);
+			continue;
+		}
 		qg_list.parent_teid = node->info.parent_teid;
 		qg_list.num_qs = 1;
 		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
@@ -2919,6 +2965,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
 		if (status)
 			break;
 		ice_free_sched_node(pi, node);
+		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
 	}
 	mutex_unlock(&pi->sched_lock);
 	return status;
...
@@ -99,15 +99,16 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
 		       struct ice_sq_cd *cd);
 enum ice_status
-ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
-		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
-		struct ice_sq_cd *cmd_details);
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+		u16 *q_handle, u16 *q_ids, u32 *q_teids,
+		enum ice_disq_rst_src rst_src, u16 vmvf_num,
+		struct ice_sq_cd *cd);
 enum ice_status
 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
 		u16 *max_lanqs);
 enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
-		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
 		struct ice_sq_cd *cd);
 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
 void ice_replay_post(struct ice_hw *hw);
...
@@ -1034,6 +1034,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
 						     25000baseCR_Full);
 		break;
 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     25000baseCR_Full);
 		break;
@@ -2228,12 +2229,18 @@ static int
 ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
 		    struct ice_ring_container *rc)
 {
-	struct ice_pf *pf = rc->ring->vsi->back;
+	struct ice_pf *pf;
+
+	if (!rc->ring)
+		return -EINVAL;
+
+	pf = rc->ring->vsi->back;
 
 	switch (c_type) {
 	case ICE_RX_CONTAINER:
 		ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
 		ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+		ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
 		break;
 	case ICE_TX_CONTAINER:
 		ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
@@ -2342,6 +2349,23 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 
 	switch (c_type) {
 	case ICE_RX_CONTAINER:
+		if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
+		    (ec->rx_coalesce_usecs_high &&
+		     ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
+			netdev_info(vsi->netdev,
+				    "Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n",
+				    pf->hw.intrl_gran, ICE_MAX_INTRL);
+			return -EINVAL;
+		}
+
+		if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
+			rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
+			wr32(&pf->hw, GLINT_RATE(vsi->hw_base_vector +
+						 rc->ring->q_vector->v_idx),
+			     ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high,
+						   pf->hw.intrl_gran));
+		}
+
 		if (ec->rx_coalesce_usecs != itr_setting &&
 		    ec->use_adaptive_rx_coalesce) {
 			netdev_info(vsi->netdev,
@@ -2364,6 +2388,12 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 		}
 		break;
 	case ICE_TX_CONTAINER:
+		if (ec->tx_coalesce_usecs_high) {
+			netdev_info(vsi->netdev,
+				    "setting tx-usecs-high is not supported\n");
+			return -EINVAL;
+		}
+
 		if (ec->tx_coalesce_usecs != itr_setting &&
 		    ec->use_adaptive_tx_coalesce) {
 			netdev_info(vsi->netdev,
...
[diff for one file collapsed in this view and not shown]
@@ -80,4 +80,5 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
 int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
 
 #endif /* !_ICE_LIB_H_ */
@@ -590,6 +590,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 	const char *speed;
 	const char *fc;
 
+	if (!vsi)
+		return;
+
 	if (vsi->current_isup == isup)
 		return;
 
@@ -659,15 +662,16 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
  */
 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
 {
-	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+	if (!vsi)
+		return;
+
+	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
 		return;
 
 	if (vsi->type == ICE_VSI_PF) {
-		if (!vsi->netdev) {
-			dev_dbg(&vsi->back->pdev->dev,
-				"vsi->netdev is not initialized!\n");
+		if (link_up == netif_carrier_ok(vsi->netdev))
 			return;
-		}
+
 		if (link_up) {
 			netif_carrier_on(vsi->netdev);
 			netif_tx_wake_all_queues(vsi->netdev);
@@ -682,61 +686,51 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
  * ice_link_event - process the link event
  * @pf: pf that the link event is associated with
  * @pi: port_info for the port that the link event is associated with
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
  *
- * Returns -EIO if ice_get_link_status() fails
- * Returns 0 on success
+ * Returns 0 on success and negative on failure
  */
 static int
-ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
+ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
+	       u16 link_speed)
 {
-	u8 new_link_speed, old_link_speed;
 	struct ice_phy_info *phy_info;
-	bool new_link_same_as_old;
-	bool new_link, old_link;
-	u8 lport;
-	u16 v;
+	struct ice_vsi *vsi;
+	u16 old_link_speed;
+	bool old_link;
+	int result;
 
 	phy_info = &pi->phy;
 	phy_info->link_info_old = phy_info->link_info;
-	/* Force ice_get_link_status() to update link info */
-	phy_info->get_link_info = true;
 
-	old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
+	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
 	old_link_speed = phy_info->link_info_old.link_speed;
 
-	lport = pi->lport;
-	if (ice_get_link_status(pi, &new_link)) {
+	/* update the link info structures and re-enable link events,
+	 * don't bail on failure due to other book keeping needed
+	 */
+	result = ice_update_link_info(pi);
+	if (result)
 		dev_dbg(&pf->pdev->dev,
-			"Could not get link status for port %d\n", lport);
-		return -EIO;
-	}
+			"Failed to update link status and re-enable link events for port %d\n",
+			pi->lport);
 
-	new_link_speed = phy_info->link_info.link_speed;
-	new_link_same_as_old = (new_link == old_link &&
-				new_link_speed == old_link_speed);
+	/* if the old link up/down and speed is the same as the new */
+	if (link_up == old_link && link_speed == old_link_speed)
+		return result;
 
-	ice_for_each_vsi(pf, v) {
-		struct ice_vsi *vsi = pf->vsi[v];
+	vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
+	if (!vsi || !vsi->port_info)
+		return -EINVAL;
 
-		if (!vsi || !vsi->port_info)
-			continue;
+	ice_vsi_link_event(vsi, link_up);
+	ice_print_link_msg(vsi, link_up);
 
-		if (new_link_same_as_old &&
-		    (test_bit(__ICE_DOWN, vsi->state) ||
-		    new_link == netif_carrier_ok(vsi->netdev)))
-			continue;
-
-		if (vsi->port_info->lport == lport) {
-			ice_print_link_msg(vsi, new_link);
-			ice_vsi_link_event(vsi, new_link);
-		}
-	}
-
-	if (!new_link_same_as_old && pf->num_alloc_vfs)
+	if (pf->num_alloc_vfs)
 		ice_vc_notify_link_state(pf);
 
-	return 0;
+	return result;
 }
 
 /**
@@ -801,20 +795,23 @@ static int ice_init_link_events(struct ice_port_info *pi)
 /**
  * ice_handle_link_event - handle link event via ARQ
  * @pf: pf that the link event is associated with
- *
- * Return -EINVAL if port_info is null
- * Return status on success
+ * @event: event structure containing link status info
  */
-static int ice_handle_link_event(struct ice_pf *pf)
+static int
+ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
 {
+	struct ice_aqc_get_link_status_data *link_data;
 	struct ice_port_info *port_info;
 	int status;
 
+	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
 	port_info = pf->hw.port_info;
 	if (!port_info)
 		return -EINVAL;
-	status = ice_link_event(pf, port_info);
+
+	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
 	if (status)
 		dev_dbg(&pf->pdev->dev,
 			"Could not process link event, error %d\n", status);
@@ -926,7 +923,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 		switch (opcode) {
 		case ice_aqc_opc_get_link_status:
-			if (ice_handle_link_event(pf))
+			if (ice_handle_link_event(pf, &event))
 				dev_err(&pf->pdev->dev,
 					"Could not handle link event\n");
 			break;
@@ -1096,7 +1093,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 	u32 reg;
 	int i;
 
-	if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
 		return;
 
 	/* find what triggered the MDD event */
@@ -1229,12 +1226,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 		}
 	}
 
-	/* re-enable MDD interrupt cause */
-	clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
-	reg = rd32(hw, PFINT_OICR_ENA);
-	reg |= PFINT_OICR_MAL_DETECT_M;
-	wr32(hw, PFINT_OICR_ENA, reg);
-	ice_flush(hw);
 }
 
 /**
@@ -1338,7 +1329,7 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi)
 	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
 		int i;
 
-		for (i = 0; i < vsi->num_q_vectors; i++)
+		ice_for_each_q_vector(vsi, i)
 			ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
 	}
 
@@ -1523,7 +1514,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
 			rd32(hw, PFHMC_ERRORDATA));
 	}
 
-	/* Report and mask off any remaining unexpected interrupts */
+	/* Report any remaining unexpected interrupts */
 	oicr &= ena_mask;
 	if (oicr) {
 		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
@@ -1537,12 +1528,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
 			set_bit(__ICE_PFR_REQ, pf->state);
 			ice_service_task_schedule(pf);
 		}
-		ena_mask &= ~oicr;
 	}
 	ret = IRQ_HANDLED;
 
-	/* re-enable interrupt causes that are not handled during this pass */
-	wr32(hw, PFINT_OICR_ENA, ena_mask);
 	if (!test_bit(__ICE_DOWN, pf->state)) {
 		ice_service_task_schedule(pf);
 		ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -1601,23 +1589,23 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
 /**
  * ice_ena_ctrlq_interrupts - enable control queue interrupts
  * @hw: pointer to HW structure
- * @v_idx: HW vector index to associate the control queue interrupts with
+ * @reg_idx: HW vector index to associate the control queue interrupts with
  */
-static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 v_idx)
+static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
 {
 	u32 val;
 
-	val = ((v_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
 	       PFINT_OICR_CTL_CAUSE_ENA_M);
 	wr32(hw, PFINT_OICR_CTL, val);
 
 	/* enable Admin queue Interrupt causes */
-	val = ((v_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
 	       PFINT_FW_CTL_CAUSE_ENA_M);
 	wr32(hw, PFINT_FW_CTL, val);
 
 	/* enable Mailbox queue Interrupt causes */
-	val = ((v_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
 	       PFINT_MBX_CTL_CAUSE_ENA_M);
 	wr32(hw, PFINT_MBX_CTL, val);
 
@@ -1705,7 +1693,7 @@ void ice_napi_del(struct ice_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+	ice_for_each_q_vector(vsi, v_idx)
 		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
 }
 
@@ -1724,7 +1712,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+	ice_for_each_q_vector(vsi, v_idx)
 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
 			       ice_napi_poll, NAPI_POLL_WEIGHT);
 }
@@ -2960,7 +2948,7 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+	ice_for_each_q_vector(vsi, q_idx) {
 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
 
 		if (q_vector->rx.ring || q_vector->tx.ring)
@@ -3334,7 +3322,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+	ice_for_each_q_vector(vsi, q_idx) {
 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
 
 		if (q_vector->rx.ring || q_vector->tx.ring)
@@ -4223,8 +4211,7 @@ static void ice_tx_timeout(struct net_device *netdev)
 		/* Read interrupt register */
 		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
 			val = rd32(hw,
-				   GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
-						 tx_ring->vsi->hw_base_vector));
+				   GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
 
 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
 			    vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
...
@@ -532,6 +532,50 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
 	return status;
 }
 
+/**
+ * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+	struct ice_vsi_ctx *vsi_ctx;
+	struct ice_q_ctx *q_ctx;
+
+	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+	if (!vsi_ctx)
+		return ICE_ERR_PARAM;
+	/* allocate LAN queue contexts */
+	if (!vsi_ctx->lan_q_ctx[tc]) {
+		vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
+						      new_numqs,
+						      sizeof(*q_ctx),
+						      GFP_KERNEL);
+		if (!vsi_ctx->lan_q_ctx[tc])
+			return ICE_ERR_NO_MEMORY;
+		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+		return 0;
+	}
+	/* num queues are increased, update the queue contexts */
+	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
+		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
+
+		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+				     sizeof(*q_ctx), GFP_KERNEL);
+		if (!q_ctx)
+			return ICE_ERR_NO_MEMORY;
+		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
+		       prev_num * sizeof(*q_ctx));
+		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
+		vsi_ctx->lan_q_ctx[tc] = q_ctx;
+		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+	}
+	return 0;
+}
+
 /**
  * ice_sched_clear_agg - clears the aggregator related information
  * @hw: pointer to the hardware structure
@@ -1403,14 +1447,14 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 	if (!vsi_ctx)
 		return ICE_ERR_PARAM;
 
-	if (owner == ICE_SCHED_NODE_OWNER_LAN)
-		prev_numqs = vsi_ctx->sched.max_lanq[tc];
-	else
-		return ICE_ERR_PARAM;
+	prev_numqs = vsi_ctx->sched.max_lanq[tc];
 
 	/* num queues are not changed or less than the previous number */
 	if (new_numqs <= prev_numqs)
 		return status;
+	status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+	if (status)
+		return status;
 
 	if (new_numqs)
 		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
 	/* Keep the max number of queue configuration all the time. Update the
...
@@ -328,6 +328,27 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
 	hw->vsi_ctx[vsi_handle] = vsi;
 }
 
+/**
+ * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ */
+static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+	struct ice_vsi_ctx *vsi;
+	u8 i;
+
+	vsi = ice_get_vsi_ctx(hw, vsi_handle);
+	if (!vsi)
+		return;
+	ice_for_each_traffic_class(i) {
+		if (vsi->lan_q_ctx[i]) {
+			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
+			vsi->lan_q_ctx[i] = NULL;
+		}
+	}
+}
+
 /**
  * ice_clear_vsi_ctx - clear the VSI context entry
  * @hw: pointer to the HW struct
@@ -341,6 +362,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
 
 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
 	if (vsi) {
+		ice_clear_vsi_q_ctx(hw, vsi_handle);
 		devm_kfree(ice_hw_to_dev(hw), vsi);
 		hw->vsi_ctx[vsi_handle] = NULL;
 	}
...
@@ -9,6 +9,13 @@
 #define ICE_SW_CFG_MAX_BUF_LEN 2048
 #define ICE_DFLT_VSI_INVAL 0xff
 #define ICE_VSI_INVAL_ID 0xffff
+#define ICE_INVAL_Q_HANDLE 0xFFFF
+
+/* VSI queue context structure */
+struct ice_q_ctx {
+	u16 q_handle;
+};
 
 /* VSI context structure for add/get/update/free operations */
 struct ice_vsi_ctx {
@@ -20,6 +27,8 @@ struct ice_vsi_ctx {
 	struct ice_sched_vsi_info sched;
 	u8 alloc_from_pool;
 	u8 vf_num;
+	u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
+	struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
 };
 
 enum ice_sw_fwd_act_type {
...
@@ -1391,7 +1391,7 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
 
 	if (!test_bit(__ICE_DOWN, vsi->state))
 		wr32(&vsi->back->hw,
-		     GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx),
+		     GLINT_DYN_CTL(q_vector->reg_idx),
 		     itr_val);
 }
...
@@ -142,6 +142,7 @@ enum ice_rx_dtype {
 #define ICE_ITR_ADAPTIVE_BULK	0x0000
 
 #define ICE_DFLT_INTRL	0
+#define ICE_MAX_INTRL	236
 
 /* Legacy or Advanced Mode Queue */
 #define ICE_TX_ADVANCED	0
...
@@ -996,8 +996,8 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
 		/* Call Disable LAN Tx queue AQ call even when queues are not
 		 * enabled. This is needed for successful completiom of VFR
 		 */
-		ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET,
-				vf->vf_id, NULL);
+		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+				NULL, ICE_VF_RESET, vf->vf_id, NULL);
 	}
 
 	hw = &pf->hw;
@@ -1273,21 +1273,10 @@ void ice_process_vflr_event(struct ice_pf *pf)
 	int vf_id;
 	u32 reg;
 
-	if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
 	    !pf->num_alloc_vfs)
 		return;
 
-	/* Re-enable the VFLR interrupt cause here, before looking for which
-	 * VF got reset. Otherwise, if another VF gets a reset while the
-	 * first one is being processed, that interrupt will be lost, and
-	 * that VF will be stuck in reset forever.
-	 */
-	reg = rd32(hw, PFINT_OICR_ENA);
-	reg |= PFINT_OICR_VFLR_M;
-	wr32(hw, PFINT_OICR_ENA, reg);
-	ice_flush(hw);
-
-	clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
 	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
 		struct ice_vf *vf = &pf->vf[vf_id];
 		u32 reg_idx, bit_idx;
@@ -2329,7 +2318,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 		/* There is no need to let VF know about being not trusted,
 		 * so we can just return success message here
 		 */
-		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 		goto error_param;
 	}
 
@@ -2370,6 +2358,18 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 		for (i = 0; i < vfl->num_elements; i++) {
 			u16 vid = vfl->vlan_id[i];
 
+			if (!ice_is_vf_trusted(vf) &&
+			    vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+				dev_info(&pf->pdev->dev,
+					 "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+					 vf->vf_id);
+				/* There is no need to let VF know about being
+				 * not trusted, so we can just return success
+				 * message here as well.
+				 */
+				goto error_param;
+			}
+
 			if (ice_vsi_add_vlan(vsi, vid)) {
 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 				goto error_param;
...