Commit a8e600e2 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-07-31

This series contains updates to ice driver only.

Paul adds support for reporting the flow control settings that the link
partner is advertising.
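
With this in place, the negotiated pause configuration shows up in the
link partner section of ethtool output. Illustrative session (interface
name and values are setup-dependent, not taken from this series):

    # ethtool eth0
    ...
    Advertised pause frame use: Symmetric
    ...
    Link partner advertised pause frame use: Symmetric
    ...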

Jake fixes hardware statistics rollover. The statistics registers are
either 32 or 40 bits wide, depending on which register is being read,
so they wrap frequently; the driver now accumulates deltas into 64 bit
software statistics so that values keep counting past a rollover.
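
The accumulation is ordinary wrap-safe delta arithmetic. A minimal
userspace sketch of the idea for the 40 bit case (illustrative names,
not the driver's symbols):

    #include <stdint.h>

    #define HW_WIDTH 40ULL
    #define HW_MASK  ((1ULL << HW_WIDTH) - 1)

    /* Fold one hardware counter sample into a 64 bit software total. */
    static void stat_accumulate(uint64_t sample, uint64_t *prev,
                                uint64_t *total)
    {
        sample &= HW_MASK;

        if (sample >= *prev)
            *total += sample - *prev;
        else /* the counter wrapped since the last read */
            *total += (sample + (1ULL << HW_WIDTH)) - *prev;

        *prev = sample;
    }

For example, a previous sample of 0xFFFFFFFFF0 followed by a read of
0x10 adds 0x20 to the software total instead of counting backwards.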
He also fixes the locking of the control queues, where the locks were
being destroyed and re-initialized at run time.
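
The locking fix splits lock lifetime from queue lifetime:
ice_create_all_ctrlq()/ice_destroy_all_ctrlq() run once per driver
load/unload and own the mutexes, while ice_init_all_ctrlq() and
ice_shutdown_all_ctrlq() may run repeatedly across resets without
touching them. A compile-only sketch of the pattern, using pthreads in
place of kernel mutexes (everything except the four function names
above is made up):

    #include <pthread.h>

    struct ctrlq {
        pthread_mutex_t lock; /* lives for the driver's lifetime */
        int ring_alive;       /* queue state, rebuilt across resets */
    };

    /* load/unload only */
    static void ctrlq_create(struct ctrlq *cq)
    {
        pthread_mutex_init(&cq->lock, NULL);
    }

    static void ctrlq_destroy(struct ctrlq *cq)
    {
        pthread_mutex_destroy(&cq->lock);
    }

    /* safe to call repeatedly at run time, e.g. across resets */
    static void ctrlq_init(struct ctrlq *cq)     { cq->ring_alive = 1; }
    static void ctrlq_shutdown(struct ctrlq *cq) { cq->ring_alive = 0; }

Destroying a mutex that another thread may still be taking is undefined
behavior, which is why the destroy step has to be confined to unload.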

Tony fixes an issue introduced when interrupt tracking was refactored
and the call to ice_vsi_setup_vector_base() was removed from the PF VSI
instead of the VF VSI. He also adds a check to ensure media is attached
before attempting to configure a port.

Brett fixes the receive queue configuration, where prefena (Prefetch
Enable) was left at 0, causing the hardware to fetch descriptors only
when none were free in its cache for a received packet. He also updates
the driver to bump the receive tail once per napi_poll call instead of
up to four times per call, adds port-level receive drop statistics to
ethtool/netlink, and cleans up duplicated receive buffer allocation
code.
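
The tail change amounts to deferring the doorbell write to the end of
the refill loop. A rough sketch of the pattern with hypothetical names
(the real driver also handles descriptor write-back and buffer reuse):

    #include <stdint.h>

    struct rx_ring {
        uint32_t count;          /* ring size, a power of two */
        uint32_t next_to_use;    /* next descriptor slot to refill */
        volatile uint32_t *tail; /* stands in for the MMIO doorbell */
    };

    /* hypothetical helper: post one buffer, 0 on allocation failure */
    static int rx_alloc_buf(struct rx_ring *r, uint32_t idx)
    {
        (void)r; (void)idx;
        return 1;
    }

    static void rx_refill(struct rx_ring *r, uint32_t budget)
    {
        uint32_t ntu = r->next_to_use;

        while (budget--) {
            if (!rx_alloc_buf(r, ntu))
                break;
            ntu = (ntu + 1) & (r->count - 1);
        }

        /* one tail write per napi_poll, not one per small batch */
        if (ntu != r->next_to_use) {
            r->next_to_use = ntu;
            *r->tail = ntu;
        }
    }

The win is fewer posted MMIO writes per poll; previously the tail could
be written up to four times in a single napi_poll invocation.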

Akeem ensures that VFs stay disabled until setup or reset completes. He
modifies the driver to use the allocated number of transmit queues per
VSI when setting up the scheduling tree, rather than the total number
of available transmit queues, and fixes the driver to update the total
number of configured queues after a successful VF request to change its
queue count, before updating the corresponding VSI for that VF. He also
removes flags that are no longer needed.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9b59e39f 3015b8fc
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h

@@ -329,7 +329,6 @@ struct ice_q_vector {
 } ____cacheline_internodealigned_in_smp;
 
 enum ice_pf_flags {
-    ICE_FLAG_MSIX_ENA,
     ICE_FLAG_FLTR_SYNC,
     ICE_FLAG_RSS_ENA,
     ICE_FLAG_SRIOV_ENA,
@@ -337,6 +336,7 @@ enum ice_pf_flags {
     ICE_FLAG_DCB_CAPABLE,
     ICE_FLAG_DCB_ENA,
     ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+    ICE_FLAG_NO_MEDIA,
     ICE_FLAG_ENABLE_FW_LLDP,
     ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
     ICE_PF_FLAGS_NBITS /* must be last */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c

@@ -740,7 +740,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
     ice_get_itr_intrl_gran(hw);
 
-    status = ice_init_all_ctrlq(hw);
+    status = ice_create_all_ctrlq(hw);
     if (status)
         goto err_unroll_cqinit;
@@ -855,7 +855,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 err_unroll_alloc:
     devm_kfree(ice_hw_to_dev(hw), hw->port_info);
 err_unroll_cqinit:
-    ice_shutdown_all_ctrlq(hw);
+    ice_destroy_all_ctrlq(hw);
     return status;
 }
@@ -881,7 +881,7 @@ void ice_deinit_hw(struct ice_hw *hw)
     /* Attempt to disable FW logging before shutting down control queues */
     ice_cfg_fw_log(hw, false);
-    ice_shutdown_all_ctrlq(hw);
+    ice_destroy_all_ctrlq(hw);
 
     /* Clear VSI contexts if not already cleared */
     ice_clear_all_vsi_ctx(hw);
@@ -1078,6 +1078,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
     ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
     ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
     ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
+    ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
     { 0 }
 };
@@ -1088,7 +1089,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
  * @rxq_index: the index of the Rx queue
  *
  * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space
+ * it to HW register space and enables the hardware to prefetch descriptors
+ * instead of only fetching them on demand
  */
 enum ice_status
 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1096,6 +1098,11 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 {
     u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
 
+    if (!rlan_ctx)
+        return ICE_ERR_BAD_PTR;
+
+    rlan_ctx->prefena = 1;
+
     ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
     return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
 }
@@ -3240,40 +3247,44 @@ void ice_replay_post(struct ice_hw *hw)
 /**
  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
  * @hw: ptr to the hardware info
- * @hireg: high 32 bit HW register to read from
- * @loreg: low 32 bit HW register to read from
+ * @reg: offset of 64 bit HW register to read from
  * @prev_stat_loaded: bool to specify if previous stats are loaded
  * @prev_stat: ptr to previous loaded stat value
  * @cur_stat: ptr to current stat value
  */
 void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
-                  bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+                  u64 *prev_stat, u64 *cur_stat)
 {
-    u64 new_data;
-
-    new_data = rd32(hw, loreg);
-    new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+    u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
 
     /* device stats are not reset at PFR, they likely will not be zeroed
-     * when the driver starts. So save the first values read and use them as
-     * offsets to be subtracted from the raw values in order to report stats
-     * that count from zero.
+     * when the driver starts. Thus, save the value from the first read
+     * without adding to the statistic value so that we report stats which
+     * count up from zero.
      */
-    if (!prev_stat_loaded)
+    if (!prev_stat_loaded) {
         *prev_stat = new_data;
+        return;
+    }
+
+    /* Calculate the difference between the new and old values, and then
+     * add it to the software stat value.
+     */
     if (new_data >= *prev_stat)
-        *cur_stat = new_data - *prev_stat;
+        *cur_stat += new_data - *prev_stat;
     else
         /* to manage the potential roll-over */
-        *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
-    *cur_stat &= 0xFFFFFFFFFFULL;
+        *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
+
+    /* Update the previously stored value to prepare for next read */
+    *prev_stat = new_data;
 }
 
 /**
  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
  * @hw: ptr to the hardware info
- * @reg: HW register to read from
+ * @reg: offset of HW register to read from
  * @prev_stat_loaded: bool to specify if previous stats are loaded
  * @prev_stat: ptr to previous loaded stat value
  * @cur_stat: ptr to current stat value
@@ -3287,17 +3298,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
     new_data = rd32(hw, reg);
 
     /* device stats are not reset at PFR, they likely will not be zeroed
-     * when the driver starts. So save the first values read and use them as
-     * offsets to be subtracted from the raw values in order to report stats
-     * that count from zero.
+     * when the driver starts. Thus, save the value from the first read
+     * without adding to the statistic value so that we report stats which
+     * count up from zero.
      */
-    if (!prev_stat_loaded)
+    if (!prev_stat_loaded) {
         *prev_stat = new_data;
+        return;
+    }
+
+    /* Calculate the difference between the new and old values, and then
+     * add it to the software stat value.
+     */
     if (new_data >= *prev_stat)
-        *cur_stat = new_data - *prev_stat;
+        *cur_stat += new_data - *prev_stat;
     else
         /* to manage the potential roll-over */
-        *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+        *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
+
+    /* Update the previously stored value to prepare for next read */
+    *prev_stat = new_data;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h

@@ -17,8 +17,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw);
 void ice_deinit_hw(struct ice_hw *hw);
 enum ice_status ice_check_reset(struct ice_hw *hw);
 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
 enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
 void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_destroy_all_ctrlq(struct ice_hw *hw);
 enum ice_status
 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                   struct ice_rq_event_info *e, u16 *pending);
@@ -123,8 +125,8 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
 void ice_replay_post(struct ice_hw *hw);
 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
 void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
-                  bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+                  u64 *prev_stat, u64 *cur_stat);
 void
 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
                   u64 *prev_stat, u64 *cur_stat);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c

@@ -310,7 +310,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * @cq: pointer to the specific Control queue
  *
  * This is the main initialization routine for the Control Send Queue
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
  * in the cq->structure:
  *     - cq->num_sq_entries
  *     - cq->sq_buf_size
@@ -369,7 +369,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * @cq: pointer to the specific Control queue
  *
  * The main initialization routine for the Admin Receive (Event) Queue.
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
  * in the cq->structure:
  *     - cq->num_rq_entries
  *     - cq->rq_buf_size
@@ -569,14 +569,8 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
     return 0;
 
 init_ctrlq_free_rq:
-    if (cq->rq.count) {
-        ice_shutdown_rq(hw, cq);
-        mutex_destroy(&cq->rq_lock);
-    }
-    if (cq->sq.count) {
-        ice_shutdown_sq(hw, cq);
-        mutex_destroy(&cq->sq_lock);
-    }
+    ice_shutdown_rq(hw, cq);
+    ice_shutdown_sq(hw, cq);
     return status;
 }
@@ -585,12 +579,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
  * @hw: pointer to the hardware structure
  * @q_type: specific Control queue type
  *
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
  * in the cq->structure:
  *     - cq->num_sq_entries
  *     - cq->num_rq_entries
  *     - cq->rq_buf_size
  *     - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks
  */
 static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 {
@@ -616,8 +612,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
         !cq->rq_buf_size || !cq->sq_buf_size) {
         return ICE_ERR_CFG;
     }
-    mutex_init(&cq->sq_lock);
-    mutex_init(&cq->rq_lock);
 
     /* setup SQ command write back timeout */
     cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
@@ -625,7 +619,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
     /* allocate the ATQ */
     ret_code = ice_init_sq(hw, cq);
     if (ret_code)
-        goto init_ctrlq_destroy_locks;
+        return ret_code;
 
     /* allocate the ARQ */
     ret_code = ice_init_rq(hw, cq);
@@ -637,9 +631,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 init_ctrlq_free_sq:
     ice_shutdown_sq(hw, cq);
-init_ctrlq_destroy_locks:
-    mutex_destroy(&cq->sq_lock);
-    mutex_destroy(&cq->rq_lock);
     return ret_code;
 }
@@ -647,12 +638,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
  * ice_init_all_ctrlq - main initialization routine for all control queues
  * @hw: pointer to the hardware structure
  *
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
  * in the cq->structure for all control queues:
  *     - cq->num_sq_entries
  *     - cq->num_rq_entries
  *     - cq->rq_buf_size
  *     - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
  */
 enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
 {
@@ -671,10 +664,48 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
     return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
 
+/**
+ * ice_init_ctrlq_locks - Initialize locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Initializes the send and receive queue locks for a given control queue.
+ */
+static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+    mutex_init(&cq->sq_lock);
+    mutex_init(&cq->rq_lock);
+}
+
+/**
+ * ice_create_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ *     - cq->num_sq_entries
+ *     - cq->num_rq_entries
+ *     - cq->rq_buf_size
+ *     - cq->sq_buf_size
+ *
+ * This function creates all the control queue locks and then calls
+ * ice_init_all_ctrlq. It should be called once during driver load. If the
+ * driver needs to re-initialize control queues at run time it should call
+ * ice_init_all_ctrlq instead.
+ */
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+{
+    ice_init_ctrlq_locks(&hw->adminq);
+    ice_init_ctrlq_locks(&hw->mailboxq);
+
+    return ice_init_all_ctrlq(hw);
+}
+
 /**
  * ice_shutdown_ctrlq - shutdown routine for any control queue
  * @hw: pointer to the hardware structure
  * @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
  */
 static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 {
@@ -693,19 +724,17 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
         return;
     }
 
-    if (cq->sq.count) {
-        ice_shutdown_sq(hw, cq);
-        mutex_destroy(&cq->sq_lock);
-    }
-    if (cq->rq.count) {
-        ice_shutdown_rq(hw, cq);
-        mutex_destroy(&cq->rq_lock);
-    }
+    ice_shutdown_sq(hw, cq);
+    ice_shutdown_rq(hw, cq);
 }
 
 /**
  * ice_shutdown_all_ctrlq - shutdown routine for all control queues
  * @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
  */
 void ice_shutdown_all_ctrlq(struct ice_hw *hw)
 {
@@ -715,6 +744,37 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
     ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
 
+/**
+ * ice_destroy_ctrlq_locks - Destroy locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Destroys the send and receive queue locks for a given control queue.
+ */
+static void
+ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+    mutex_destroy(&cq->sq_lock);
+    mutex_destroy(&cq->rq_lock);
+}
+
+/**
+ * ice_destroy_all_ctrlq - exit routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * This function shuts down all the control queues and then destroys the
+ * control queue locks. It should be called once during driver unload. The
+ * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
+ * reinitialize control queues, such as in response to a reset event.
+ */
+void ice_destroy_all_ctrlq(struct ice_hw *hw)
+{
+    /* shut down all the control queues first */
+    ice_shutdown_all_ctrlq(hw);
+
+    ice_destroy_ctrlq_locks(&hw->adminq);
+    ice_destroy_ctrlq_locks(&hw->mailboxq);
+}
+
 /**
  * ice_clean_sq - cleans Admin send queue (ATQ)
  * @hw: pointer to the hardware structure
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c

@@ -1716,6 +1716,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
                          struct net_device *netdev)
 {
     struct ice_netdev_priv *np = netdev_priv(netdev);
+    struct ice_port_info *pi = np->vsi->port_info;
     struct ethtool_link_ksettings cap_ksettings;
     struct ice_link_status *link_info;
     struct ice_vsi *vsi = np->vsi;
@@ -2040,6 +2041,33 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
         break;
     }
     ks->base.duplex = DUPLEX_FULL;
+
+    if (link_info->an_info & ICE_AQ_AN_COMPLETED)
+        ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+                                             Autoneg);
+
+    /* Set flow control negotiated Rx/Tx pause */
+    switch (pi->fc.current_mode) {
+    case ICE_FC_FULL:
+        ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
+        break;
+    case ICE_FC_TX_PAUSE:
+        ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
+        ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+                                             Asym_Pause);
+        break;
+    case ICE_FC_RX_PAUSE:
+        ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+                                             Asym_Pause);
+        break;
+    case ICE_FC_PFC:
+        /* fall through */
+    default:
+        ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
+        ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
+                                             Asym_Pause);
+        break;
+    }
 }
 
 /**
@@ -2078,9 +2106,12 @@ ice_get_link_ksettings(struct net_device *netdev,
     struct ice_aqc_get_phy_caps_data *caps;
     struct ice_link_status *hw_link_info;
     struct ice_vsi *vsi = np->vsi;
+    enum ice_status status;
+    int err = 0;
 
     ethtool_link_ksettings_zero_link_mode(ks, supported);
     ethtool_link_ksettings_zero_link_mode(ks, advertising);
+    ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
     hw_link_info = &vsi->port_info->phy.link_info;
 
     /* set speed and duplex */
@@ -2125,48 +2156,36 @@ ice_get_link_ksettings(struct net_device *netdev,
     /* flow control is symmetric and always supported */
     ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
 
-    switch (vsi->port_info->fc.req_mode) {
-    case ICE_FC_FULL:
-        ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
-        break;
-    case ICE_FC_TX_PAUSE:
-        ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                             Asym_Pause);
-        break;
-    case ICE_FC_RX_PAUSE:
-        ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
-        ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                             Asym_Pause);
-        break;
-    case ICE_FC_PFC:
-    default:
-        ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
-        ethtool_link_ksettings_del_link_mode(ks, advertising,
-                                             Asym_Pause);
-        break;
-    }
-
-    caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
-    if (!caps)
-        goto done;
-
-    if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP,
-                            caps, NULL))
-        netdev_info(netdev, "Get phy capability failed.\n");
-
-    /* Set supported FEC modes based on PHY capability */
-    ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
-
-    if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
-        caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
-        ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
-    if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
-        ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
-
-    if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG,
-                            caps, NULL))
-        netdev_info(netdev, "Get phy capability failed.\n");
+    caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+    if (!caps)
+        return -ENOMEM;
+
+    status = ice_aq_get_phy_caps(vsi->port_info, false,
+                                 ICE_AQC_REPORT_SW_CFG, caps, NULL);
+    if (status) {
+        err = -EIO;
+        goto done;
+    }
+
+    /* Set the advertised flow control based on the PHY capability */
+    if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
+        (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) {
+        ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
+        ethtool_link_ksettings_add_link_mode(ks, advertising,
+                                             Asym_Pause);
+    } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) {
+        ethtool_link_ksettings_add_link_mode(ks, advertising,
+                                             Asym_Pause);
+    } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) {
+        ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
+        ethtool_link_ksettings_add_link_mode(ks, advertising,
+                                             Asym_Pause);
+    } else {
+        ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
+        ethtool_link_ksettings_del_link_mode(ks, advertising,
+                                             Asym_Pause);
+    }
 
     /* Set advertised FEC modes based on PHY capability */
     ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
@@ -2178,9 +2197,25 @@ ice_get_link_ksettings(struct net_device *netdev,
         caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
         ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
 
+    status = ice_aq_get_phy_caps(vsi->port_info, false,
+                                 ICE_AQC_REPORT_TOPO_CAP, caps, NULL);
+    if (status) {
+        err = -EIO;
+        goto done;
+    }
+
+    /* Set supported FEC modes based on PHY capability */
+    ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+
+    if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
+        caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
+        ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+    if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+        ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+
 done:
     devm_kfree(&vsi->back->pdev->dev, caps);
-    return 0;
+    return err;
 }
@@ -2763,6 +2798,11 @@ static int ice_nway_reset(struct net_device *netdev)
  * ice_get_pauseparam - Get Flow Control status
  * @netdev: network interface device structure
  * @pause: ethernet pause (flow control) parameters
+ *
+ * Get requested flow control status from PHY capability.
+ * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which
+ * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report
+ * the negotiated Rx/Tx pause via lp_advertising.
  */
 static void
 ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h

@@ -281,14 +281,10 @@
 #define GL_PWR_MODE_CTL 0x000B820C
 #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
-#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
 #define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
-#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
 #define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
 #define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
-#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
 #define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
-#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
 #define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
 #define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
 #define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
@@ -296,38 +292,22 @@
 #define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
 #define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
 #define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
-#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
 #define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
-#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
 #define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
 #define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
-#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
 #define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
-#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
 #define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
-#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
 #define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
-#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
 #define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
-#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
 #define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
-#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
 #define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
-#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
 #define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
-#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
 #define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
-#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
 #define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
-#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
 #define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
-#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
 #define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
-#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
 #define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
-#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
 #define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
-#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
 #define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
 #define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64))
 #define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64))
@@ -340,32 +320,23 @@
 #define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
 #define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64))
 #define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
-#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
 #define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
-#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
 #define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
-#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
 #define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
-#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
 #define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
-#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
 #define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
-#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
 #define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
-#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
 #define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
-#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
 #define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
 #define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
 #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
-#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
 #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
-#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
 #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
 #define PF_VT_PFALLOC_HIF 0x0009DD80
 #define VSIQF_HKEY_MAX_INDEX 12
 #define VSIQF_HLUT_MAX_INDEX 15
 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
 #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define PRTRPB_RDPC 0x000AC260
 
 #endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h

@@ -290,6 +290,7 @@ struct ice_rlan_ctx {
     u8 tphdata_ena;
     u8 tphhead_ena;
     u16 lrxqthresh; /* bigger than needed, see above for reason */
+    u8 prefena;     /* NOTE: normally must be set to 1 at init */
 };
 
 struct ice_ctx_ele {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c

@@ -1129,12 +1129,7 @@ static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
         return -EEXIST;
     }
 
-    if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-        num_q_vectors = vsi->num_q_vectors;
-    } else {
-        err = -EINVAL;
-        goto err_out;
-    }
+    num_q_vectors = vsi->num_q_vectors;
 
     for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
         err = ice_vsi_alloc_q_vector(vsi, v_idx);
@@ -1180,9 +1175,6 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
         return -EEXIST;
     }
 
-    if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-        return -ENOENT;
-
     num_q_vectors = vsi->num_q_vectors;
     /* reserve slots from OS requested IRQs */
     vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
@@ -1477,40 +1469,32 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
     prev_es = &vsi->eth_stats_prev;
     cur_es = &vsi->eth_stats;
 
-    ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
-                      vsi->stat_offsets_loaded, &prev_es->rx_bytes,
-                      &cur_es->rx_bytes);
+    ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
+                      &prev_es->rx_bytes, &cur_es->rx_bytes);
 
-    ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
-                      vsi->stat_offsets_loaded, &prev_es->rx_unicast,
-                      &cur_es->rx_unicast);
+    ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
+                      &prev_es->rx_unicast, &cur_es->rx_unicast);
 
-    ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
-                      vsi->stat_offsets_loaded, &prev_es->rx_multicast,
-                      &cur_es->rx_multicast);
+    ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
+                      &prev_es->rx_multicast, &cur_es->rx_multicast);
 
-    ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
-                      vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
-                      &cur_es->rx_broadcast);
+    ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
+                      &prev_es->rx_broadcast, &cur_es->rx_broadcast);
 
     ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
                       &prev_es->rx_discards, &cur_es->rx_discards);
 
-    ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
-                      vsi->stat_offsets_loaded, &prev_es->tx_bytes,
-                      &cur_es->tx_bytes);
+    ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
+                      &prev_es->tx_bytes, &cur_es->tx_bytes);
 
-    ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
-                      vsi->stat_offsets_loaded, &prev_es->tx_unicast,
-                      &cur_es->tx_unicast);
+    ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
+                      &prev_es->tx_unicast, &cur_es->tx_unicast);
 
-    ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
-                      vsi->stat_offsets_loaded, &prev_es->tx_multicast,
-                      &cur_es->tx_multicast);
+    ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
+                      &prev_es->tx_multicast, &cur_es->tx_multicast);
 
-    ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
-                      vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
-                      &cur_es->tx_broadcast);
+    ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
+                      &prev_es->tx_broadcast, &cur_es->tx_broadcast);
 
     ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
                       &prev_es->tx_errors, &cur_es->tx_errors);
@@ -2156,6 +2140,9 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
     if (status == ICE_ERR_RESET_ONGOING) {
         dev_dbg(&pf->pdev->dev,
                 "Reset in progress. LAN Tx queues already disabled\n");
+    } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+        dev_dbg(&pf->pdev->dev,
+                "LAN Tx queues do not exist, nothing to disable\n");
     } else if (status) {
         dev_err(&pf->pdev->dev,
                 "Failed to disable LAN Tx queues, error: %d\n",
@@ -2519,7 +2506,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
     /* configure VSI nodes based on number of queues and TC's */
     for (i = 0; i < vsi->tc_cfg.numtc; i++)
-        max_txqs[i] = pf->num_lan_tx;
+        max_txqs[i] = vsi->alloc_txq;
 
     status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
                              max_txqs);
@@ -2610,8 +2597,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
 {
     struct ice_pf *pf = vsi->back;
     int base = vsi->base_vector;
-
-    if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-        int i;
+    int i;
 
     if (!vsi->q_vectors || !vsi->irqs_ready)
@@ -2643,7 +2628,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
         devm_free_irq(&pf->pdev->dev, irq_num,
                       vsi->q_vectors[i]);
     }
-    }
 }
@@ -2821,7 +2805,6 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
     }
 
     /* disable each interrupt */
-    if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
     ice_for_each_q_vector(vsi, i)
         wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
@@ -2829,7 +2812,6 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
     ice_for_each_q_vector(vsi, i)
         synchronize_irq(pf->msix_entries[i + base].vector);
-    }
 }
@@ -2986,6 +2968,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
         if (ret)
             goto err_rings;
 
+        ret = ice_vsi_setup_vector_base(vsi);
+        if (ret)
+            goto err_vectors;
+
         ret = ice_vsi_set_q_vectors_reg_idx(vsi);
         if (ret)
             goto err_vectors;
@@ -3007,10 +2993,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
         if (ret)
             goto err_rings;
 
-        ret = ice_vsi_setup_vector_base(vsi);
-        if (ret)
-            goto err_vectors;
-
         ret = ice_vsi_set_q_vectors_reg_idx(vsi);
         if (ret)
             goto err_vectors;
@@ -3028,7 +3010,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
     /* configure VSI nodes based on number of queues and TC's */
     for (i = 0; i < vsi->tc_cfg.numtc; i++)
-        max_txqs[i] = pf->num_lan_tx;
+        max_txqs[i] = vsi->alloc_txq;
 
     status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
                              max_txqs);
@@ -3145,7 +3127,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
         if (ena_tc & BIT(i))
             num_tc++;
         /* populate max_txqs per TC */
-        max_txqs[i] = pf->num_lan_tx;
+        max_txqs[i] = vsi->alloc_txq;
     }
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
#include "ice_lib.h" #include "ice_lib.h"
#include "ice_dcb_lib.h" #include "ice_dcb_lib.h"
#define DRV_VERSION "0.7.4-k" #define DRV_VERSION "0.7.5-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION; const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY; static const char ice_driver_string[] = DRV_SUMMARY;
...@@ -488,6 +488,7 @@ static void ...@@ -488,6 +488,7 @@ static void
ice_prepare_for_reset(struct ice_pf *pf) ice_prepare_for_reset(struct ice_pf *pf)
{ {
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
u8 i;
/* already prepared for reset */ /* already prepared for reset */
if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
...@@ -497,6 +498,10 @@ ice_prepare_for_reset(struct ice_pf *pf) ...@@ -497,6 +498,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
if (ice_check_sq_alive(hw, &hw->mailboxq)) if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf); ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
for (i = 0; i < pf->num_alloc_vfs; i++)
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
/* disable the VSIs and their queues that are not already DOWN */ /* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false); ice_pf_dis_all_vsi(pf, false);
...@@ -810,6 +815,20 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, ...@@ -810,6 +815,20 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (!vsi || !vsi->port_info) if (!vsi || !vsi->port_info)
return -EINVAL; return -EINVAL;
/* turn off PHY if media was removed */
if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
result = ice_aq_set_link_restart_an(pi, false, NULL);
if (result) {
dev_dbg(&pf->pdev->dev,
"Failed to set link down, VSI %d error %d\n",
vsi->vsi_num, result);
return result;
}
}
ice_vsi_link_event(vsi, link_up); ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up); ice_print_link_msg(vsi, link_up);
...@@ -1314,6 +1333,124 @@ static void ice_handle_mdd_event(struct ice_pf *pf) ...@@ -1314,6 +1333,124 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
} }
} }
/**
* ice_force_phys_link_state - Force the physical link state
* @vsi: VSI to force the physical link state to up/down
* @link_up: true/false indicates to set the physical link to up/down
*
* Force the physical link state by getting the current PHY capabilities from
* hardware and setting the PHY config based on the determined capabilities. If
* link changes a link event will be triggered because both the Enable Automatic
* Link Update and LESM Enable bits are set when setting the PHY capabilities.
*
* Returns 0 on success, negative on failure
*/
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_aqc_set_phy_cfg_data *cfg;
struct ice_port_info *pi;
struct device *dev;
int retcode;
if (!vsi || !vsi->port_info || !vsi->back)
return -EINVAL;
if (vsi->type != ICE_VSI_PF)
return 0;
dev = &vsi->back->pdev->dev;
pi = vsi->port_info;
pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (retcode) {
dev_err(dev,
"Failed to get phy capabilities, VSI %d error %d\n",
vsi->vsi_num, retcode);
retcode = -EIO;
goto out;
}
/* No change in link */
if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
goto out;
cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
retcode = -ENOMEM;
goto out;
}
cfg->phy_type_low = pcaps->phy_type_low;
cfg->phy_type_high = pcaps->phy_type_high;
cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
cfg->low_power_ctrl = pcaps->low_power_ctrl;
cfg->eee_cap = pcaps->eee_cap;
cfg->eeer_value = pcaps->eeer_value;
cfg->link_fec_opt = pcaps->link_fec_options;
if (link_up)
cfg->caps |= ICE_AQ_PHY_ENA_LINK;
else
cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
if (retcode) {
dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
vsi->vsi_num, retcode);
retcode = -EIO;
}
devm_kfree(dev, cfg);
out:
devm_kfree(dev, pcaps);
return retcode;
}
/**
* ice_check_media_subtask - Check for media; bring link up if detected.
* @pf: pointer to PF struct
*/
static void ice_check_media_subtask(struct ice_pf *pf)
{
struct ice_port_info *pi;
struct ice_vsi *vsi;
int err;
vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
if (!vsi)
return;
/* No need to check for media if it's already present or the interface
* is down
*/
if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
test_bit(__ICE_DOWN, vsi->state))
return;
/* Refresh link info and check if media is present */
pi = vsi->port_info;
err = ice_update_link_info(pi);
if (err)
return;
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
err = ice_force_phys_link_state(vsi, true);
if (err)
return;
clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
/* A Link Status Event will be generated; the event handler
* will complete bringing the interface up
*/
}
}
/** /**
* ice_service_task - manage and run subtasks * ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct * @work: pointer to work_struct contained by the PF struct
...@@ -1336,6 +1473,7 @@ static void ice_service_task(struct work_struct *work) ...@@ -1336,6 +1473,7 @@ static void ice_service_task(struct work_struct *work)
return; return;
} }
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf); ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf); ice_sync_fltr_subtask(pf);
ice_handle_mdd_event(pf); ice_handle_mdd_event(pf);
...@@ -1409,15 +1547,11 @@ static void ice_irq_affinity_release(struct kref __always_unused *ref) {} ...@@ -1409,15 +1547,11 @@ static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
*/ */
static int ice_vsi_ena_irq(struct ice_vsi *vsi) static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{ {
struct ice_pf *pf = vsi->back; struct ice_hw *hw = &vsi->back->hw;
struct ice_hw *hw = &pf->hw;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
int i; int i;
ice_for_each_q_vector(vsi, i) ice_for_each_q_vector(vsi, i)
ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
}
ice_flush(hw); ice_flush(hw);
return 0; return 0;
...@@ -1665,7 +1799,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) ...@@ -1665,7 +1799,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, 0); wr32(hw, PFINT_OICR_ENA, 0);
ice_flush(hw); ice_flush(hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { if (pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
devm_free_irq(&pf->pdev->dev, devm_free_irq(&pf->pdev->dev,
pf->msix_entries[pf->oicr_idx].vector, pf); pf->msix_entries[pf->oicr_idx].vector, pf);
...@@ -2091,7 +2225,6 @@ static void ice_deinit_pf(struct ice_pf *pf) ...@@ -2091,7 +2225,6 @@ static void ice_deinit_pf(struct ice_pf *pf)
static void ice_init_pf(struct ice_pf *pf) static void ice_init_pf(struct ice_pf *pf)
{ {
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.common_cap.sr_iov_1_1) { if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
...@@ -2191,7 +2324,6 @@ static int ice_ena_msix_range(struct ice_pf *pf) ...@@ -2191,7 +2324,6 @@ static int ice_ena_msix_range(struct ice_pf *pf)
exit_err: exit_err:
pf->num_lan_msix = 0; pf->num_lan_msix = 0;
clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
return err; return err;
} }
...@@ -2204,7 +2336,6 @@ static void ice_dis_msix(struct ice_pf *pf) ...@@ -2204,7 +2336,6 @@ static void ice_dis_msix(struct ice_pf *pf)
pci_disable_msix(pf->pdev); pci_disable_msix(pf->pdev);
devm_kfree(&pf->pdev->dev, pf->msix_entries); devm_kfree(&pf->pdev->dev, pf->msix_entries);
pf->msix_entries = NULL; pf->msix_entries = NULL;
clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
} }
/** /**
...@@ -2213,7 +2344,6 @@ static void ice_dis_msix(struct ice_pf *pf) ...@@ -2213,7 +2344,6 @@ static void ice_dis_msix(struct ice_pf *pf)
*/ */
static void ice_clear_interrupt_scheme(struct ice_pf *pf) static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{ {
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_dis_msix(pf); ice_dis_msix(pf);
if (pf->irq_tracker) { if (pf->irq_tracker) {
...@@ -2230,10 +2360,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) ...@@ -2230,10 +2360,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
{ {
int vectors; int vectors;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
vectors = ice_ena_msix_range(pf); vectors = ice_ena_msix_range(pf);
else
return -ENODEV;
if (vectors < 0) if (vectors < 0)
return vectors; return vectors;
...@@ -2390,13 +2517,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ...@@ -2390,13 +2517,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* the misc functionality and queue processing is combined in * the misc functionality and queue processing is combined in
* the same vector and that gets setup at open. * the same vector and that gets setup at open.
*/ */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
err = ice_req_irq_msix_misc(pf); err = ice_req_irq_msix_misc(pf);
if (err) { if (err) {
dev_err(dev, "setup of misc vector failed: %d\n", err); dev_err(dev, "setup of misc vector failed: %d\n", err);
goto err_init_interrupt_unroll; goto err_init_interrupt_unroll;
} }
}
/* create switch struct for the switch element created by FW on boot */ /* create switch struct for the switch element created by FW on boot */
pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
...@@ -3008,10 +3133,7 @@ static int ice_up_complete(struct ice_vsi *vsi) ...@@ -3008,10 +3133,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
int err; int err;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_vsi_cfg_msix(vsi); ice_vsi_cfg_msix(vsi);
else
return -ENOTSUPP;
/* Enable only Rx rings, Tx rings were enabled by the FW when the /* Enable only Rx rings, Tx rings were enabled by the FW when the
* Tx queue group list was configured and the context bits were * Tx queue group list was configured and the context bits were
...@@ -3159,6 +3281,8 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi) ...@@ -3159,6 +3281,8 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi)
cur_ns->rx_errors = pf->stats.crc_errors + cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes; pf->stats.illegal_bytes;
cur_ns->rx_length_errors = pf->stats.rx_len_errors; cur_ns->rx_length_errors = pf->stats.rx_len_errors;
/* record drops from the port level */
cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
} }
} }
@@ -3176,96 +3300,86 @@ static void ice_update_pf_stats(struct ice_pf *pf)
 	cur_ps = &pf->stats;
 	pf_id = hw->pf_id;

-	ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
+	ice_stat_update40(hw, GLPRT_GORCL(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->eth.rx_bytes,
 			  &cur_ps->eth.rx_bytes);
-	ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
+	ice_stat_update40(hw, GLPRT_UPRCL(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->eth.rx_unicast,
 			  &cur_ps->eth.rx_unicast);
-	ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
+	ice_stat_update40(hw, GLPRT_MPRCL(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->eth.rx_multicast,
 			  &cur_ps->eth.rx_multicast);
-	ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
+	ice_stat_update40(hw, GLPRT_BPRCL(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->eth.rx_broadcast,
 			  &cur_ps->eth.rx_broadcast);
-	ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
+	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
+			  &prev_ps->eth.rx_discards,
+			  &cur_ps->eth.rx_discards);
+	ice_stat_update40(hw, GLPRT_GOTCL(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->eth.tx_bytes,
 			  &cur_ps->eth.tx_bytes);
-	ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
+	ice_stat_update40(hw, GLPRT_UPTCL(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->eth.tx_unicast,
 			  &cur_ps->eth.tx_unicast);
-	ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
+	ice_stat_update40(hw, GLPRT_MPTCL(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->eth.tx_multicast,
 			  &cur_ps->eth.tx_multicast);
-	ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
+	ice_stat_update40(hw, GLPRT_BPTCL(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->eth.tx_broadcast,
 			  &cur_ps->eth.tx_broadcast);

 	ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
 			  &prev_ps->tx_dropped_link_down,
 			  &cur_ps->tx_dropped_link_down);

-	ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->rx_size_64,
-			  &cur_ps->rx_size_64);
+	ice_stat_update40(hw, GLPRT_PRC64L(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
-	ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->rx_size_127,
-			  &cur_ps->rx_size_127);
+	ice_stat_update40(hw, GLPRT_PRC127L(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
-	ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->rx_size_255,
-			  &cur_ps->rx_size_255);
+	ice_stat_update40(hw, GLPRT_PRC255L(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
-	ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->rx_size_511,
-			  &cur_ps->rx_size_511);
+	ice_stat_update40(hw, GLPRT_PRC511L(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
-	ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
-			  GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
-	ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
-			  GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
-	ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
-			  GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
-	ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->tx_size_64,
-			  &cur_ps->tx_size_64);
+	ice_stat_update40(hw, GLPRT_PTC64L(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
-	ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->tx_size_127,
-			  &cur_ps->tx_size_127);
+	ice_stat_update40(hw, GLPRT_PTC127L(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
-	ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->tx_size_255,
-			  &cur_ps->tx_size_255);
+	ice_stat_update40(hw, GLPRT_PTC255L(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
-	ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
-			  pf->stat_prev_loaded, &prev_ps->tx_size_511,
-			  &cur_ps->tx_size_511);
+	ice_stat_update40(hw, GLPRT_PTC511L(pf_id), pf->stat_prev_loaded,
+			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
-	ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
-			  GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
-	ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
-			  GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
-	ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
-			  GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);

 	ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
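The new ice_stat_update40() takes only the low register offset and latches each 40-bit hardware sample into a 64-bit software counter, so the totals survive register rollover as described in the cover letter. A minimal sketch of that accumulation logic, with assumed helper names (not the driver's exact body):

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Fold a raw 40-bit register sample into a 64-bit software total,
     * tolerating one wrap of the hardware counter between samples.
     */
    static void stat40_accumulate(u64 sample, bool prev_loaded,
                                  u64 *prev, u64 *total)
    {
            if (!prev_loaded)
                    *prev = sample;         /* first read: baseline only */

            if (sample >= *prev)
                    *total += sample - *prev;
            else                            /* counter wrapped at 2^40 */
                    *total += (sample + BIT_ULL(40)) - *prev;

            *prev = sample;
    }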
@@ -3371,85 +3485,6 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
 	}
 }

-/**
- * ice_force_phys_link_state - Force the physical link state
- * @vsi: VSI to force the physical link state to up/down
- * @link_up: true/false indicates to set the physical link to up/down
- *
- * Force the physical link state by getting the current PHY capabilities from
- * hardware and setting the PHY config based on the determined capabilities. If
- * link changes a link event will be triggered because both the Enable Automatic
- * Link Update and LESM Enable bits are set when setting the PHY capabilities.
- *
- * Returns 0 on success, negative on failure
- */
-static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
-{
-	struct ice_aqc_get_phy_caps_data *pcaps;
-	struct ice_aqc_set_phy_cfg_data *cfg;
-	struct ice_port_info *pi;
-	struct device *dev;
-	int retcode;
-
-	if (!vsi || !vsi->port_info || !vsi->back)
-		return -EINVAL;
-	if (vsi->type != ICE_VSI_PF)
-		return 0;
-
-	dev = &vsi->back->pdev->dev;
-	pi = vsi->port_info;
-
-	pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
-	if (!pcaps)
-		return -ENOMEM;
-
-	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
-				      NULL);
-	if (retcode) {
-		dev_err(dev,
-			"Failed to get phy capabilities, VSI %d error %d\n",
-			vsi->vsi_num, retcode);
-		retcode = -EIO;
-		goto out;
-	}
-
-	/* No change in link */
-	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
-	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
-		goto out;
-
-	cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
-	if (!cfg) {
-		retcode = -ENOMEM;
-		goto out;
-	}
-
-	cfg->phy_type_low = pcaps->phy_type_low;
-	cfg->phy_type_high = pcaps->phy_type_high;
-	cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
-	cfg->low_power_ctrl = pcaps->low_power_ctrl;
-	cfg->eee_cap = pcaps->eee_cap;
-	cfg->eeer_value = pcaps->eeer_value;
-	cfg->link_fec_opt = pcaps->link_fec_options;
-
-	if (link_up)
-		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
-	else
-		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
-
-	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
-	if (retcode) {
-		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
-			vsi->vsi_num, retcode);
-		retcode = -EIO;
-	}
-
-	devm_kfree(dev, cfg);
-out:
-	devm_kfree(dev, pcaps);
-	return retcode;
-}
-
 /**
  * ice_down - Shutdown the connection
  * @vsi: The VSI being stopped
@@ -3558,24 +3593,6 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
 	return err;
 }

-/**
- * ice_vsi_req_irq - Request IRQ from the OS
- * @vsi: The VSI IRQ is being requested for
- * @basename: name for the vector
- *
- * Return 0 on success and a negative value on error
- */
-static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
-{
-	struct ice_pf *pf = vsi->back;
-	int err = -EINVAL;
-
-	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-		err = ice_vsi_req_irq_msix(vsi, basename);
-
-	return err;
-}
-
 /**
  * ice_vsi_open - Called when a network interface is made active
  * @vsi: the VSI to open
@@ -3605,7 +3622,7 @@ static int ice_vsi_open(struct ice_vsi *vsi)
 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
 		 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
-	err = ice_vsi_req_irq(vsi, int_name);
+	err = ice_vsi_req_irq_msix(vsi, int_name);
 	if (err)
 		goto err_setup_rx;
@@ -3842,13 +3859,11 @@ static void ice_rebuild(struct ice_pf *pf)
 	}

 	/* start misc vector */
-	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-		err = ice_req_irq_msix_misc(pf);
-		if (err) {
-			dev_err(dev, "misc vector setup failed: %d\n", err);
-			goto err_vsi_rebuild;
-		}
+	err = ice_req_irq_msix_misc(pf);
+	if (err) {
+		dev_err(dev, "misc vector setup failed: %d\n", err);
+		goto err_vsi_rebuild;
 	}

 	/* restart the VSIs that were rebuilt and running before the reset */
 	err = ice_pf_ena_all_vsi(pf, false);
@@ -4244,9 +4259,7 @@ static void ice_tx_timeout(struct net_device *netdev)
 	head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
 		QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
 	/* Read interrupt register */
-	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-		val = rd32(hw,
-			   GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+	val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

 	netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
 		    vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
@@ -4295,6 +4308,7 @@ int ice_open(struct net_device *netdev)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
+	struct ice_port_info *pi;
 	int err;

 	if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
@@ -4304,12 +4318,32 @@ int ice_open(struct net_device *netdev)

 	netif_carrier_off(netdev);

-	err = ice_force_phys_link_state(vsi, true);
-	if (err) {
-		netdev_err(netdev,
-			   "Failed to set physical link up, error %d\n", err);
-		return err;
+	pi = vsi->port_info;
+	err = ice_update_link_info(pi);
+	if (err) {
+		netdev_err(netdev, "Failed to get link info, error %d\n",
+			   err);
+		return err;
+	}
+
+	/* Set PHY if there is media, otherwise, turn off PHY */
+	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+		err = ice_force_phys_link_state(vsi, true);
+		if (err) {
+			netdev_err(netdev,
+				   "Failed to set physical link up, error %d\n",
+				   err);
+			return err;
+		}
+	} else {
+		err = ice_aq_set_link_restart_an(pi, false, NULL);
+		if (err) {
+			netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
+				   vsi->vsi_num, err);
+			return err;
+		}
+		set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
 	}

 	err = ice_vsi_open(vsi);
 	if (err)
......
@@ -377,11 +377,20 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
  */
 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
 {
+	u16 prev_ntu = rx_ring->next_to_use;
+
 	rx_ring->next_to_use = val;

 	/* update next to alloc since we have filled the ring */
 	rx_ring->next_to_alloc = val;

-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch. (Only
-	 * applicable for weak-ordered memory model archs,
+	/* QRX_TAIL will be updated with any tail value, but hardware ignores
+	 * the lower 3 bits. This makes it so we only bump tail on meaningful
+	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
+	 * the budget depending on the current traffic load.
+	 */
+	val &= ~0x7;
+	if (prev_ntu != val) {
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch. (Only
+		 * applicable for weak-ordered memory model archs,
@@ -389,6 +398,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
 		 */
 		wmb();
 		writel(val, rx_ring->tail);
+	}
 }
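Since hardware ignores the low three bits of QRX_TAIL, the `val &= ~0x7` mask quantizes the tail to multiples of eight, and the `prev_ntu` comparison skips the MMIO write until the quantized value actually moves. A standalone illustration of that arithmetic (not driver code):

    #include <assert.h>

    int main(void)
    {
            unsigned int ntu;

            for (ntu = 0; ntu < 8; ntu++)   /* 0..7 quantize to 0: no write */
                    assert((ntu & ~0x7u) == 0);
            for (ntu = 8; ntu < 16; ntu++)  /* 8..15 quantize to 8: one write */
                    assert((ntu & ~0x7u) == 8);
            return 0;
    }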
/** /**
...@@ -445,7 +455,13 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) ...@@ -445,7 +455,13 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
* @rx_ring: ring to place buffers on * @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace * @cleaned_count: number of buffers to replace
* *
* Returns false if all allocations were successful, true if any fail * Returns false if all allocations were successful, true if any fail. Returning
* true signals to the caller that we didn't replace cleaned_count buffers and
* there is more work to do.
*
* First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/ */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{ {
...@@ -462,8 +478,9 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) ...@@ -462,8 +478,9 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buf[ntu]; bi = &rx_ring->rx_buf[ntu];
do { do {
/* if we fail here, we have work remaining */
if (!ice_alloc_mapped_page(rx_ring, bi)) if (!ice_alloc_mapped_page(rx_ring, bi))
goto no_bufs; break;
/* sync the buffer for use by the device */ /* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
...@@ -494,16 +511,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) ...@@ -494,16 +511,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (rx_ring->next_to_use != ntu) if (rx_ring->next_to_use != ntu)
ice_release_rx_desc(rx_ring, ntu); ice_release_rx_desc(rx_ring, ntu);
return false; return !!cleaned_count;
no_bufs:
if (rx_ring->next_to_use != ntu)
ice_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
*/
return true;
} }
/** /**
...@@ -990,7 +998,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -990,7 +998,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{ {
unsigned int total_rx_bytes = 0, total_rx_pkts = 0; unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
bool failure = false; bool failure;
/* start the loop to process Rx packets bounded by 'budget' */ /* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) { while (likely(total_rx_pkts < (unsigned int)budget)) {
...@@ -1002,13 +1010,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1002,13 +1010,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
u16 vlan_tag = 0; u16 vlan_tag = 0;
u8 rx_ptype; u8 rx_ptype;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= ICE_RX_BUF_WRITE) {
failure = failure ||
ice_alloc_rx_bufs(rx_ring, cleaned_count);
cleaned_count = 0;
}
/* get the Rx desc from Rx ring based on 'next_to_clean' */ /* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
...@@ -1085,6 +1086,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1085,6 +1086,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++; total_rx_pkts++;
} }
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
/* update queue and vector specific stats */ /* update queue and vector specific stats */
u64_stats_update_begin(&rx_ring->syncp); u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.pkts += total_rx_pkts; rx_ring->stats.pkts += total_rx_pkts;
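The effect of hoisting the refill out of the packet loop: the old in-loop batching could write the tail register several times per poll, while the new code writes it at most once. A toy model under assumed values (64-packet budget, refill every 16 cleaned descriptors, matching the "up to 4 times" figure in the cover letter):

    #include <stdio.h>

    int main(void)
    {
            int budget = 64, batch = 16;    /* assumed values for illustration */
            int old_writes = 0, cleaned = 0, pkt;

            for (pkt = 0; pkt < budget; pkt++)
                    if (++cleaned >= batch) {       /* old: refill inside the loop */
                            old_writes++;
                            cleaned = 0;
                    }

            printf("old: up to %d tail writes per poll; new: at most 1\n",
                   old_writes);
            return 0;
    }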
@@ -1409,7 +1413,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 	struct ice_q_vector *q_vector =
 				container_of(napi, struct ice_q_vector, napi);
 	struct ice_vsi *vsi = q_vector->vsi;
-	struct ice_pf *pf = vsi->back;
 	bool clean_complete = true;
 	int budget_per_ring = 0;
 	struct ice_ring *ring;
@@ -1450,7 +1453,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 	 * poll us due to busy-polling
 	 */
 	if (likely(napi_complete_done(napi, work_done)))
-		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-			ice_update_ena_itr(vsi, q_vector);
+		ice_update_ena_itr(vsi, q_vector);

 	return min_t(int, work_done, budget - 1);
......
@@ -297,13 +297,6 @@ void ice_free_vfs(struct ice_pf *pf)
 		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
 			/* disable VF qp mappings */
 			ice_dis_vf_mappings(&pf->vf[i]);
-
-			/* Set this state so that assigned VF vectors can be
-			 * reclaimed by PF for reuse in ice_vsi_release(). No
-			 * need to clear this bit since pf->vf array is being
-			 * freed anyways after this for loop
-			 */
-			set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
 			ice_free_vf_res(&pf->vf[i]);
 		}
 	}
@@ -551,7 +544,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
 	 * expect vector assignment to be changed unless there is a request for
 	 * more vectors.
 	 */
-	clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
 ice_alloc_vsi_res_exit:
 	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
 	return status;
@@ -567,11 +559,6 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
 	int tx_rx_queue_left;
 	int status;

-	/* setup VF VSI and necessary resources */
-	status = ice_alloc_vsi_res(vf);
-	if (status)
-		goto ice_alloc_vf_res_exit;
-
 	/* Update number of VF queues, in case VF had requested for queue
 	 * changes
 	 */
@@ -581,6 +568,11 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
 	    vf->num_req_qs != vf->num_vf_qs)
 		vf->num_vf_qs = vf->num_req_qs;

+	/* setup VF VSI and necessary resources */
+	status = ice_alloc_vsi_res(vf);
+	if (status)
+		goto ice_alloc_vf_res_exit;
+
 	if (vf->trusted)
 		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 	else
@@ -1283,9 +1275,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
 		/* assign default capabilities */
 		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
 		vfs[i].spoofchk = true;
-
-		/* Set this state so that PF driver does VF vector assignment */
-		set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
 	}

 	pf->num_alloc_vfs = num_alloc_vfs;
......
@@ -30,11 +30,6 @@ enum ice_vf_states {
 	ICE_VF_STATE_DIS,
 	ICE_VF_STATE_MC_PROMISC,
 	ICE_VF_STATE_UC_PROMISC,
-	/* state to indicate if PF needs to do vector assignment for VF.
-	 * This needs to be set during first time VF initialization or later
-	 * when VF asks for more Vectors through virtchnl OP.
-	 */
-	ICE_VF_STATE_CFG_INTR,
 	ICE_VF_STATES_NBITS
 };
......