Commit e872469c authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-02-20 (ice)

This series contains updates to ice driver only.

Yochai sets parent device to properly reflect connection state between
source DPLL and output pin.

Arkadiusz fixes additional issues related to DPLL: proper reporting of
phase_adjust value and preventing use/access of data while resetting.

Amritha resolves ASSERT_RTNL() being triggered on certain reset/rebuild
flows.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: Fix ASSERT_RTNL() warning during certain scenarios
  ice: fix pin phase adjust updates on PF reset
  ice: fix dpll periodic work data updates on PF reset
  ice: fix dpll and dpll_pin data access on PF reset
  ice: fix dpll input pin phase_adjust value updates
  ice: fix connection state of DPLL and out pin
====================
Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Link: https://lore.kernel.org/r/20240220214444.1039759-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 5ae1e992 080b0c8d
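The recurring pattern in the DPLL fixes below is a reset guard: each dpll/pin callback first calls the new ice_dpll_is_reset() helper and bails out with -EBUSY while a PF reset is in flight, and only then takes pf->dplls.lock. The following is a minimal user-space model of that pattern, not driver code; the pthread mutex, the reset flag and the MODEL_EBUSY constant are illustrative stand-ins for ice_is_reset_in_progress(pf->state), pf->dplls.lock and -EBUSY.

/* Standalone model of the reset-guard pattern added to the ice DPLL callbacks:
 * refuse the operation while a PF reset is in flight, otherwise serialize on
 * the subsystem lock before touching firmware state.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MODEL_EBUSY 16          /* stands in for the kernel's EBUSY */

static bool reset_in_progress;  /* stands in for ice_is_reset_in_progress() */
static pthread_mutex_t dplls_lock = PTHREAD_MUTEX_INITIALIZER;

static int dpll_op(void)
{
        if (reset_in_progress)          /* ice_dpll_is_reset() analogue */
                return -MODEL_EBUSY;

        pthread_mutex_lock(&dplls_lock);
        /* ... admin-queue/firmware access would happen here ... */
        pthread_mutex_unlock(&dplls_lock);
        return 0;
}

int main(void)
{
        reset_in_progress = true;
        printf("during reset: %d\n", dpll_op());  /* prints -16 */
        reset_in_progress = false;
        printf("after reset:  %d\n", dpll_op());  /* prints 0 */
        return 0;
}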
@@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
 	q_vector = vsi->q_vectors[v_idx];

 	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
-		if (vsi->netdev)
-			netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
-					     NETDEV_QUEUE_TYPE_TX, NULL);
+		ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+				   NULL);
 		tx_ring->q_vector = NULL;
 	}
 	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
-		if (vsi->netdev)
-			netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
-					     NETDEV_QUEUE_TYPE_RX, NULL);
+		ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+				   NULL);
 		rx_ring->q_vector = NULL;
 	}
...
@@ -30,6 +30,26 @@ static const char * const pin_type_name[] = {
 	[ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
 };

+/**
+ * ice_dpll_is_reset - check if reset is in progress
+ * @pf: private board structure
+ * @extack: error reporting
+ *
+ * If reset is in progress, fill extack with error.
+ *
+ * Return:
+ * * false - no reset in progress
+ * * true - reset in progress
+ */
+static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack)
+{
+	if (ice_is_reset_in_progress(pf->state)) {
+		NL_SET_ERR_MSG(extack, "PF reset in progress");
+		return true;
+	}
+	return false;
+}
+
 /**
  * ice_dpll_pin_freq_set - set pin's frequency
  * @pf: private board structure
@@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
 	struct ice_pf *pf = d->pf;
 	int ret;

+	if (ice_dpll_is_reset(pf, extack))
+		return -EBUSY;
+
 	mutex_lock(&pf->dplls.lock);
 	ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
 	mutex_unlock(&pf->dplls.lock);
@@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
  * ice_dpll_pin_enable - enable a pin on dplls
  * @hw: board private hw structure
  * @pin: pointer to a pin
+ * @dpll_idx: dpll index to connect to output pin
  * @pin_type: type of pin being enabled
  * @extack: error reporting
  *
@@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
  */
 static int
 ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
-		    enum ice_dpll_pin_type pin_type,
+		    u8 dpll_idx, enum ice_dpll_pin_type pin_type,
 		    struct netlink_ext_ack *extack)
 {
 	u8 flags = 0;
@@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
 		ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
 		break;
 	case ICE_DPLL_PIN_TYPE_OUTPUT:
+		flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL;
 		if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
 			flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
 		flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
-		ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+		ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx,
+						0, 0);
 		break;
 	default:
 		return -EINVAL;
@@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
 	case ICE_DPLL_PIN_TYPE_INPUT:
 		ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
 					       NULL, &pin->flags[0],
-					       &pin->freq, NULL);
+					       &pin->freq, &pin->phase_adjust);
 		if (ret)
 			goto err;
 		if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
@@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
 		break;
 	case ICE_DPLL_PIN_TYPE_OUTPUT:
 		ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
-						&pin->flags[0], NULL,
+						&pin->flags[0], &parent,
 						&pin->freq, NULL);
 		if (ret)
 			goto err;
-		if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
-			pin->state[0] = DPLL_PIN_STATE_CONNECTED;
-		else
-			pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+
+		parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
+		if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+			pin->state[pf->dplls.eec.dpll_idx] =
+				parent == pf->dplls.eec.dpll_idx ?
+				DPLL_PIN_STATE_CONNECTED :
+				DPLL_PIN_STATE_DISCONNECTED;
+			pin->state[pf->dplls.pps.dpll_idx] =
+				parent == pf->dplls.pps.dpll_idx ?
+				DPLL_PIN_STATE_CONNECTED :
+				DPLL_PIN_STATE_DISCONNECTED;
+		} else {
+			pin->state[pf->dplls.eec.dpll_idx] =
+				DPLL_PIN_STATE_DISCONNECTED;
+			pin->state[pf->dplls.pps.dpll_idx] =
+				DPLL_PIN_STATE_DISCONNECTED;
+		}
 		break;
 	case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
 		for (parent = 0; parent < pf->dplls.rclk.num_parents;
@@ -568,9 +607,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
 	struct ice_pf *pf = d->pf;
 	int ret;

+	if (ice_dpll_is_reset(pf, extack))
+		return -EBUSY;
+
 	mutex_lock(&pf->dplls.lock);
 	if (enable)
-		ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+		ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type,
+					  extack);
 	else
 		ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
 	if (!ret)
@@ -603,6 +646,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
 			  struct netlink_ext_ack *extack)
 {
 	bool enable = state == DPLL_PIN_STATE_CONNECTED;
+	struct ice_dpll_pin *p = pin_priv;
+	struct ice_dpll *d = dpll_priv;
+
+	if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
+		return 0;

 	return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
 				      extack, ICE_DPLL_PIN_TYPE_OUTPUT);
@@ -665,14 +713,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
 	struct ice_pf *pf = d->pf;
 	int ret;

+	if (ice_dpll_is_reset(pf, extack))
+		return -EBUSY;
+
 	mutex_lock(&pf->dplls.lock);
 	ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
 	if (ret)
 		goto unlock;
-	if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+	if (pin_type == ICE_DPLL_PIN_TYPE_INPUT ||
+	    pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
 		*state = p->state[d->dpll_idx];
-	else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
-		*state = p->state[0];
 	ret = 0;
 unlock:
 	mutex_unlock(&pf->dplls.lock);
@@ -790,6 +840,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
 	struct ice_pf *pf = d->pf;
 	int ret;

+	if (ice_dpll_is_reset(pf, extack))
+		return -EBUSY;
+
 	mutex_lock(&pf->dplls.lock);
 	ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
 	mutex_unlock(&pf->dplls.lock);
@@ -910,6 +963,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
 	u8 flag, flags_en = 0;
 	int ret;

+	if (ice_dpll_is_reset(pf, extack))
+		return -EBUSY;
+
 	mutex_lock(&pf->dplls.lock);
 	switch (type) {
 	case ICE_DPLL_PIN_TYPE_INPUT:
@@ -1069,6 +1125,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
 	int ret = -EINVAL;
 	u32 hw_idx;

+	if (ice_dpll_is_reset(pf, extack))
+		return -EBUSY;
+
 	mutex_lock(&pf->dplls.lock);
 	hw_idx = parent->idx - pf->dplls.base_rclk_idx;
 	if (hw_idx >= pf->dplls.num_inputs)
@@ -1123,6 +1182,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
 	int ret = -EINVAL;
 	u32 hw_idx;

+	if (ice_dpll_is_reset(pf, extack))
+		return -EBUSY;
+
 	mutex_lock(&pf->dplls.lock);
 	hw_idx = parent->idx - pf->dplls.base_rclk_idx;
 	if (hw_idx >= pf->dplls.num_inputs)
@@ -1305,8 +1367,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
 	struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
 	struct ice_dpll *de = &pf->dplls.eec;
 	struct ice_dpll *dp = &pf->dplls.pps;
-	int ret;
+	int ret = 0;

+	if (ice_is_reset_in_progress(pf->state))
+		goto resched;
 	mutex_lock(&pf->dplls.lock);
 	ret = ice_dpll_update_state(pf, de, false);
 	if (!ret)
@@ -1326,6 +1390,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
 	ice_dpll_notify_changes(de);
 	ice_dpll_notify_changes(dp);

+resched:
 	/* Run twice a second or reschedule if update failed */
 	kthread_queue_delayed_work(d->kworker, &d->work,
 				   ret ? msecs_to_jiffies(10) :
...
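For reference, the output-pin change in ice_dpll_pin_state_update() above reports the pin as CONNECTED only to the dpll whose index matches the parent source returned by firmware, and DISCONNECTED from the other dpll, or from both when the output is disabled. Below is a small stand-alone sketch of that mapping, not driver code; the enum, function and index values are made up for the example.

/* Standalone model of the per-dpll output-pin state mapping. */
#include <stdbool.h>
#include <stdio.h>

enum pin_state { PIN_DISCONNECTED, PIN_CONNECTED };

static void update_output_state(bool out_en, unsigned int parent,
                                unsigned int eec_idx, unsigned int pps_idx,
                                enum pin_state state[2])
{
        if (out_en) {
                /* connected only to the dpll selected as parent source */
                state[eec_idx] = parent == eec_idx ? PIN_CONNECTED
                                                   : PIN_DISCONNECTED;
                state[pps_idx] = parent == pps_idx ? PIN_CONNECTED
                                                   : PIN_DISCONNECTED;
        } else {
                /* output disabled: disconnected from both dplls */
                state[eec_idx] = PIN_DISCONNECTED;
                state[pps_idx] = PIN_DISCONNECTED;
        }
}

int main(void)
{
        enum pin_state state[2];

        update_output_state(true, 1, 0, 1, state);      /* parent is dpll 1 */
        printf("eec=%d pps=%d\n", state[0], state[1]);  /* prints eec=0 pps=1 */
        return 0;
}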
@@ -2426,7 +2426,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
 	ice_vsi_map_rings_to_vectors(vsi);

 	/* Associate q_vector rings to napi */
-	ice_vsi_set_napi_queues(vsi, true);
+	ice_vsi_set_napi_queues(vsi);

 	vsi->stat_offsets_loaded = false;
@@ -2904,19 +2904,19 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
 }

 /**
- * ice_queue_set_napi - Set the napi instance for the queue
+ * __ice_queue_set_napi - Set the napi instance for the queue
  * @dev: device to which NAPI and queue belong
  * @queue_index: Index of queue
  * @type: queue type as RX or TX
  * @napi: NAPI context
  * @locked: is the rtnl_lock already held
  *
- * Set the napi instance for the queue
+ * Set the napi instance for the queue. Caller indicates the lock status.
  */
 static void
-ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
-		   enum netdev_queue_type type, struct napi_struct *napi,
-		   bool locked)
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+		     enum netdev_queue_type type, struct napi_struct *napi,
+		     bool locked)
 {
 	if (!locked)
 		rtnl_lock();
@@ -2926,26 +2926,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
 }

+/**
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+		   enum netdev_queue_type type, struct napi_struct *napi)
+{
+	struct ice_pf *pf = vsi->back;
+
+	if (!vsi->netdev)
+		return;
+
+	if (current_work() == &pf->serv_task ||
+	    test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+	    test_bit(ICE_DOWN, pf->state) ||
+	    test_bit(ICE_SUSPENDED, pf->state))
+		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+				     false);
+	else
+		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+				     true);
+}
+
 /**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
  * @q_vector: q_vector pointer
  * @locked: is the rtnl_lock already held
  *
- * Associate the q_vector napi with all the queue[s] on the vector
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
  */
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
 {
 	struct ice_rx_ring *rx_ring;
 	struct ice_tx_ring *tx_ring;

 	ice_for_each_rx_ring(rx_ring, q_vector->rx)
-		ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
-				   NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
-				   locked);
+		__ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+				     NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+				     locked);

 	ice_for_each_tx_ring(tx_ring, q_vector->tx)
-		ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
-				   NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
-				   locked);
+		__ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+				     NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+				     locked);
 	/* Also set the interrupt number for the NAPI */
 	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
 }
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector
+ */
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
+{
+	struct ice_rx_ring *rx_ring;
+	struct ice_tx_ring *tx_ring;
+
+	ice_for_each_rx_ring(rx_ring, q_vector->rx)
+		ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+				   NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
+
+	ice_for_each_tx_ring(tx_ring, q_vector->tx)
+		ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+				   NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
+	/* Also set the interrupt number for the NAPI */
+	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
@@ -2953,11 +3006,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
 /**
  * ice_vsi_set_napi_queues
  * @vsi: VSI pointer
- * @locked: is the rtnl_lock already held
  *
  * Associate queue[s] with napi for all vectors
  */
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
 {
 	int i;
@@ -2965,7 +3017,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
 		return;

 	ice_for_each_q_vector(vsi, i)
-		ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+		ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
 }

 /**
...
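The change above routes every queue/NAPI association through a new ice_queue_set_napi() wrapper that derives the rtnl lock state from the execution path: the service task and the reset/down/suspend flows run without rtnl, so the helper takes the lock itself, while every other caller is assumed to already hold it. Below is a rough user-space model of that decision, not driver code; struct ctx and its flags are simplified stand-ins for the current_work() and pf->state checks, and do_queue_set_napi() plays the role of __ice_queue_set_napi().

/* Standalone model of the rtnl-lock decision made by the napi wrapper. */
#include <stdbool.h>
#include <stdio.h>

struct ctx {
        bool in_service_task;
        bool prepared_for_reset;
        bool down;
        bool suspended;
};

/* models __ice_queue_set_napi(): caller tells it whether rtnl is held */
static void do_queue_set_napi(bool locked)
{
        if (!locked)
                puts("  taking rtnl before netif_queue_set_napi()");
        else
                puts("  caller already holds rtnl");
}

/* models ice_queue_set_napi(): derive the lock state from the path */
static void queue_set_napi(const struct ctx *c)
{
        bool unlocked_path = c->in_service_task || c->prepared_for_reset ||
                             c->down || c->suspended;

        do_queue_set_napi(!unlocked_path);
}

int main(void)
{
        struct ctx reset = { .prepared_for_reset = true };
        struct ctx normal = { 0 };

        puts("reset path:");
        queue_set_napi(&reset);
        puts("normal path:");
        queue_set_napi(&normal);
        return 0;
}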
@@ -91,9 +91,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);

-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+		   enum netdev_queue_type type, struct napi_struct *napi);

-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);

 int ice_vsi_release(struct ice_vsi *vsi);
...
@@ -3495,7 +3495,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
 	ice_for_each_q_vector(vsi, v_idx) {
 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
 			       ice_napi_poll);
-		ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+		__ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
 	}
 }
@@ -5447,6 +5447,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
 		if (ret)
 			goto err_reinit;
 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+		ice_vsi_set_napi_queues(pf->vsi[v]);
 	}

 	ret = ice_req_irq_msix_misc(pf);
...