Commit 8c243700 authored by Anirudh Venkataramanan, committed by Jeff Kirsher

ice: Minor refactor in queue management

Remove q_left_tx and q_left_rx from the PF struct as these can be
obtained by calling ice_get_avail_txq_count and ice_get_avail_rxq_count
respectively.

The function ice_determine_q_usage only sets num_lan_tx and num_lan_rx
in the PF structure, and these are later assigned to vsi->alloc_txq and
vsi->alloc_rxq respectively. This is an unnecessary indirection, so
remove ice_determine_q_usage, assign vsi->alloc_txq and vsi->alloc_rxq
directly in ice_vsi_set_num_qs, and use these values to set num_lan_tx
and num_lan_rx respectively.
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent ea300f41
@@ -368,8 +368,6 @@ struct ice_pf {
 	u32 num_lan_msix;	/* Total MSIX vectors for base driver */
 	u16 num_lan_tx;		/* num LAN Tx queues setup */
 	u16 num_lan_rx;		/* num LAN Rx queues setup */
-	u16 q_left_tx;		/* remaining num Tx queues left unclaimed */
-	u16 q_left_rx;		/* remaining num Rx queues left unclaimed */
 	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
 	u16 num_alloc_vsi;
 	u16 corer_count;	/* Core reset count */
@@ -438,6 +436,8 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
 void ice_set_ethtool_ops(struct net_device *netdev);
+u16 ice_get_avail_txq_count(struct ice_pf *pf);
+u16 ice_get_avail_rxq_count(struct ice_pf *pf);
 void ice_update_vsi_stats(struct ice_vsi *vsi);
 void ice_update_pf_stats(struct ice_pf *pf);
 int ice_up(struct ice_vsi *vsi);
...
@@ -343,8 +343,20 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
 	switch (vsi->type) {
 	case ICE_VSI_PF:
-		vsi->alloc_txq = pf->num_lan_tx;
-		vsi->alloc_rxq = pf->num_lan_rx;
+		vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
+				       num_online_cpus());
+		pf->num_lan_tx = vsi->alloc_txq;
+
+		/* only 1 Rx queue unless RSS is enabled */
+		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+			vsi->alloc_rxq = 1;
+		else
+			vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
+					       num_online_cpus());
+
+		pf->num_lan_rx = vsi->alloc_rxq;
+
 		vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
 		break;
 	case ICE_VSI_VF:
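
The PF VSI sizing policy this hunk introduces reduces to: clamp the Tx queue count to the number of online CPUs, use a single Rx queue unless RSS is enabled, and take enough interrupt vectors to cover the larger of the two. A minimal standalone C sketch of that policy (made-up input values and helper names, not driver code):

#include <stdbool.h>
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	/* Illustrative stand-ins for ice_get_avail_*q_count() and
	 * num_online_cpus(); the real values come from the driver.
	 */
	unsigned int avail_txq = 128, avail_rxq = 128, online_cpus = 16;
	bool rss_ena = true;

	unsigned int alloc_txq = min_u(avail_txq, online_cpus);
	unsigned int alloc_rxq = rss_ena ? min_u(avail_rxq, online_cpus) : 1;
	unsigned int num_q_vectors = max_u(alloc_txq, alloc_rxq);

	/* Prints: alloc_txq=16 alloc_rxq=16 num_q_vectors=16 */
	printf("alloc_txq=%u alloc_rxq=%u num_q_vectors=%u\n",
	       alloc_txq, alloc_rxq, num_q_vectors);
	return 0;
}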
@@ -2577,9 +2589,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	if (ret)
 		goto unroll_vector_base;
 
-	pf->q_left_tx -= vsi->alloc_txq;
-	pf->q_left_rx -= vsi->alloc_rxq;
-
 	/* Do not exit if configuring RSS had an issue, at least
 	 * receive traffic on first queue. Hence no need to capture
 	 * return value
@@ -2643,8 +2652,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	ice_vsi_delete(vsi);
 unroll_get_qs:
 	ice_vsi_put_qs(vsi);
-	pf->q_left_tx += vsi->alloc_txq;
-	pf->q_left_rx += vsi->alloc_rxq;
 	ice_vsi_clear(vsi);
 
 	return NULL;
@@ -2992,8 +2999,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_put_qs(vsi);
-	pf->q_left_tx += vsi->alloc_txq;
-	pf->q_left_rx += vsi->alloc_rxq;
 
 	/* retain SW VSI data structure since it is needed to unregister and
 	 * free VSI netdev when PF is not in reset recovery pending state,\
@@ -3102,8 +3107,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 		if (ret)
 			goto err_vectors;
 
-		pf->q_left_tx -= vsi->alloc_txq;
-		pf->q_left_rx -= vsi->alloc_rxq;
 		break;
 	default:
 		break;
...
@@ -2192,36 +2192,48 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
 		ice_vsi_free_q_vectors(vsi);
 		ice_vsi_delete(vsi);
 		ice_vsi_put_qs(vsi);
-		pf->q_left_tx += vsi->alloc_txq;
-		pf->q_left_rx += vsi->alloc_rxq;
 		ice_vsi_clear(vsi);
 	}
 	return status;
 }
 
 /**
- * ice_determine_q_usage - Calculate queue distribution
- * @pf: board private structure
- *
- * Return -ENOMEM if we don't get enough queues for all ports
+ * ice_get_avail_q_count - Get count of queues in use
+ * @pf_qmap: bitmap to get queue use count from
+ * @lock: pointer to a mutex that protects access to pf_qmap
+ * @size: size of the bitmap
  */
-static void ice_determine_q_usage(struct ice_pf *pf)
+static u16
+ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
 {
-	u16 q_left_tx, q_left_rx;
-
-	q_left_tx = pf->hw.func_caps.common_cap.num_txq;
-	q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
-
-	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
-
-	/* only 1 Rx queue unless RSS is enabled */
-	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
-		pf->num_lan_rx = 1;
-	else
-		pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
-
-	pf->q_left_tx = q_left_tx - pf->num_lan_tx;
-	pf->q_left_rx = q_left_rx - pf->num_lan_rx;
+	u16 count = 0, bit;
+
+	mutex_lock(lock);
+	for_each_clear_bit(bit, pf_qmap, size)
+		count++;
+	mutex_unlock(lock);
+
+	return count;
+}
+
+/**
+ * ice_get_avail_txq_count - Get count of Tx queues in use
+ * @pf: pointer to an ice_pf instance
+ */
+u16 ice_get_avail_txq_count(struct ice_pf *pf)
+{
+	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
+				     pf->max_pf_txqs);
+}
+
+/**
+ * ice_get_avail_rxq_count - Get count of Rx queues in use
+ * @pf: pointer to an ice_pf instance
+ */
+u16 ice_get_avail_rxq_count(struct ice_pf *pf)
+{
+	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
+				     pf->max_pf_rxqs);
+}
 
 /**
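
The new helpers replace the cached q_left_tx/q_left_rx counters with an on-demand count over the driver's queue-allocation bitmaps: a queue is available when its bit is clear, and the walk happens under avail_q_mutex so a concurrent claim or release cannot skew the count. A self-contained userspace sketch of the same pattern, with a plain 64-bit map and a pthread mutex standing in for the kernel's bitmap helpers and avail_q_mutex:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 64u

static uint64_t qmap;	/* bit set => queue claimed */
static pthread_mutex_t qmap_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int count_avail_queues(unsigned int size)
{
	unsigned int count = 0, bit;

	pthread_mutex_lock(&qmap_lock);
	for (bit = 0; bit < size; bit++)
		if (!(qmap & (1ULL << bit)))	/* clear bit => available */
			count++;
	pthread_mutex_unlock(&qmap_lock);

	return count;
}

int main(void)
{
	qmap = 0xfULL;	/* queues 0-3 claimed */

	/* Prints: 60 of 64 queues available */
	printf("%u of %u queues available\n",
	       count_avail_queues(MAX_QUEUES), MAX_QUEUES);
	return 0;
}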
@@ -2541,8 +2553,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 		}
 	}
 
-	ice_determine_q_usage(pf);
-
 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
 	if (!pf->num_alloc_vsi) {
 		err = -EIO;
...
@@ -595,7 +595,8 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
 	/* Update number of VF queues, in case VF had requested for queue
 	 * changes
 	 */
-	tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+	tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
+				 ice_get_avail_rxq_count(pf));
 	tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
 	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
 	    vf->num_req_qs != vf->num_vf_qs)
@@ -898,11 +899,11 @@ static int ice_check_avail_res(struct ice_pf *pf)
 	 * at runtime through Virtchnl, that is the reason we start by reserving
 	 * few queues.
 	 */
-	num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
-				    ICE_MIN_QS_PER_VF);
-	num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
-				    ICE_MIN_QS_PER_VF);
+	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
+				    ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
+	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
+				    ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
 
 	if (!num_txq || !num_rxq)
 		return -EIO;
@@ -2511,7 +2512,8 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 	}
 
 	cur_queues = vf->num_vf_qs;
-	tx_rx_queue_left = min_t(u16, pf->q_left_tx, pf->q_left_rx);
+	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
+				 ice_get_avail_rxq_count(pf));
 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
 	if (!req_queues) {
 		dev_err(&pf->pdev->dev,
...
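
The virtchnl path above bounds a VF's queue request by the smaller of the free Tx and Rx pools plus the queues the VF already owns. A worked example of that arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical free-pool sizes; in the driver these come from
	 * ice_get_avail_txq_count()/ice_get_avail_rxq_count().
	 */
	unsigned int avail_txq = 24, avail_rxq = 20;
	unsigned int cur_queues = 4;	/* queues the VF holds today */

	unsigned int tx_rx_queue_left = avail_txq < avail_rxq ? avail_txq : avail_rxq;
	unsigned int max_allowed_vf_queues = tx_rx_queue_left + cur_queues;

	/* Prints: a VF may request up to 24 queues */
	printf("a VF may request up to %u queues\n", max_allowed_vf_queues);
	return 0;
}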