Commit cd0f4f3b authored by Jacob Keller, committed by Tony Nguyen

ice: pass num_vfs to ice_set_per_vf_res()

We are planning to replace the simple array structure tracking VFs with
a hash table. This change will also remove the "num_alloc_vfs" variable.

Instead, new accessor functions that use the hash table as the source
of truth will be introduced. These will generally be equivalent to the
existing checks, except during VF initialization.

Specifically, ice_set_per_vf_res() cannot use the hash table because it
runs before the VF structures have been inserted into the hash table.

Instead of using pf->num_alloc_vfs, simply pass the num_vfs value in
from the caller.
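
For reference, the interface change is small; condensed from the diff
below, it simply threads the caller's VF count through:

	/* new prototype: the caller supplies the VF count directly */
	static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs);

	/* ice_ena_vfs() already receives num_vfs, so the call site becomes */
	if (ice_set_per_vf_res(pf, num_vfs)) {
		/* ... error handling as in the hunk below ... */
	}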

Note that ice_determine_res(), a helper called by ice_set_per_vf_res(),
also implicitly depends on pf->num_alloc_vfs. Replace ice_determine_res()
with a simpler inline implementation based on rounddown_pow_of_two().
Note that we must explicitly check that its argument is non-zero, since
rounddown_pow_of_two() is not defined for zero.

Instead of using the function and its while loop, simply calculate the
number of queues available per VF by dividing the available queue count
by num_vfs. Check whether the desired number of queues fits; if not,
round down to the nearest power of two that fits within the available
queues.

This matches the behavior of ice_determine_res() but is easier to
follow as simple in-line logic. Remove ice_determine_res() entirely.
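
Condensed from the diff below, the new Tx queue computation looks like
this (the Rx path is identical, using ice_get_avail_rxq_count()):

	/* most queues a VF can use, bounded by its MSI-X budget */
	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);

	/* queues each VF can draw from the common pool */
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;	/* rounddown_pow_of_two(0) is undefined */
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

If either count ends up below ICE_MIN_QS_PER_VF, ice_set_per_vf_res()
now fails with -EIO, replacing the old "!num_txq || !num_rxq" check.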

With this change, we no longer depend on the pf->num_alloc_vfs during
the initialization phase of VFs. This will allow us to safely remove it
in a future planned refactor of the VF data structures.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent b03d519d

@@ -1069,45 +1069,6 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
 	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
 }
 
-/**
- * ice_determine_res
- * @pf: pointer to the PF structure
- * @avail_res: available resources in the PF structure
- * @max_res: maximum resources that can be given per VF
- * @min_res: minimum resources that can be given per VF
- *
- * Returns non-zero value if resources (queues/vectors) are available or
- * returns zero if PF cannot accommodate for all num_alloc_vfs.
- */
-static int
-ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
-{
-	bool checked_min_res = false;
-	int res;
-
-	/* start by checking if PF can assign max number of resources for
-	 * all num_alloc_vfs.
-	 * if yes, return number per VF
-	 * If no, divide by 2 and roundup, check again
-	 * repeat the loop till we reach a point where even minimum resources
-	 * are not available, in that case return 0
-	 */
-	res = max_res;
-	while ((res >= min_res) && !checked_min_res) {
-		int num_all_res;
-
-		num_all_res = pf->num_alloc_vfs * res;
-		if (num_all_res <= avail_res)
-			return res;
-
-		if (res == min_res)
-			checked_min_res = true;
-
-		res = DIV_ROUND_UP(res, 2);
-	}
-	return 0;
-}
-
 /**
  * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
  * @vf: VF to calculate the register index for
@@ -1187,6 +1148,7 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
 /**
  * ice_set_per_vf_res - check if vectors and queues are available
  * @pf: pointer to the PF structure
+ * @num_vfs: the number of SR-IOV VFs being configured
  *
  * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
  * get more vectors and can enable more queues per VF. Note that this does not
@@ -1205,20 +1167,20 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
  * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
  * by each VF during VF initialization and reset.
  */
-static int ice_set_per_vf_res(struct ice_pf *pf)
+static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
 {
 	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
 	int msix_avail_per_vf, msix_avail_for_sriov;
 	struct device *dev = ice_pf_to_dev(pf);
-	u16 num_msix_per_vf, num_txq, num_rxq;
 
-	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
+	if (!num_vfs || max_valid_res_idx < 0)
 		return -EINVAL;
 
 	/* determine MSI-X resources per VF */
 	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
 		pf->irq_tracker->num_entries;
-	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
+	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
 	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
 		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
@@ -1230,32 +1192,35 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
 	} else {
 		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
 			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
-			pf->num_alloc_vfs);
+			num_vfs);
 		return -EIO;
 	}
 
-	/* determine queue resources per VF */
-	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
-				    min_t(u16,
-					  num_msix_per_vf - ICE_NONQ_VECS_VF,
-					  ICE_MAX_RSS_QS_PER_VF),
-				    ICE_MIN_QS_PER_VF);
+	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+			ICE_MAX_RSS_QS_PER_VF);
+	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
+	if (!avail_qs)
+		num_txq = 0;
+	else if (num_txq > avail_qs)
+		num_txq = rounddown_pow_of_two(avail_qs);
 
-	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
-				    min_t(u16,
-					  num_msix_per_vf - ICE_NONQ_VECS_VF,
-					  ICE_MAX_RSS_QS_PER_VF),
-				    ICE_MIN_QS_PER_VF);
+	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+			ICE_MAX_RSS_QS_PER_VF);
+	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
+	if (!avail_qs)
+		num_rxq = 0;
+	else if (num_rxq > avail_qs)
+		num_rxq = rounddown_pow_of_two(avail_qs);
 
-	if (!num_txq || !num_rxq) {
+	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
 		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
-			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
+			ICE_MIN_QS_PER_VF, num_vfs);
 		return -EIO;
 	}
 
-	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
+	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs)) {
 		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
-			pf->num_alloc_vfs);
+			num_vfs);
 		return -EINVAL;
 	}
 
@@ -1263,7 +1228,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
 	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
 	pf->num_msix_per_vf = num_msix_per_vf;
 	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
-		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
+		 num_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
 
 	return 0;
 }
@@ -1977,7 +1942,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
 	if (ret)
 		goto err_pci_disable_sriov;
 
-	if (ice_set_per_vf_res(pf)) {
+	if (ice_set_per_vf_res(pf, num_vfs)) {
 		dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
 			num_vfs);
 		ret = -ENOSPC;