Commit ac371613 authored by Brett Creeley, committed by Jeff Kirsher

ice: Refactor ice_ena_vf_mappings to split MSIX and queue mappings

Currently ice_ena_vf_mappings() does all of the VF's MSIX and queue
mapping in one function. This makes it hard to digest. Fix this by
creating a new function for enabling MSIX mappings and one for enabling
queue mappings.

Also, rename some variables in the functions for clarity.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent d3112cd1
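
For a quick overview before reading the diff: the refactor leaves ice_ena_vf_mappings() as a thin wrapper that delegates to the two new helpers. The snippet below is reproduced from the new version added at the end of the diff, with orientation comments added here (they are not part of the patch):

	static void ice_ena_vf_mappings(struct ice_vf *vf)
	{
		struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

		/* programs VPINT_ALLOC, VPINT_ALLOC_PCI, GLINT_VECT2FUNC and the
		 * mailbox interrupt mapping for the VF's MSI-X vectors
		 */
		ice_ena_vf_msix_mappings(vf);
		/* programs the VPLAN Tx/Rx queue mapping registers using the
		 * VSI's allocated queue counts
		 */
		ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
	}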
@@ -651,55 +651,70 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
 }
 
 /**
- * ice_ena_vf_mappings
- * @vf: pointer to the VF structure
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
  *
- * Enable VF vectors and queues allocation by writing the details into
- * respective registers.
+ * Some of the registers need to be indexed/configured using hardware global
+ * device values and other registers need 0-based values, which represent PF
+ * based values.
  */
-static void ice_ena_vf_mappings(struct ice_vf *vf)
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
 {
-	int abs_vf_id, abs_first, abs_last;
+	int device_based_first_msix, device_based_last_msix;
+	int pf_based_first_msix, pf_based_last_msix, v;
 	struct ice_pf *pf = vf->pf;
-	struct ice_vsi *vsi;
-	struct device *dev;
-	int first, last, v;
+	int device_based_vf_id;
 	struct ice_hw *hw;
 	u32 reg;
 
-	dev = ice_pf_to_dev(pf);
 	hw = &pf->hw;
-	vsi = pf->vsi[vf->lan_vsi_idx];
-	first = vf->first_vector_idx;
-	last = (first + pf->num_msix_per_vf) - 1;
-	abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
-	abs_last = (abs_first + pf->num_msix_per_vf) - 1;
-	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+	pf_based_first_msix = vf->first_vector_idx;
+	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
 
-	/* VF Vector allocation */
-	reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
-	       ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
-	       VPINT_ALLOC_VALID_M);
+	device_based_first_msix = pf_based_first_msix +
+		pf->hw.func_caps.common_cap.msix_vector_first_id;
+	device_based_last_msix =
+		(device_based_first_msix + pf->num_msix_per_vf) - 1;
+	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+		VPINT_ALLOC_FIRST_M) |
+	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
 	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
 
-	reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
-		 & VPINT_ALLOC_PCI_FIRST_M) |
-	       ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
-	       VPINT_ALLOC_PCI_VALID_M);
+	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
+		 & VPINT_ALLOC_PCI_FIRST_M) |
+	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
 
 	/* map the interrupts to its functions */
-	for (v = first; v <= last; v++) {
-		reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
 			GLINT_VECT2FUNC_VF_NUM_M) |
 		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
 			GLINT_VECT2FUNC_PF_NUM_M));
 		wr32(hw, GLINT_VECT2FUNC(v), reg);
 	}
 
-	/* Map mailbox interrupt. We put an explicit 0 here to remind us that
-	 * VF admin queue interrupts will go to VF MSI-X vector 0.
-	 */
-	wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
+	/* Map mailbox interrupt to VF MSI-X vector 0 */
+	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+	struct device *dev = ice_pf_to_dev(vf->pf);
+	struct ice_hw *hw = &vf->pf->hw;
+	u32 reg;
 
 	/* set regardless of mapping mode */
 	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
@@ -711,7 +726,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
 		 */
 		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
 			VPLAN_TX_QBASE_VFFIRSTQ_M) |
-		       (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
 			VPLAN_TX_QBASE_VFNUMQ_M));
 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
 	} else {
@@ -729,7 +744,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
 		 */
 		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
 			VPLAN_RX_QBASE_VFFIRSTQ_M) |
-		       (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
 			VPLAN_RX_QBASE_VFNUMQ_M));
 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
 	} else {
@@ -737,6 +752,18 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
 	}
 }
 
+/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+	ice_ena_vf_msix_mappings(vf);
+	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
 /**
  * ice_determine_res
  * @pf: pointer to the PF structure
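
As a side note on the PF-based vs. device-based indexing described in the new ice_ena_vf_msix_mappings() kernel-doc, the following is a minimal, self-contained sketch of the same arithmetic with made-up numbers; the values standing in for vf->first_vector_idx, pf->num_msix_per_vf and msix_vector_first_id are hypothetical, not taken from real hardware.

	#include <stdio.h>

	/* Hypothetical capability values, for illustration only */
	#define MSIX_VECTOR_FIRST_ID	129	/* hw->func_caps.common_cap.msix_vector_first_id */
	#define NUM_MSIX_PER_VF		5	/* pf->num_msix_per_vf */

	int main(void)
	{
		/* PF-based (0-based) vector range, as stored in vf->first_vector_idx */
		int pf_based_first_msix = 20;
		int pf_based_last_msix = (pf_based_first_msix + NUM_MSIX_PER_VF) - 1;	/* 24 */

		/* Device-based (hardware global) range adds the PF's MSI-X base offset */
		int device_based_first_msix = pf_based_first_msix + MSIX_VECTOR_FIRST_ID;	/* 149 */
		int device_based_last_msix =
			(device_based_first_msix + NUM_MSIX_PER_VF) - 1;			/* 153 */

		/* In the patch, VPINT_ALLOC and VPINT_ALLOC_PCI are written with the
		 * device-based range, while the GLINT_VECT2FUNC loop is indexed by
		 * the PF-based vector numbers.
		 */
		printf("PF-based vectors:     %d..%d\n", pf_based_first_msix, pf_based_last_msix);
		printf("device-based vectors: %d..%d\n", device_based_first_msix, device_based_last_msix);

		return 0;
	}

Keeping this distinction entirely inside ice_ena_vf_msix_mappings() is part of what makes the split version easier to follow than the original combined function.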