Commit 77ca27c4 authored by Paul Greenwalt, committed by Jeff Kirsher

ice: add support for virtchnl_queue_select.[tx|rx]_queues bitmap

The VF driver can call VIRTCHNL_OP_[ENABLE|DISABLE]_QUEUES separately
for each queue. Add support for the virtchnl_queue_select.[tx|rx]_queues
bitmaps, which indicate which queues to enable or disable.
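
For context, struct virtchnl_queue_select carries one 32-bit bitmap per
direction, so the VF can select individual queues rather than all of them
at once. A minimal sketch of the VF side, assuming a hypothetical
vf_vsi_id variable and the usual virtchnl message send path:

/* Illustrative only: select queue 3 in both directions for
 * VIRTCHNL_OP_ENABLE_QUEUES; vf_vsi_id is an assumed variable.
 */
struct virtchnl_queue_select vqs = { 0 };

vqs.vsi_id = vf_vsi_id;     /* VSI id assigned by the PF */
vqs.rx_queues = BIT(3);     /* enable only Rx queue 3 */
vqs.tx_queues = BIT(3);     /* enable only Tx queue 3 */
/* ...send VIRTCHNL_OP_ENABLE_QUEUES with &vqs as the payload... */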

Track the per-queue VF Tx/Rx enable state so that already-enabled queues
are not enabled again and already-disabled queues are not disabled again.
Also keep a count of the total enabled queues, and clear
ICE_VF_STATE_QS_ENA when that count reaches zero.
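
On the PF side this means each handler walks the requested bitmap with
for_each_set_bit() and consults the new per-VF txq_ena/rxq_ena bitmaps
before touching a queue. Condensed from the ice_vc_ena_qs_msg() changes
in the diff below (error handling trimmed):

q_map = vqs->rx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
    /* Skip queue if already enabled */
    if (test_bit(vf_q_id, vf->rxq_ena))
        continue;
    if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id))
        goto error_param;    /* reply with VIRTCHNL_STATUS_ERR_PARAM */
    set_bit(vf_q_id, vf->rxq_ena);
    vf->num_qs_ena++;
}
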
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Signed-off-by: Peng Huang <peng.huang@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent d02f734c
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -196,7 +196,10 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
  * @ena: start or stop the Rx rings
  * @rxq_idx: Rx queue index
  */
-static int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
 {
     int pf_q = vsi->rxq_map[rxq_idx];
     struct ice_pf *pf = vsi->back;
@@ -2105,7 +2108,10 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
  * @ring: Tx ring to be stopped
  * @txq_meta: Meta data of Tx ring to be stopped
  */
-static int
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+int
 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
              u16 rel_vmvf_num, struct ice_ring *ring,
              struct ice_txq_meta *txq_meta)
@@ -2165,7 +2171,10 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
  * Set up a helper struct that will contain all the necessary fields that
  * are needed for stopping Tx queue
  */
-static void
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+void
 ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
           struct ice_txq_meta *txq_meta)
 {
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -39,6 +39,16 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
 void
 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
+
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+             u16 rel_vmvf_num, struct ice_ring *ring,
+             struct ice_txq_meta *txq_meta);
+
+void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+               struct ice_txq_meta *txq_meta);
+
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
 #endif /* CONFIG_PCI_IOV */
 int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -489,7 +489,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
     /* Disable VFs until reset is completed */
     for (i = 0; i < pf->num_alloc_vfs; i++)
-        clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
+        ice_set_vf_state_qs_dis(&pf->vf[i]);
 
     /* disable the VSIs and their queues that are not already DOWN */
     ice_pf_dis_all_vsi(pf, false);
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -251,6 +251,35 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
     return 0;
 }
 
+/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+    /* Clear Rx/Tx enabled queues flag */
+    bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
+    bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
+    vf->num_qs_ena = 0;
+    clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_qs(struct ice_vf *vf)
+{
+    struct ice_pf *pf = vf->pf;
+    struct ice_vsi *vsi;
+
+    vsi = pf->vsi[vf->lan_vsi_idx];
+
+    ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+    ice_vsi_stop_rx_rings(vsi);
+    ice_set_vf_state_qs_dis(vf);
+}
+
 /**
  * ice_free_vfs - Free all VFs
  * @pf: pointer to the PF structure
@@ -267,19 +296,9 @@ void ice_free_vfs(struct ice_pf *pf)
         usleep_range(1000, 2000);
 
     /* Avoid wait time by stopping all VFs at the same time */
-    for (i = 0; i < pf->num_alloc_vfs; i++) {
-        struct ice_vsi *vsi;
-
-        if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
-            continue;
-
-        vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
-        /* stop rings without wait time */
-        ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i);
-        ice_vsi_stop_rx_rings(vsi);
-
-        clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
-    }
+    for (i = 0; i < pf->num_alloc_vfs; i++)
+        if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
+            ice_dis_vf_qs(&pf->vf[i]);
 
     /* Disable IOV before freeing resources. This lets any VF drivers
      * running in the host get themselves cleaned up before we yank
@@ -1055,17 +1074,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
     for (v = 0; v < pf->num_alloc_vfs; v++)
         ice_trigger_vf_reset(&pf->vf[v], is_vflr);
 
-    for (v = 0; v < pf->num_alloc_vfs; v++) {
-        struct ice_vsi *vsi;
-
-        vf = &pf->vf[v];
-        vsi = pf->vsi[vf->lan_vsi_idx];
-        if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
-            ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
-            ice_vsi_stop_rx_rings(vsi);
-            clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
-        }
-    }
+    for (v = 0; v < pf->num_alloc_vfs; v++)
+        if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[v].vf_states))
+            ice_dis_vf_qs(&pf->vf[v]);
 
     /* HW requires some time to make sure it can flush the FIFO for a VF
      * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1144,24 +1155,21 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
     /* If the VFs have been disabled, this means something else is
      * resetting the VF, so we shouldn't continue.
      */
-    if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+    if (test_bit(__ICE_VF_DIS, pf->state))
         return false;
 
     ice_trigger_vf_reset(vf, is_vflr);
 
     vsi = pf->vsi[vf->lan_vsi_idx];
 
-    if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
-        ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
-        ice_vsi_stop_rx_rings(vsi);
-        clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
-    } else {
+    if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
+        ice_dis_vf_qs(vf);
+    else
         /* Call Disable LAN Tx queue AQ call even when queues are not
-         * enabled. This is needed for successful completiom of VFR
+         * enabled. This is needed for successful completion of VFR
          */
         ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
                 NULL, ICE_VF_RESET, vf->vf_id, NULL);
-    }
 
     hw = &pf->hw;
     /* poll VPGEN_VFRSTAT reg to make sure
@@ -1210,7 +1218,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
     ice_cleanup_and_realloc_vf(vf);
 
     ice_flush(hw);
-    clear_bit(__ICE_VF_DIS, pf->state);
 
     return true;
 }
@@ -1717,10 +1724,12 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
  * @ring_len: length of ring
  *
  * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
+ * or zero
  */
 static bool ice_vc_isvalid_ring_len(u16 ring_len)
 {
-    return (ring_len >= ICE_MIN_NUM_DESC &&
+    return ring_len == 0 ||
+           (ring_len >= ICE_MIN_NUM_DESC &&
         ring_len <= ICE_MAX_NUM_DESC &&
         !(ring_len % ICE_REQ_DESC_MULTIPLE));
 }
@@ -1877,6 +1886,8 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
         (struct virtchnl_queue_select *)msg;
     struct ice_pf *pf = vf->pf;
     struct ice_vsi *vsi;
+    unsigned long q_map;
+    u16 vf_q_id;
 
     if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
         v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1909,12 +1920,48 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
      * Tx queue group list was configured and the context bits were
      * programmed using ice_vsi_cfg_txqs
      */
-    if (ice_vsi_start_rx_rings(vsi))
-        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+    q_map = vqs->rx_queues;
+    for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+        if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+            v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+            goto error_param;
+        }
+
+        /* Skip queue if enabled */
+        if (test_bit(vf_q_id, vf->rxq_ena))
+            continue;
+
+        if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
+            dev_err(&vsi->back->pdev->dev,
+                "Failed to enable Rx ring %d on VSI %d\n",
+                vf_q_id, vsi->vsi_num);
+            v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+            goto error_param;
+        }
+
+        set_bit(vf_q_id, vf->rxq_ena);
+        vf->num_qs_ena++;
+    }
+
+    vsi = pf->vsi[vf->lan_vsi_idx];
+    q_map = vqs->tx_queues;
+    for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+        if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+            v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+            goto error_param;
+        }
+
+        /* Skip queue if enabled */
+        if (test_bit(vf_q_id, vf->txq_ena))
+            continue;
+
+        set_bit(vf_q_id, vf->txq_ena);
+        vf->num_qs_ena++;
+    }
 
     /* Set flag to indicate that queues are enabled */
     if (v_ret == VIRTCHNL_STATUS_SUCCESS)
-        set_bit(ICE_VF_STATE_ENA, vf->vf_states);
+        set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
 
 error_param:
     /* send the response to the VF */
@@ -1937,9 +1984,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
         (struct virtchnl_queue_select *)msg;
     struct ice_pf *pf = vf->pf;
     struct ice_vsi *vsi;
+    unsigned long q_map;
+    u16 vf_q_id;
 
     if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
-        !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+        !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
         v_ret = VIRTCHNL_STATUS_ERR_PARAM;
         goto error_param;
     }
@@ -1966,23 +2015,69 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
         goto error_param;
     }
 
-    if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
-        dev_err(&vsi->back->pdev->dev,
-            "Failed to stop tx rings on VSI %d\n",
-            vsi->vsi_num);
-        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+    if (vqs->tx_queues) {
+        q_map = vqs->tx_queues;
+
+        for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+            struct ice_ring *ring = vsi->tx_rings[vf_q_id];
+            struct ice_txq_meta txq_meta = { 0 };
+
+            if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                goto error_param;
+            }
+
+            /* Skip queue if not enabled */
+            if (!test_bit(vf_q_id, vf->txq_ena))
+                continue;
+
+            ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+            if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
+                         ring, &txq_meta)) {
+                dev_err(&vsi->back->pdev->dev,
+                    "Failed to stop Tx ring %d on VSI %d\n",
+                    vf_q_id, vsi->vsi_num);
+                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                goto error_param;
+            }
+
+            /* Clear enabled queues flag */
+            clear_bit(vf_q_id, vf->txq_ena);
+            vf->num_qs_ena--;
+        }
     }
 
-    if (ice_vsi_stop_rx_rings(vsi)) {
-        dev_err(&vsi->back->pdev->dev,
-            "Failed to stop rx rings on VSI %d\n",
-            vsi->vsi_num);
-        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+    if (vqs->rx_queues) {
+        q_map = vqs->rx_queues;
+
+        for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+            if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                goto error_param;
+            }
+
+            /* Skip queue if not enabled */
+            if (!test_bit(vf_q_id, vf->rxq_ena))
+                continue;
+
+            if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
+                dev_err(&vsi->back->pdev->dev,
+                    "Failed to stop Rx ring %d on VSI %d\n",
+                    vf_q_id, vsi->vsi_num);
+                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                goto error_param;
+            }
+
+            /* Clear enabled queues flag */
+            clear_bit(vf_q_id, vf->rxq_ena);
+            vf->num_qs_ena--;
+        }
     }
 
     /* Clear enabled queues flag */
-    if (v_ret == VIRTCHNL_STATUS_SUCCESS)
-        clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+    if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
+        clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
 
 error_param:
     /* send the response to the VF */
@@ -2106,6 +2201,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
     struct virtchnl_vsi_queue_config_info *qci =
         (struct virtchnl_vsi_queue_config_info *)msg;
     struct virtchnl_queue_pair_info *qpi;
+    u16 num_rxq = 0, num_txq = 0;
     struct ice_pf *pf = vf->pf;
     struct ice_vsi *vsi;
     int i;
@@ -2148,33 +2244,44 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
             goto error_param;
         }
 
         /* copy Tx queue info from VF into VSI */
-        vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
-        vsi->tx_rings[i]->count = qpi->txq.ring_len;
-        /* copy Rx queue info from VF into VSI */
-        vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
-        vsi->rx_rings[i]->count = qpi->rxq.ring_len;
-        if (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
-            qpi->rxq.databuffer_size < 1024) {
-            v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-            goto error_param;
+        if (qpi->txq.ring_len > 0) {
+            num_txq++;
+            vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+            vsi->tx_rings[i]->count = qpi->txq.ring_len;
         }
-        vsi->rx_buf_len = qpi->rxq.databuffer_size;
-        if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
-            qpi->rxq.max_pkt_size < 64) {
-            v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-            goto error_param;
+
+        /* copy Rx queue info from VF into VSI */
+        if (qpi->rxq.ring_len > 0) {
+            num_rxq++;
+            vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+            vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+
+            if (qpi->rxq.databuffer_size != 0 &&
+                (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+                 qpi->rxq.databuffer_size < 1024)) {
+                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                goto error_param;
+            }
+            vsi->rx_buf_len = qpi->rxq.databuffer_size;
+            vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+            if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+                qpi->rxq.max_pkt_size < 64) {
+                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                goto error_param;
+            }
         }
+
         vsi->max_frame = qpi->rxq.max_pkt_size;
     }
 
     /* VF can request to configure less than allocated queues
      * or default allocated queues. So update the VSI with new number
      */
-    vsi->num_txq = qci->num_queue_pairs;
-    vsi->num_rxq = qci->num_queue_pairs;
+    vsi->num_txq = num_txq;
+    vsi->num_rxq = num_rxq;
 
     /* All queues of VF VSI are in TC 0 */
-    vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
-    vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
+    vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
+    vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
 
     if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
         v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -41,9 +41,9 @@
 /* Specific VF states */
 enum ice_vf_states {
-    ICE_VF_STATE_INIT = 0,
-    ICE_VF_STATE_ACTIVE,
-    ICE_VF_STATE_ENA,
+    ICE_VF_STATE_INIT = 0,      /* PF is initializing VF */
+    ICE_VF_STATE_ACTIVE,        /* VF resources are allocated for use */
+    ICE_VF_STATE_QS_ENA,        /* VF queue(s) enabled */
     ICE_VF_STATE_DIS,
     ICE_VF_STATE_MC_PROMISC,
     ICE_VF_STATE_UC_PROMISC,
@@ -68,6 +68,8 @@ struct ice_vf {
     struct virtchnl_version_info vf_ver;
     u32 driver_caps;        /* reported by VF driver */
     struct virtchnl_ether_addr dflt_lan_addr;
+    DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
+    DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
     u16 port_vlan_id;
     u8 pf_set_mac:1;        /* VF MAC address set by VMM admin */
     u8 trusted:1;
@@ -90,6 +92,7 @@ struct ice_vf {
     u16 num_mac;
     u16 num_vlan;
     u16 num_vf_qs;          /* num of queue configured per VF */
+    u16 num_qs_ena;         /* total num of Tx/Rx queue enabled */
 };
 
 #ifdef CONFIG_PCI_IOV
@@ -116,12 +119,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+void ice_set_vf_state_qs_dis(struct ice_vf *vf);
 #else /* CONFIG_PCI_IOV */
 #define ice_process_vflr_event(pf) do {} while (0)
 #define ice_free_vfs(pf) do {} while (0)
 #define ice_vc_process_vf_msg(pf, event) do {} while (0)
 #define ice_vc_notify_link_state(pf) do {} while (0)
 #define ice_vc_notify_reset(pf) do {} while (0)
+#define ice_set_vf_state_qs_dis(vf) do {} while (0)
 static inline bool
 ice_reset_all_vfs(struct ice_pf __always_unused *pf,