Commit f0739e65 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2018-11-13

This series contains updates to the ice driver only.

Brett cleans up debug print messages by removing useless or duplicate
messages, and makes sure we assign the hardware head pointer, rather
than the software head pointer, to 'head'.  He also resolves an issue
where, when disabling SR-IOV, we tried to stop the queues multiple
times; we now disable SR-IOV before stopping the Tx and Rx queues for
the VFs.
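
A condensed sketch of the head-pointer change (the real read is in
ice_tx_timeout() below; the helper name here is illustrative):

    /* Read the queue's head from the QTX_COMM_HEAD register rather
     * than trusting the driver's software copy (next_to_clean).
     */
    static u32 ice_read_hw_head(struct ice_hw *hw, u16 pf_q)
    {
            u32 reg = rd32(hw, QTX_COMM_HEAD(pf_q));

            return (reg & QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
    }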

Tony fixes a potential NULL pointer dereference during a VF reset.
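
The guard (from ice_vsi_stop_tx_rings() below): a Tx ring can exist
without its q_vector mid-reset, so check it before dereferencing:

    if (!vsi->tx_rings || !vsi->tx_rings[i] ||
        !vsi->tx_rings[i]->q_vector) {
            err = -EINVAL;
            goto err_out;
    }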

Anirudh resolves an issue where we were releasing the VSI before
removing the VSI scheduler node, which resulted in the error "Failed
to set LAN Tx queue context, error: -1".  He also fixes the guaranteed
number of VSIs available to each function by discovering the device
capabilities and using them to determine 'guar_num_vsi' per PF, rather
than always using the theoretical maximum number of VSIs.
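
The per-PF guarantee is computed from the valid-functions capability
bitmap (see ice_get_guar_num_vsi() in the diff below); roughly:

    u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 0xFF);

    guar_num_vsi = funcs ? ICE_MAX_VSI / funcs : 0;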

Dave avoids a deadlock caused by nested RTNL locking by adding a
boolean that tracks whether the RTNL lock is already held.
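
The resulting pattern in ice_dis_vsi() (condensed here; the actual
code below keeps the if/else form):

    if (!locked)
            rtnl_lock();
    vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
    if (!locked)
            rtnl_unlock();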

Lev fixes bad mask values that referenced an undefined symbol and
would break compilation.
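
The old masks shifted by the undefined CSR_AQ_NVM_PRESERVATION_S
symbol; they are now built from the defined shift:

    #define ICE_AQC_NVM_PRESERVATION_M    (3 << ICE_AQC_NVM_PRESERVATION_S)
    #define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)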

Piotr increases the receive queue disable timeout since it can take
additional time to finish all pending queue requests.
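
The wait loop in ice_pf_rxq_wait() now retries more times and sleeps
longer per iteration:

    for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
            u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));

            if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
                    break;
            usleep_range(20, 40);
    }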

Usha resolves an issue where VLAN priority tagged traffic was not
appearing on all traffic classes, which kept ETS bandwidth shaping
from working as expected.
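
Tx queues were previously all enabled against TC 0; ice_vsi_cfg_txqs()
now walks every enabled TC, in outline (condensed from the diff below):

    for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
            if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
                    break;
            for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++)
                    ; /* configure tx_rings[q_idx++] against this TC */
    }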

Henry fixes the reset path to clean up the old scheduler tree before
rebuilding it.
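
In ice_prepare_for_reset(), the port's scheduler tree is now cleared
before the control queues are shut down:

    if (hw->port_info)
            ice_sched_clear_port(hw->port_info);
    ice_shutdown_all_ctrlq(hw);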

Md Fahad removes an unnecessary check that caused a driver load error
on platforms with more than 128 cores.
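
The removed check in ice_vsi_cfg_txqs() rejected VSIs whose Tx queue
count exceeded one queue group; since queues are added one per AQ call
(qg_buf->num_txqs = 1), the cap served no purpose and tripped on hosts
whose core count pushed num_txq past it:

    if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
            err = -EINVAL;
            goto err_cfg_txqs;
    }
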
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bd5196b6 ef878d60
@@ -52,7 +52,6 @@ extern const char ice_drv_ver[];
#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_MAX_VSI_ALLOC 130
#define ICE_MAX_TXQS 2048
#define ICE_MAX_RXQS 2048
#define ICE_VSI_MAP_CONTIG 0
@@ -113,7 +112,9 @@ extern const char ice_drv_ver[];
struct ice_tc_info {
u16 qoffset;
u16 qcount;
u16 qcount_tx;
u16 qcount_rx;
u8 netdev_tc;
};
struct ice_tc_cfg {
@@ -87,6 +87,7 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
#define ICE_AQC_CAPS_SRIOV 0x0012
#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
@@ -1065,10 +1066,10 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LAST_CMD BIT(0)
#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */
#define ICE_AQC_NVM_PRESERVATION_S 1
#define ICE_AQC_NVM_PRESERVATION_M (3 << CSR_AQ_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_NO_PRESERVATION (0 << CSR_AQ_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
__le16 module_typeid;
__le16 length;
@@ -1386,6 +1386,27 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
}
/**
* ice_get_guar_num_vsi - determine number of guar VSI for a PF
* @hw: pointer to the hw structure
*
* Determine the number of valid functions by going through the bitmap returned
* from parsing capabilities and use this to calculate the number of VSI per PF.
*/
static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
{
u8 funcs;
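/* Only the low 8 bits of the reported valid_functions word are
 * meaningful: one bit per possible PF.
 */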
#define ICE_CAPS_VALID_FUNCS_M 0xFF
funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
ICE_CAPS_VALID_FUNCS_M);
if (!funcs)
return 0;
return ICE_MAX_VSI / funcs;
}
/**
* ice_parse_caps - parse function/device capabilities
* @hw: pointer to the hw struct
@@ -1428,6 +1449,12 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
case ICE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Valid Functions = %d\n",
caps->valid_functions);
break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
@@ -1457,10 +1484,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
func_p->guaranteed_num_vsi = number;
func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
func_p->guaranteed_num_vsi);
number);
}
break;
case ICE_AQC_CAPS_RSS:
@@ -7,6 +7,9 @@
#define _ICE_HW_AUTOGEN_H_
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
@@ -174,15 +174,15 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
int i;
for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
break;
usleep_range(10, 20);
usleep_range(20, 40);
}
if (i >= ICE_Q_WAIT_RETRY_LIMIT)
if (i >= ICE_Q_WAIT_MAX_RETRY)
return -ETIMEDOUT;
return 0;
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
*/
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
u16 offset = 0, qmap = 0, numq_tc;
u16 pow = 0, max_rss = 0, qcount;
u16 offset = 0, qmap = 0, tx_count = 0;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
u16 tx_numq_tc, rx_numq_tc;
u16 pow = 0, max_rss = 0;
bool ena_tc0 = false;
u8 netdev_tc = 0;
int i;
/* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.ena_tc |= 1;
}
numq_tc = qcount_rx / vsi->tc_cfg.numtc;
rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
if (!rx_numq_tc)
rx_numq_tc = 1;
tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
if (!tx_numq_tc)
tx_numq_tc = 1;
/* TC mapping is a function of the number of Rx queues assigned to the
* VSI for each traffic class and the offset of these queues.
@@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* Setup number and offset of Rx queues for all TCs for the VSI
*/
qcount = numq_tc;
qcount_rx = rx_numq_tc;
/* qcount will change if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
@@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_SMALL_RSS_QS;
qcount = min_t(int, numq_tc, max_rss);
qcount = min_t(int, qcount, vsi->rss_size);
qcount_rx = min_t(int, rx_numq_tc, max_rss);
qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
}
}
/* find the (rounded up) power-of-2 of qcount */
pow = order_base_2(qcount);
pow = order_base_2(qcount_rx);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
vsi->tc_cfg.tc_info[i].qcount = 1;
vsi->tc_cfg.tc_info[i].qcount_rx = 1;
vsi->tc_cfg.tc_info[i].qcount_tx = 1;
vsi->tc_cfg.tc_info[i].netdev_tc = 0;
ctxt->info.tc_mapping[i] = 0;
continue;
}
/* TC is enabled */
vsi->tc_cfg.tc_info[i].qoffset = offset;
vsi->tc_cfg.tc_info[i].qcount = qcount;
vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
ICE_AQ_VSI_TC_Q_OFFSET_M) |
((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
ICE_AQ_VSI_TC_Q_NUM_M);
offset += qcount;
offset += qcount_rx;
tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
vsi->num_txq = qcount_tx;
vsi->num_rxq = offset;
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -1611,55 +1623,62 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
u8 num_q_grps, q_idx = 0;
enum ice_status status;
u16 buf_len, i, pf_q;
int err = 0, tc = 0;
u8 num_q_grps;
buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
err = -EINVAL;
goto err_cfg_txqs;
}
qg_buf->num_txqs = 1;
num_q_grps = 1;
/* set up and configure the Tx queues */
ice_for_each_txq(vsi, i) {
struct ice_tlan_ctx tlan_ctx = { 0 };
/* set up and configure the Tx queues for each enabled TC */
for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
break;
pf_q = vsi->txq_map[i];
ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
ice_tlan_ctx_info);
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
struct ice_tlan_ctx tlan_ctx = { 0 };
pf_q = vsi->txq_map[q_idx];
ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
ice_tlan_ctx_info);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
*/
vsi->tx_rings[q_idx]->tail =
pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
num_q_grps, qg_buf, buf_len,
NULL);
if (status) {
dev_err(&vsi->back->pdev->dev,
"Failed to set LAN Tx queue context, error: %d\n",
status);
err = -ENODEV;
goto err_cfg_txqs;
}
/* init queue specific tail reg. It is referred as transmit
* comm scheduler queue doorbell.
*/
vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
num_q_grps, qg_buf, buf_len, NULL);
if (status) {
dev_err(&vsi->back->pdev->dev,
"Failed to set LAN Tx queue context, error: %d\n",
status);
err = -ENODEV;
goto err_cfg_txqs;
}
/* Add Tx Queue TEID into the VSI Tx ring from the
* response. This will complete configuring and
* enabling the queue.
*/
txq = &qg_buf->txqs[0];
if (pf_q == le16_to_cpu(txq->txq_id))
vsi->tx_rings[q_idx]->txq_teid =
le32_to_cpu(txq->q_teid);
/* Add Tx Queue TEID into the VSI Tx ring from the response
* This will complete configuring and enabling the queue.
*/
txq = &qg_buf->txqs[0];
if (pf_q == le16_to_cpu(txq->txq_id))
vsi->tx_rings[i]->txq_teid =
le32_to_cpu(txq->q_teid);
q_idx++;
}
}
err_cfg_txqs:
devm_kfree(&pf->pdev->dev, qg_buf);
@@ -1908,7 +1927,8 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
ice_for_each_txq(vsi, i) {
u16 v_idx;
if (!vsi->tx_rings || !vsi->tx_rings[i]) {
if (!vsi->tx_rings || !vsi->tx_rings[i] ||
!vsi->tx_rings[i]->q_vector) {
err = -EINVAL;
goto err_out;
}
@@ -2056,6 +2076,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
/* set tc configuration */
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
ret = ice_vsi_init(vsi);
if (ret)
@@ -2119,11 +2142,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_vsi_init;
}
ice_vsi_set_tc_cfg(vsi);
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -2491,6 +2512,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
ice_vsi_clear_rings(vsi);
@@ -2518,11 +2540,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf;
int ret, i;
if (!vsi)
return -EINVAL;
pf = vsi->back;
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2532,6 +2557,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_free_arrays(vsi, false);
ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
ret = ice_vsi_init(vsi);
@@ -2578,11 +2604,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
break;
}
ice_vsi_set_tc_cfg(vsi);
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
ice_shutdown_all_ctrlq(hw);
set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@@ -2091,8 +2094,7 @@ static int ice_probe(struct pci_dev *pdev,
ice_determine_q_usage(pf);
pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
hw->func_caps.guaranteed_num_vsi);
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
@@ -2544,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
err = ice_vsi_cfg_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -3138,8 +3139,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
* @locked: is the rtnl_lock already held
*/
static void ice_dis_vsi(struct ice_vsi *vsi)
static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
@@ -3148,9 +3150,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
rtnl_lock();
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
rtnl_unlock();
if (!locked) {
rtnl_lock();
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
rtnl_unlock();
} else {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
}
} else {
ice_vsi_close(vsi);
}
@@ -3189,7 +3195,7 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_dis_vsi(pf->vsi[v]);
ice_dis_vsi(pf->vsi[v], false);
}
/**
@@ -3691,8 +3697,8 @@ static void ice_tx_timeout(struct net_device *netdev)
struct ice_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u32 head, val = 0, i;
int hung_queue = -1;
u32 i;
pf->tx_timeout_count++;
@@ -3736,17 +3742,20 @@ static void ice_tx_timeout(struct net_device *netdev)
return;
if (tx_ring) {
head = tx_ring->next_to_clean;
struct ice_hw *hw = &pf->hw;
u32 head, val = 0;
head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
val = rd32(&pf->hw,
val = rd32(hw,
GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
tx_ring->vsi->hw_base_vector));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
head, tx_ring->next_to_use,
readl(tx_ring->tail), val);
head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
@@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
*
* Cleanup scheduling elements from SW DB
*/
static void ice_sched_clear_port(struct ice_port_info *pi)
void ice_sched_clear_port(struct ice_port_info *pi)
{
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return;
@@ -1527,7 +1527,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
/**
* ice_sched_cfg_vsi - configure the new/exisiting VSI
* ice_sched_cfg_vsi - configure the new/existing VSI
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: TC number
@@ -1605,3 +1605,109 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return status;
}
/**
* ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
* @pi: port information structure
* @vsi_handle: software VSI handle
*
* This function removes single aggregator VSI info entry from
* aggregator list.
*/
static void
ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *vtmp;
list_for_each_entry_safe(agg_vsi_info, vtmp,
&agg_info->agg_vsi_list, list_entry)
if (agg_vsi_info->vsi_handle == vsi_handle) {
list_del(&agg_vsi_info->list_entry);
devm_kfree(ice_hw_to_dev(pi->hw),
agg_vsi_info);
return;
}
}
}
/**
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
* @owner: LAN or RDMA
*
* This function removes the VSI and its LAN or RDMA children nodes from the
* scheduler tree.
*/
static enum ice_status
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
u8 i, j = 0;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return status;
mutex_lock(&pi->sched_lock);
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
if (!vsi_ctx)
goto exit_sched_rm_vsi_cfg;
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
struct ice_sched_node *vsi_node, *tc_node;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
continue;
vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
if (!vsi_node)
continue;
while (j < vsi_node->num_children) {
if (vsi_node->children[j]->owner == owner) {
ice_free_sched_node(pi, vsi_node->children[j]);
/* reset the counter again since the num
* children will be updated after node removal
*/
j = 0;
} else {
j++;
}
}
/* remove the VSI if it has no children */
if (!vsi_node->num_children) {
ice_free_sched_node(pi, vsi_node);
vsi_ctx->sched.vsi_node[i] = NULL;
/* clean up agg related vsi info if any */
ice_sched_rm_agg_vsi_info(pi, vsi_handle);
}
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
}
status = 0;
exit_sched_rm_vsi_cfg:
mutex_unlock(&pi->sched_lock);
return status;
}
/**
* ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
*
* This function clears the VSI and its LAN children nodes from scheduler tree
* for all TCs.
*/
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
@@ -12,6 +12,7 @@
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u16 vsi_handle;
};
struct ice_sched_agg_info {
@@ -25,6 +26,7 @@ struct ice_sched_agg_info {
/* FW AQ command calls */
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
@@ -39,4 +41,5 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
#endif /* _ICE_SCHED_H_ */
@@ -124,6 +124,8 @@ struct ice_phy_info {
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
u32 valid_functions;
/* TX/RX queues */
u16 num_rxq; /* Number/Total RX queues */
u16 rxq_first_id; /* First queue ID for RX queues */
@@ -150,7 +152,7 @@ struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
u32 num_allocd_vfs; /* Number of allocated VFs */
u32 vf_base_id; /* Logical ID of the first VF */
u32 guaranteed_num_vsi;
u32 guar_num_vsi;
};
/* Device wide capabilities */
@@ -215,6 +215,15 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
*/
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
@@ -228,15 +237,6 @@ void ice_free_vfs(struct ice_pf *pf)
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
}
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
*/
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0;