Commit 0b8515ed authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-03-19

This series contains updates to ice driver only.

Michal adds support for the pruning enable flag to avoid seeing
broadcast packets on different VLANs.
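
For reference, pruning is switched on through the VSI context's
sw_flags2 field; a minimal sketch of the relevant update, lifted from
the port VLAN hunk further below:

    /* enable Rx VLAN pruning so the VSI only sees its own VLAN */
    ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
    ctxt->info.valid_sections |= cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);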

Akeem fixes an issue with VF queues being disabled and the VF netdev
network carrier being lost after reset. Also fixes an issue when doing
PFR and CORER resets, where all VF VSIs need to be reset and rebuilt
with the main VSIs before replaying all VSIs. Resolves an issue so that
VFs are properly initialized in the guest OS via PCI passthrough.

Bruce adds a local variable to avoid unnecessary dereferences
throughout ice_probe().
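
The pattern, visible throughout the ice_probe() hunks below, is simply:

    struct device *dev = &pdev->dev;
    ...
    dev_err(dev, "BAR0 I/O map error %d\n", err);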

Brett cleans up the code a bit by removing the need for a local
variable and re-designing a loop to simply return when it gets a
successful result. Replaces open-coded VSI loops with a predefined
macro (ice_for_each_vsi) to make the code more consistent. Updates the
driver to ensure ITR granularity is always 2 usecs. Refactors the
calculation of VSIs per PF into a general function that can calculate
per-PF allocations not just for VSIs but across multiple resource
types. Improves driver performance when using the default settings by
determining the ring size and the number of descriptors for transmit
and receive based on a calculation with PAGE_SIZE, ICE_MAX_NUM_DESC,
and ICE_REQ_DESC_MULTIPLE.
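
As a worked example, assuming 4 KiB pages and the descriptor sizes the
new macros divide by (32-byte Rx descriptors; 16-byte Tx descriptors):

    ICE_DFLT_NUM_RX_DESC = min(8160, ALIGN(4096 / 32, 32)) = 128
    ICE_DFLT_NUM_TX_DESC = min(8160, ALIGN(4096 / 16, 32)) = 256

so the Rx default stays at the old 128 while the Tx default doubles to
fill a page.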

Chinh fixes an issue where a reserved bit could be set when it should
never be set.
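
Per the new ICE_AQ_PHY_ENA_VALID_MASK of 0xef in the hunk below, the
reserved bit in question is BIT(4), the gap between ICE_AQ_PHY_ENA_LINK
(BIT(3)) and ICE_AQ_PHY_ENA_AUTO_LINK_UPDT (BIT(5));
ice_aq_set_phy_cfg() now logs and clears any caps bits outside the mask
before sending the command.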
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8d3a3048 ad71b256
@@ -42,10 +42,21 @@
extern const char ice_drv_ver[];
#define ICE_BAR0 0
#define ICE_DFLT_NUM_DESC 128
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
#define ICE_MAX_NUM_DESC 8160
/* set default number of Rx/Tx descriptors to the minimum between
* ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
*/
#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
ALIGN(PAGE_SIZE / \
sizeof(union ice_32byte_rx_desc), \
ICE_REQ_DESC_MULTIPLE))
#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
ALIGN(PAGE_SIZE / \
sizeof(struct ice_tx_desc), \
ICE_REQ_DESC_MULTIPLE))
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
@@ -257,7 +268,8 @@ struct ice_vsi {
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
u16 num_rxq; /* Used Rx queues */
u16 num_desc;
u16 num_rx_desc;
u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
} ____cacheline_internodealigned_in_smp;
...
@@ -953,8 +953,9 @@ struct ice_aqc_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
#define ICE_AQ_PHY_ENA_VALID_MASK ICE_M(0xef, 0)
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
#define ICE_AQ_PHY_ENA_LINK BIT(3)
#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
...
@@ -1415,13 +1415,15 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
* ice_get_guar_num_vsi - determine number of guar VSI for a PF
* ice_get_num_per_func - determine number of resources per PF
* @hw: pointer to the hw structure
* @max: value to be evenly split between each PF
*
* Determine the number of valid functions by going through the bitmap returned
* from parsing capabilities and use this to calculate the number of VSI per PF.
* from parsing capabilities and use this to calculate the number of resources
* per PF based on the max value passed in.
*/
static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
u8 funcs;
@@ -1432,7 +1434,7 @@ static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
if (!funcs)
return 0;
return ICE_MAX_VSI / funcs;
return max / funcs;
}
/**
@@ -1512,7 +1514,8 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
func_p->guar_num_vsi =
ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
number);
@@ -1929,6 +1932,15 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
if (!cfg)
return ICE_ERR_PARAM;
/* Ensure that only valid bits of cfg->caps can be turned on. */
if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
ice_debug(hw, ICE_DBG_PHY,
"Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
cfg->caps);
cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
}
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
desc.params.set_phy.lport_num = lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -2027,8 +2039,10 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
/* clear the old pause settings */
cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
ICE_AQC_PHY_EN_RX_LINK_PAUSE);
/* set the new capabilities */
cfg.caps |= pause_mask;
/* If the capabilities have changed, then set the new config */
if (cfg.caps != pcaps->caps) {
int retry_count, retry_max = 10;
...
@@ -1400,13 +1400,12 @@ ice_set_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP;
/* Check if this is lan vsi */
for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) {
ice_for_each_vsi(pf, idx)
if (pf->vsi[idx]->type == ICE_VSI_PF) {
if (np->vsi != pf->vsi[idx])
return -EOPNOTSUPP;
break;
}
}
if (p->phy.media_type != ICE_MEDIA_BASET &&
p->phy.media_type != ICE_MEDIA_FIBER &&
...
@@ -106,6 +106,16 @@
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
#define GLINT_CTL 0x0016CC54
#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
#define GLINT_CTL_ITR_GRAN_200_S 16
#define GLINT_CTL_ITR_GRAN_200_M ICE_M(0xF, 16)
#define GLINT_CTL_ITR_GRAN_100_S 20
#define GLINT_CTL_ITR_GRAN_100_M ICE_M(0xF, 20)
#define GLINT_CTL_ITR_GRAN_50_S 24
#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
#define GLINT_CTL_ITR_GRAN_25_S 28
#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
...
@@ -175,17 +175,14 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
int i;
for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
break;
if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
QRX_CTRL_QENA_STAT_M))
return 0;
usleep_range(20, 40);
}
if (i >= ICE_Q_WAIT_MAX_RETRY)
return -ETIMEDOUT;
return 0;
return -ETIMEDOUT;
}
/**
@@ -279,7 +276,26 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
}
/**
* ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
* ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
* @vsi: the VSI being configured
*/
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
dev_dbg(&vsi->back->pdev->dev,
"Not setting number of Tx/Rx descriptors for VSI type %d\n",
vsi->type);
break;
}
}
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
*
* Return 0 on success and a negative value on error
@@ -292,7 +308,6 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
case ICE_VSI_PF:
vsi->alloc_txq = pf->num_lan_tx;
vsi->alloc_rxq = pf->num_lan_rx;
vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
case ICE_VSI_VF:
@@ -310,6 +325,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->type);
break;
}
ice_vsi_set_num_desc(vsi);
}
/**
@@ -1215,7 +1232,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->ring_active = false;
ring->vsi = vsi;
ring->dev = &pf->pdev->dev;
ring->count = vsi->num_desc;
ring->count = vsi->num_tx_desc;
vsi->tx_rings[i] = ring;
}
@@ -1234,7 +1251,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
ring->count = vsi->num_desc;
ring->count = vsi->num_rx_desc;
vsi->rx_rings[i] = ring;
}
@@ -1716,6 +1733,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
return 0;
}
/**
* ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
* @hw: board specific structure
*/
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
u32 regval = rd32(hw, GLINT_CTL);
/* no need to update global register if ITR gran is already set */
if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
(((regval & GLINT_CTL_ITR_GRAN_200_M) >>
GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
(((regval & GLINT_CTL_ITR_GRAN_100_M) >>
GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
(((regval & GLINT_CTL_ITR_GRAN_50_M) >>
GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
(((regval & GLINT_CTL_ITR_GRAN_25_M) >>
GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
return;
regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
GLINT_CTL_ITR_GRAN_200_M) |
((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
GLINT_CTL_ITR_GRAN_100_M) |
((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
GLINT_CTL_ITR_GRAN_50_M) |
((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
GLINT_CTL_ITR_GRAN_25_M);
wr32(hw, GLINT_CTL, regval);
}
/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
@@ -1728,6 +1776,8 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
static void
ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
{
ice_cfg_itr_gran(hw);
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
...
@@ -322,7 +322,7 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
for (v = 0; v < pf->num_alloc_vsi; v++)
ice_for_each_vsi(pf, v)
if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
ice_vsi_sync_fltr(pf->vsi[v])) {
/* come back and try again later */
@@ -394,6 +394,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild(pf);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
ice_reset_all_vfs(pf, true);
}
}
@@ -436,6 +437,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
clear_bit(__ICE_PFR_REQ, pf->state);
clear_bit(__ICE_CORER_REQ, pf->state);
clear_bit(__ICE_GLOBR_REQ, pf->state);
ice_reset_all_vfs(pf, true);
}
return;
@@ -642,7 +644,7 @@ static void ice_watchdog_subtask(struct ice_pf *pf)
* can look at updated numbers whenever it cares to
*/
ice_update_pf_stats(pf);
for (i = 0; i < pf->num_alloc_vsi; i++)
ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->netdev)
ice_update_vsi_stats(pf->vsi[i]);
}
@@ -2032,23 +2034,6 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return 0;
}
/**
* ice_verify_itr_gran - verify driver's assumption of ITR granularity
* @pf: pointer to the PF structure
*
* There is no error returned here because the driver will be able to handle a
* different ITR granularity, but interrupt moderation will not be accurate if
* the driver's assumptions are not verified. This assumption is made so we can
* use constants in the hot path instead of accessing structure members.
*/
static void ice_verify_itr_gran(struct ice_pf *pf)
{
if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1))
dev_warn(&pf->pdev->dev,
"%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n",
(ICE_ITR_GRAN_S << 1), pf->hw.itr_gran);
}
/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
@@ -2075,6 +2060,7 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
static int ice_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
@@ -2086,20 +2072,20 @@ static int ice_probe(struct pci_dev *pdev,
err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
if (err) {
dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
dev_err(dev, "BAR0 I/O map error %d\n", err);
return err;
}
pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
if (!pf)
return -ENOMEM;
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
}
@@ -2133,12 +2119,12 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_hw(hw);
if (err) {
dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
dev_err(dev, "ice_init_hw failed: %d\n", err);
err = -EIO;
goto err_exit_unroll;
}
dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
@@ -2152,8 +2138,8 @@ static int ice_probe(struct pci_dev *pdev,
goto err_init_pf_unroll;
}
pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
sizeof(*pf->vsi), GFP_KERNEL);
pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
goto err_init_pf_unroll;
@@ -2161,8 +2147,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(&pdev->dev,
"ice_init_interrupt_scheme failed: %d\n", err);
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
goto err_init_interrupt_unroll;
}
@@ -2178,15 +2163,13 @@ static int ice_probe(struct pci_dev *pdev,
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(&pdev->dev,
"setup of misc vector failed: %d\n", err);
dev_err(dev, "setup of misc vector failed: %d\n", err);
goto err_init_interrupt_unroll;
}
}
/* create switch struct for the switch element created by FW on boot */
pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw),
GFP_KERNEL);
pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
if (!pf->first_sw) {
err = -ENOMEM;
goto err_msix_misc_unroll;
@@ -2204,8 +2187,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_setup_pf_sw(pf);
if (err) {
dev_err(&pdev->dev,
"probe failed due to setup pf switch:%d\n", err);
dev_err(dev, "probe failed due to setup pf switch:%d\n", err);
goto err_alloc_sw_unroll;
}
@@ -2215,7 +2197,6 @@ static int ice_probe(struct pci_dev *pdev,
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
ice_verify_cacheline_size(pf);
ice_verify_itr_gran(pf);
return 0;
@@ -2227,7 +2208,7 @@ static int ice_probe(struct pci_dev *pdev,
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
devm_kfree(&pdev->dev, pf->vsi);
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
ice_deinit_hw(hw);
@@ -3276,7 +3257,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
if (!pf->vsi)
return;
for (i = 0; i < pf->num_alloc_vsi; i++) {
ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
@@ -3375,16 +3356,12 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and reinit the VSI if found */
for (i = 0; i < pf->num_alloc_vsi; i++) {
ice_for_each_vsi(pf, i) {
int err;
if (!pf->vsi[i])
continue;
/* VF VSI rebuild isn't supported yet */
if (pf->vsi[i]->type == ICE_VSI_VF)
continue;
err = ice_vsi_rebuild(pf->vsi[i]);
if (err) {
dev_err(&pf->pdev->dev,
@@ -3412,7 +3389,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and replay the VSI if found */
for (i = 0; i < pf->num_alloc_vsi; i++) {
ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
@@ -3521,9 +3498,7 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_vsi_rebuild;
}
ice_reset_all_vfs(pf, true);
for (i = 0; i < pf->num_alloc_vsi; i++) {
ice_for_each_vsi(pf, i) {
bool link_up;
if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
...
@@ -236,9 +236,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
if (!tx_ring->tx_buf)
return -ENOMEM;
/* round up to nearest 4K */
/* round up to nearest page */
tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
4096);
PAGE_SIZE);
tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
@@ -339,9 +339,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return -ENOMEM;
/* round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
/* round up to nearest page */
rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
...
@@ -125,6 +125,7 @@ enum ice_rx_dtype {
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
...
@@ -24,6 +24,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_LINK BIT_ULL(4)
#define ICE_DBG_PHY BIT_ULL(5)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
...
@@ -343,11 +343,41 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
}
/**
* ice_vsi_set_pvid - Set port VLAN id for the VSI
* @vsi: the VSI being changed
* ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add pvid
* @ctxt: the vsi ctxt to fill
* @vid: the VLAN id to set as a PVID
*/
static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
{
ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
ICE_AQ_VSI_PVLAN_INSERT_PVID |
ICE_AQ_VSI_VLAN_EMOD_STR);
ctxt->info.pvid = cpu_to_le16(vid);
ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
ICE_AQ_VSI_PROP_SW_VALID);
}
/**
* ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove pvid
* @ctxt: the VSI ctxt to fill
*/
static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
{
ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
ICE_AQ_VSI_PROP_SW_VALID);
}
/**
* ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
* @vsi: the VSI to update
* @vid: the VLAN id to set as a PVID
* @enable: true for enable pvid false for disable
*/
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
@@ -359,45 +389,26 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
if (!ctxt)
return -ENOMEM;
ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
ICE_AQ_VSI_PVLAN_INSERT_PVID |
ICE_AQ_VSI_VLAN_EMOD_STR);
ctxt->info.pvid = cpu_to_le16(vid);
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt->info = vsi->info;
if (enable)
ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
else
ice_vsi_kill_pvid_fill_ctxt(ctxt);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
}
vsi->info.pvid = ctxt->info.pvid;
vsi->info.vlan_flags = ctxt->info.vlan_flags;
vsi->info = ctxt->info;
out:
devm_kfree(dev, ctxt);
return ret;
}
/**
* ice_vsi_kill_pvid - Remove port VLAN id from the VSI
* @vsi: the VSI being changed
*/
static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
if (ice_vsi_manage_vlan_stripping(vsi, false)) {
dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
vsi->vsi_num);
return -ENODEV;
}
vsi->info.pvid = 0;
return 0;
}
/**
* ice_vf_vsi_setup - Set up a VF VSI
* @pf: board private structure
@@ -447,7 +458,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Check if port VLAN exist before, and restore it accordingly */
if (vf->port_vlan_id)
ice_vsi_set_pvid(vsi, vf->port_vlan_id);
ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
eth_broadcast_addr(broadcast);
@@ -764,6 +775,7 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
struct ice_hw *hw = &pf->hw;
struct ice_vf *vf;
int v, i;
/* If we don't have any VFs, then there is nothing to reset */
@@ -778,12 +790,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_trigger_vf_reset(&pf->vf[v], is_vflr);
/* Call Disable LAN Tx queue AQ call with VFR bit set and 0
* queues to inform Firmware about VF reset.
*/
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
ICE_VF_RESET, v, NULL);
for (v = 0; v < pf->num_alloc_vfs; v++) {
struct ice_vsi *vsi;
vf = &pf->vf[v];
vsi = pf->vsi[vf->lan_vsi_idx];
if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
ice_vsi_stop_rx_rings(vsi);
clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
}
}
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -796,9 +813,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
struct ice_vf *vf = &pf->vf[v];
u32 reg;
vf = &pf->vf[v];
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & VPGEN_VFRSTAT_VFRD_M))
break;
@@ -1012,7 +1029,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated during reset */
if (!ice_reset_all_vfs(pf, false))
if (!ice_reset_all_vfs(pf, true))
goto err_unroll_sriov;
goto err_unroll_intr;
@@ -1368,7 +1385,7 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
int i;
for (i = 0; i < pf->num_alloc_vsi; i++)
ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
return pf->vsi[i];
@@ -2093,11 +2110,12 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
VLAN_VID_MASK));
if (vlan_id || qos) {
ret = ice_vsi_set_pvid(vsi, vlanprio);
ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
if (ret)
goto error_set_pvid;
} else {
ice_vsi_kill_pvid(vsi);
ice_vsi_manage_pvid(vsi, 0, false);
vsi->info.pvid = 0;
}
if (vlan_id) {
...