Commit 9073989a authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-05-04

This series contains updates to the ice driver only.

Jesse updated the driver to make more functions consistent in their use
of a local variable for vsi->back.  Updated the driver to use bit fields
where possible to avoid wasting storage space on single bit values.
Optimized the driver to be more memory efficient by moving structure
members around that are not in our hot path.
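
The effect of the bit-field change is easiest to see in the ice_port_info
hunk near the end of this diff.  As a standalone illustration only (not
part of the series), a minimal sketch of the space savings:

    #include <stdio.h>

    /* three full bytes (plus any padding) for three flags */
    struct loose {
            unsigned char dcbx_status;      /* needs only 3 bits */
            unsigned char is_sw_lldp;       /* needs only 1 bit */
            unsigned char is_vf;            /* needs only 1 bit */
    };

    /* the same flags packed into a single byte with bit fields */
    struct packed {
            unsigned char dcbx_status:3;
            unsigned char is_sw_lldp:1;
            unsigned char is_vf:1;
    };

    int main(void)
    {
            printf("loose=%zu packed=%zu\n",
                   sizeof(struct loose), sizeof(struct packed));
            return 0;
    }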

Michal updates the driver to disable the VF if a malicious device driver
(MDD) event is detected by the hardware.  Adds checks to validate the
messages coming from the VF driver.  Tightens up the sniffing of
transmit traffic so that VFs cannot see what is on other VSIs.
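
The mechanics are in the ice_handle_mdd_event() hunk further down;
distilled into a rough sketch (one of the four MDD cause registers shown,
the other three follow the same pattern):

    /* latch any MDD cause for this VF, clearing the event register */
    bool mdd_detected = false;

    reg = rd32(hw, VP_MDET_TX_PQM(i));
    if (reg & VP_MDET_TX_PQM_VALID_M) {
            wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);    /* clear the event */
            mdd_detected = true;
    }
    /* ... same check for VP_MDET_TX_TCLAN, VP_MDET_TX_TDPU, VP_MDET_RX ... */

    if (mdd_detected) {
            vf->num_mdd_events++;
            /* keep the VF down until an admin re-enables it via the PF */
            set_bit(ICE_VF_STATE_DIS, vf->vf_states);
    }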

Tony fixed the driver so that the Rx VLAN stripping state won't change
every time Tx VLAN insertion is changed.  Cleaned up a __always_unused
attribute, now that the variable is being used.  Fixed the function
which evaluates the setting of features to ensure it can evaluate and
set multiple features in a single function call.
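
The actual fix is in the ice_set_features() hunk below; the pitfall it
removes, shown as a self-contained toy (feature bits and handlers here
are illustrative stand-ins, not the driver's):

    #include <stdio.h>

    #define FEAT_RX_STRIP  (1u << 0)
    #define FEAT_TX_INSERT (1u << 1)

    /* Each feature gets its own independent if/else pair; chaining
     * everything into one long "else if" ladder would stop after the
     * first changed feature and silently ignore the rest.
     */
    static void apply_features(unsigned int cur, unsigned int req)
    {
            if ((req & FEAT_RX_STRIP) && !(cur & FEAT_RX_STRIP))
                    printf("enable Rx VLAN stripping\n");
            else if (!(req & FEAT_RX_STRIP) && (cur & FEAT_RX_STRIP))
                    printf("disable Rx VLAN stripping\n");

            if ((req & FEAT_TX_INSERT) && !(cur & FEAT_TX_INSERT))
                    printf("enable Tx VLAN insertion\n");
            else if (!(req & FEAT_TX_INSERT) && (cur & FEAT_TX_INSERT))
                    printf("disable Tx VLAN insertion\n");
    }

    int main(void)
    {
            /* both features flip in one call; both must take effect */
            apply_features(0, FEAT_RX_STRIP | FEAT_TX_INSERT);
            return 0;
    }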

Akeem fixes the driver so that we do not attempt to remove a VLAN filter
that does not exist.  Adds support for adding an ethertype based filter
rule on a VSI and describes it in a very long run-on sentence. :-)
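
The new ice_add_eth_mac()/ice_remove_eth_mac() pair is in the
ice_switch.c hunk below.  A hypothetical caller might build the filter
list like this (field names recalled from ice_switch.h of that era and
shown purely for illustration; the forwarding action setup is elided):

    struct ice_fltr_list_entry entry = { 0 };
    LIST_HEAD(em_list);

    entry.fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
    entry.fltr_info.l_data.ethertype_mac.ethertype = 0x88F7; /* e.g. PTP */
    list_add(&entry.list_entry, &em_list);

    if (ice_add_eth_mac(hw, &em_list))
            ;       /* handle failure; per-entry status is in entry.status */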

Bruce cleans up static analysis warnings by removing a local variable
initialization that is not needed.

Brett makes the allocation and deallocation of VSI q_vectors more
consistent across all the driver flows.  In addition, makes the getting
and setting of coalesce settings more consistent throughout the driver.
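
Both the ethtool get and set paths below now funnel through a single
per-queue helper.  A rough sketch of the new set-path control flow (the
real code shares the work-limit update via a goto, elided here):

    if (q_num < 0) {
            /* negative q_num means "all queues" */
            ice_for_each_q_vector(vsi, i)
                    if (ice_set_q_coalesce(vsi, ec, i))
                            return -EINVAL;
    } else if (ice_set_q_coalesce(vsi, ec, q_num)) {
            return -EINVAL;
    }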
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ba6223fc 64439f8f
@@ -83,6 +83,8 @@ extern const char ice_drv_ver[];
 #define ICE_MAX_QS_PER_VF		256
 #define ICE_MIN_QS_PER_VF		1
 #define ICE_DFLT_QS_PER_VF		4
+#define ICE_NONQ_VECS_VF		1
+#define ICE_MAX_SCATTER_QS_PER_VF	16
 #define ICE_MAX_BASE_QS_PER_VF		16
 #define ICE_MAX_INTR_PER_VF		65
 #define ICE_MIN_INTR_PER_VF		(ICE_MIN_QS_PER_VF + 1)
@@ -253,6 +255,8 @@ struct ice_vsi {
 	s16 vf_id;			/* VF ID for SR-IOV VSIs */
 
+	u16 ethtype;			/* Ethernet protocol for pause frame */
+
 	/* RSS config */
 	u16 rss_table_size;		/* HW RSS table size */
 	u16 rss_size;			/* Allocated RSS queues */
...
@@ -1880,10 +1880,10 @@ void
 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
 		    u16 link_speeds_bitmap)
 {
-	u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
 	u64 pt_high;
 	u64 pt_low;
 	int index;
+	u16 speed;
 
 	/* We first check with low part of phy_type */
 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
...
@@ -79,6 +79,7 @@ struct ice_rq_event_info {
 /* Control Queue information */
 struct ice_ctl_q_info {
 	enum ice_ctl_q qtype;
+	enum ice_aq_err rq_last_status;	/* last status on receive queue */
 	struct ice_ctl_q_ring rq;	/* receive queue */
 	struct ice_ctl_q_ring sq;	/* send queue */
 	u32 sq_cmd_timeout;		/* send queue cmd write back timeout */
@@ -86,10 +87,9 @@ struct ice_ctl_q_info {
 	u16 num_sq_entries;		/* send queue depth */
 	u16 rq_buf_size;		/* receive queue buffer size */
 	u16 sq_buf_size;		/* send queue buffer size */
+	enum ice_aq_err sq_last_status;	/* last status on send queue */
 	struct mutex sq_lock;		/* Send queue lock */
 	struct mutex rq_lock;		/* Receive queue lock */
-	enum ice_aq_err sq_last_status;	/* last status on send queue */
-	enum ice_aq_err rq_last_status;	/* last status on receive queue */
 };
 
 #endif /* _ICE_CONTROLQ_H_ */
@@ -1251,7 +1251,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
  */
 static void
 ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
-			   struct net_device __always_unused *netdev)
+			   struct net_device *netdev)
 {
 	/* link is down and the driver needs to fall back on
 	 * supported PHY types to figure out what info to display
@@ -2254,50 +2254,61 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
 	return 0;
 }
 
+/**
+ * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
+ * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings
+ * @ec: coalesce settings to program the device with
+ * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
+ *
+ * Return 0 on success, and negative under the following conditions:
+ * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
+ * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
+ */
+static int
+ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
+{
+	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+					&vsi->rx_rings[q_num]->q_vector->rx))
+			return -EINVAL;
+		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+					&vsi->tx_rings[q_num]->q_vector->tx))
+			return -EINVAL;
+	} else if (q_num < vsi->num_rxq) {
+		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+					&vsi->rx_rings[q_num]->q_vector->rx))
+			return -EINVAL;
+	} else if (q_num < vsi->num_txq) {
+		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+					&vsi->tx_rings[q_num]->q_vector->tx))
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * __ice_get_coalesce - get ITR/INTRL values for the device
  * @netdev: pointer to the netdev associated with this query
  * @ec: ethtool structure to fill with driver's coalesce settings
  * @q_num: queue number to get the coalesce settings for
+ *
+ * If the caller passes in a negative q_num then we return coalesce settings
+ * based on queue number 0, else use the actual q_num passed in.
  */
 static int
 __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
 		   int q_num)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
-	int tx = -EINVAL, rx = -EINVAL;
 	struct ice_vsi *vsi = np->vsi;
 
-	if (q_num < 0) {
-		rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
-					 &vsi->rx_rings[0]->q_vector->rx);
-		tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
-					 &vsi->tx_rings[0]->q_vector->tx);
-
-		goto update_coalesced_frames;
-	}
-
-	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
-		rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
-					 &vsi->rx_rings[q_num]->q_vector->rx);
-		tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
-					 &vsi->tx_rings[q_num]->q_vector->tx);
-	} else if (q_num < vsi->num_rxq) {
-		rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
-					 &vsi->rx_rings[q_num]->q_vector->rx);
-	} else if (q_num < vsi->num_txq) {
-		tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
-					 &vsi->tx_rings[q_num]->q_vector->tx);
-	} else {
-		/* q_num is invalid for both Rx and Tx queues */
-		return -EINVAL;
-	}
-
-update_coalesced_frames:
-	/* either q_num is invalid for both Rx and Tx queues or setting coalesce
-	 * failed completely
-	 */
-	if (tx && rx)
+	if (q_num < 0)
+		q_num = 0;
+
+	if (ice_get_q_coalesce(vsi, ec, q_num))
 		return -EINVAL;
 
 	if (q_num < vsi->num_txq)
@@ -2423,54 +2434,77 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 	return 0;
 }
 
+/**
+ * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings
+ * @vsi: VSI associated to the queue that need updating
+ * @ec: coalesce settings to program the device with
+ * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
+ *
+ * Return 0 on success, and negative under the following conditions:
+ * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed.
+ * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
+ */
+static int
+ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
+{
+	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+		if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+					&vsi->rx_rings[q_num]->q_vector->rx,
+					vsi))
+			return -EINVAL;
+		if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+					&vsi->tx_rings[q_num]->q_vector->tx,
+					vsi))
+			return -EINVAL;
+	} else if (q_num < vsi->num_rxq) {
+		if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+					&vsi->rx_rings[q_num]->q_vector->rx,
+					vsi))
+			return -EINVAL;
+	} else if (q_num < vsi->num_txq) {
+		if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+					&vsi->tx_rings[q_num]->q_vector->tx,
+					vsi))
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * __ice_set_coalesce - set ITR/INTRL values for the device
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: ethtool structure to fill with driver's coalesce settings
+ * @q_num: queue number to get the coalesce settings for
+ *
+ * If the caller passes in a negative q_num then we set the coalesce settings
+ * for all Tx/Rx queues, else use the actual q_num passed in.
+ */
 static int
 __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
 		   int q_num)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
-	int rx = -EINVAL, tx = -EINVAL;
 	struct ice_vsi *vsi = np->vsi;
 
 	if (q_num < 0) {
 		int i;
 
 		ice_for_each_q_vector(vsi, i) {
-			struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
-			if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
-						&q_vector->rx, vsi) ||
-			    ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
-						&q_vector->tx, vsi))
+			if (ice_set_q_coalesce(vsi, ec, i))
 				return -EINVAL;
 		}
 		goto set_work_lmt;
 	}
 
-	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
-		rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
-					 &vsi->rx_rings[q_num]->q_vector->rx,
-					 vsi);
-		tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
-					 &vsi->tx_rings[q_num]->q_vector->tx,
-					 vsi);
-	} else if (q_num < vsi->num_rxq) {
-		rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
-					 &vsi->rx_rings[q_num]->q_vector->rx,
-					 vsi);
-	} else if (q_num < vsi->num_txq) {
-		tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
-					 &vsi->tx_rings[q_num]->q_vector->tx,
-					 vsi);
-	}
-
-	/* either q_num is invalid for both Rx and Tx queues or setting coalesce
-	 * failed completely
-	 */
-	if (rx && tx)
+	if (ice_set_q_coalesce(vsi, ec, q_num))
 		return -EINVAL;
 
 set_work_lmt:
 	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
 		vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq,
 				    ec->rx_max_coalesced_frames_irq);
...
@@ -163,11 +163,15 @@
 #define PFINT_OICR_ENA			0x0016C900
 #define QINT_RQCTL(_QRX)		(0x00150000 + ((_QRX) * 4))
 #define QINT_RQCTL_MSIX_INDX_S		0
+#define QINT_RQCTL_MSIX_INDX_M		ICE_M(0x7FF, 0)
 #define QINT_RQCTL_ITR_INDX_S		11
+#define QINT_RQCTL_ITR_INDX_M		ICE_M(0x3, 11)
 #define QINT_RQCTL_CAUSE_ENA_M		BIT(30)
 #define QINT_TQCTL(_DBQM)		(0x00140000 + ((_DBQM) * 4))
 #define QINT_TQCTL_MSIX_INDX_S		0
+#define QINT_TQCTL_MSIX_INDX_M		ICE_M(0x7FF, 0)
 #define QINT_TQCTL_ITR_INDX_S		11
+#define QINT_TQCTL_ITR_INDX_M		ICE_M(0x3, 11)
 #define QINT_TQCTL_CAUSE_ENA_M		BIT(30)
 #define VPINT_ALLOC(_VF)		(0x001D1000 + ((_VF) * 4))
 #define VPINT_ALLOC_FIRST_S		0
...
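These new _M masks pair with the existing _S shift defines.  For context,
ICE_M() in ice_type.h was (roughly, at the time of this series) a
shift-a-mask-into-place helper:

    #define ICE_M(m, s)	((m) << (s))

    /* so QINT_RQCTL_MSIX_INDX_M = ICE_M(0x7FF, 0) covers bits 10:0,
     * and QINT_RQCTL_ITR_INDX_M = ICE_M(0x3, 11) covers bits 12:11
     */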
@@ -317,42 +317,22 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
 		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
 		if (vsi->current_netdev_flags & IFF_PROMISC) {
-			/* Apply Tx filter rule to get traffic from VMs */
-			status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
-						  ICE_FLTR_TX);
-			if (status) {
-				netdev_err(netdev, "Error setting default VSI %i tx rule\n",
-					   vsi->vsi_num);
-				vsi->current_netdev_flags &= ~IFF_PROMISC;
-				err = -EIO;
-				goto out_promisc;
-			}
 			/* Apply Rx filter rule to get traffic from wire */
 			status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
 						  ICE_FLTR_RX);
 			if (status) {
-				netdev_err(netdev, "Error setting default VSI %i rx rule\n",
+				netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
 					   vsi->vsi_num);
 				vsi->current_netdev_flags &= ~IFF_PROMISC;
 				err = -EIO;
 				goto out_promisc;
 			}
 		} else {
-			/* Clear Tx filter rule to stop traffic from VMs */
-			status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
-						  ICE_FLTR_TX);
-			if (status) {
-				netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
-					   vsi->vsi_num);
-				vsi->current_netdev_flags |= IFF_PROMISC;
-				err = -EIO;
-				goto out_promisc;
-			}
 			/* Clear Rx filter to remove traffic from wire */
 			status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
 						  ICE_FLTR_RX);
 			if (status) {
-				netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
+				netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
 					   vsi->vsi_num);
 				vsi->current_netdev_flags |= IFF_PROMISC;
 				err = -EIO;
@@ -1185,10 +1165,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
 		struct ice_vf *vf = &pf->vf[i];
 
+		mdd_detected = false;
+
 		reg = rd32(hw, VP_MDET_TX_PQM(i));
 		if (reg & VP_MDET_TX_PQM_VALID_M) {
 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
-			vf->num_mdd_events++;
+			mdd_detected = true;
 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
 				 i);
 		}
@@ -1196,7 +1178,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
-			vf->num_mdd_events++;
+			mdd_detected = true;
 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
 				 i);
 		}
@@ -1204,7 +1186,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
-			vf->num_mdd_events++;
+			mdd_detected = true;
 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
 				 i);
 		}
@@ -1212,14 +1194,13 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 		reg = rd32(hw, VP_MDET_RX(i));
 		if (reg & VP_MDET_RX_VALID_M) {
 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
-			vf->num_mdd_events++;
+			mdd_detected = true;
 			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
 				 i);
 		}
 
-		if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
-			dev_info(&pf->pdev->dev,
-				 "Too many MDD events on VF %d, disabled\n", i);
+		if (mdd_detected) {
+			vf->num_mdd_events++;
 			dev_info(&pf->pdev->dev,
 				 "Use PF Control I/F to re-enable the VF\n");
 			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
@@ -2872,6 +2853,9 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
 	struct ice_vsi *vsi = np->vsi;
 	int ret = 0;
 
+	/* Multiple features can be changed in one call so keep features in
+	 * separate if/else statements to guarantee each feature is checked
+	 */
 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
 		ret = ice_vsi_manage_rss_lut(vsi, true);
 	else if (!(features & NETIF_F_RXHASH) &&
@@ -2884,7 +2868,8 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
-	else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
+
+	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
 		ret = ice_vsi_manage_vlan_insertion(vsi);
 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
...
@@ -1969,6 +1969,65 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
 	return 0;
 }
 
+/**
+ * ice_add_eth_mac - Add ethertype and MAC based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ether type MAC filter, MAC is optional
+ */
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+{
+	struct ice_fltr_list_entry *em_list_itr;
+
+	if (!em_list || !hw)
+		return ICE_ERR_PARAM;
+
+	list_for_each_entry(em_list_itr, em_list, list_entry) {
+		enum ice_sw_lkup_type l_type =
+			em_list_itr->fltr_info.lkup_type;
+
+		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+		    l_type != ICE_SW_LKUP_ETHERTYPE)
+			return ICE_ERR_PARAM;
+
+		em_list_itr->fltr_info.flag = ICE_FLTR_TX;
+		em_list_itr->status = ice_add_rule_internal(hw, l_type,
+							    em_list_itr);
+		if (em_list_itr->status)
+			return em_list_itr->status;
+	}
+	return 0;
+}
+
+/**
+ * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype or ethertype MAC entries
+ */
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+{
+	struct ice_fltr_list_entry *em_list_itr, *tmp;
+
+	if (!em_list || !hw)
+		return ICE_ERR_PARAM;
+
+	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
+		enum ice_sw_lkup_type l_type =
+			em_list_itr->fltr_info.lkup_type;
+
+		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+		    l_type != ICE_SW_LKUP_ETHERTYPE)
+			return ICE_ERR_PARAM;
+
+		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
+							       em_list_itr);
+		if (em_list_itr->status)
+			return em_list_itr->status;
+	}
+	return 0;
+}
+
 /**
  * ice_rem_sw_rule_info
  * @hw: pointer to the hardware structure
...
@@ -218,6 +218,10 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
 enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
 enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
 enum ice_status
 ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
...
@@ -1849,6 +1849,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 	if (err < 0)
 		return err;
 
+	/* cppcheck-suppress unreadVariable */
 	ip.hdr = skb_network_header(skb);
 	l4.hdr = skb_transport_header(skb);
...
@@ -326,6 +326,8 @@ struct ice_port_info {
 	u8 port_state;
 #define ICE_SCHED_PORT_STATE_INIT	0x0
 #define ICE_SCHED_PORT_STATE_READY	0x1
+	u8 lport;
+#define ICE_LPORT_MASK			0xff
 	u16 dflt_tx_vsi_rule_id;
 	u16 dflt_tx_vsi_num;
 	u16 dflt_rx_vsi_rule_id;
@@ -339,11 +341,9 @@ struct ice_port_info {
 	struct ice_dcbx_cfg remote_dcbx_cfg;	/* Peer Cfg */
 	struct ice_dcbx_cfg desired_dcbx_cfg;	/* CEE Desired Cfg */
 	/* LLDP/DCBX Status */
-	u8 dcbx_status;
-	u8 is_sw_lldp;
-	u8 lport;
-#define ICE_LPORT_MASK		0xff
-	u8 is_vf;
+	u8 dcbx_status:3;		/* see ICE_DCBX_STATUS_DIS */
+	u8 is_sw_lldp:1;
+	u8 is_vf:1;
 };
 
 struct ice_switch_info {
...
@@ -1814,14 +1814,22 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
 	struct ice_vsi *vsi = NULL;
 	struct ice_pf *pf = vf->pf;
 	unsigned long qmap;
+	u16 num_q_vectors;
 	int i;
 
-	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+	num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF;
+	vsi = pf->vsi[vf->lan_vsi_idx];
+
+	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+	    !vsi || vsi->num_q_vectors < num_q_vectors ||
+	    irqmap_info->num_vectors == 0) {
 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 		goto error_param;
 	}
 
-	for (i = 0; i < irqmap_info->num_vectors; i++) {
+	for (i = 0; i < num_q_vectors; i++) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
 		map = &irqmap_info->vecmap[i];
 
 		vector_id = map->vector_id;
@@ -1833,36 +1841,26 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
 			goto error_param;
 		}
 
-		vsi = pf->vsi[vf->lan_vsi_idx];
-		if (!vsi) {
-			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-			goto error_param;
-		}
-
 		/* lookout for the invalid queue index */
 		qmap = map->rxq_map;
+		q_vector->num_ring_rx = 0;
 		for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
-			struct ice_q_vector *q_vector;
-
 			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 				goto error_param;
 			}
-			q_vector = vsi->q_vectors[i];
 			q_vector->num_ring_rx++;
 			q_vector->rx.itr_idx = map->rxitr_idx;
 			vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
 		}
 
 		qmap = map->txq_map;
+		q_vector->num_ring_tx = 0;
 		for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
-			struct ice_q_vector *q_vector;
-
 			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 				goto error_param;
 			}
-			q_vector = vsi->q_vectors[i];
 			q_vector->num_ring_tx++;
 			q_vector->tx.itr_idx = map->txitr_idx;
 			vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
@@ -2402,7 +2400,17 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 			}
 		}
 	} else {
-		for (i = 0; i < vfl->num_elements; i++) {
+		/* In case of non_trusted VF, number of VLAN elements passed
+		 * to PF for removal might be greater than number of VLANs
+		 * filter programmed for that VF - So, use actual number of
+		 * VLANS added earlier with add VLAN opcode. In order to avoid
+		 * removing VLAN that doesn't exist, which result to sending
+		 * erroneous failed message back to the VF
+		 */
+		int num_vf_vlan;
+
+		num_vf_vlan = vf->num_vlan;
+		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
 			u16 vid = vfl->vlan_id[i];
 
 			/* Make sure ice_vsi_kill_vlan is successful before
...