Commit 4c7d2e17 authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2022-03-09

This series contains updates to ice driver only.

Martyna implements switchdev filtering on inner EtherType field for
tunnels.

Marcin adds reporting of slowpath statistics for port representors.

Jonathan Toppins changes a non-fatal link error message from warning to
debug.

Maciej removes unnecessary checks in ice_clean_tx_irq().

Amritha adds support for ADQ to match outer destination MAC for tunnels.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: Add support for outer dest MAC for ADQ tunnels
  ice: avoid XDP checks in ice_clean_tx_irq()
  ice: change "can't set link" message to dbg level
  ice: Add slow path offload stats on port representor in switchdev
  ice: Add support for inner etype in switchdev
====================

Link: https://lore.kernel.org/r/20220309190315.1380414-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 46224853 02ddec19
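
For orientation before the diffs: the inner-EtherType support below is the kind of match exercised by a switchdev flower filter on a tunnel netdev, where "protocol" refers to the inner EtherType once enc_* tunnel keys are present. This is a sketch only; the netdev names, VNI, and addresses are hypothetical, and the exact key combinations accepted are defined by the driver changes that follow.

    # vxlan100 and pf0vf0_rep are hypothetical netdev names
    tc filter add dev vxlan100 ingress protocol ip flower \
        enc_key_id 100 enc_dst_ip 192.168.1.1 enc_dst_port 4789 \
        action mirred egress redirect dev pf0vf0_rep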
@@ -834,6 +834,9 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf);
 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
 void ice_update_vsi_stats(struct ice_vsi *vsi);
 void ice_update_pf_stats(struct ice_pf *pf);
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+			     struct ice_q_stats stats, u64 *pkts, u64 *bytes);
 int ice_up(struct ice_vsi *vsi);
 int ice_down(struct ice_vsi *vsi);
 int ice_vsi_cfg(struct ice_vsi *vsi);
......
@@ -3960,9 +3960,9 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
 	 */
 	if (status == -EIO) {
 		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
-			dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
-				 (ena ? "ON" : "OFF"), status,
-				 ice_aq_str(hw->adminq.sq_last_status));
+			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
+				(ena ? "ON" : "OFF"), status,
+				ice_aq_str(hw->adminq.sq_last_status));
 	} else if (status) {
 		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
 			(ena ? "ON" : "OFF"), status,
......
@@ -6134,9 +6134,9 @@ int ice_up(struct ice_vsi *vsi)
  * This function fetches stats from the ring considering the atomic operations
  * that needs to be performed to read u64 values in 32 bit machine.
  */
-static void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
-			     u64 *pkts, u64 *bytes)
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
 {
 	unsigned int start;
......
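
The body of ice_fetch_u64_stats_per_ring() is elided above; it is the standard seqcount retry loop from <linux/u64_stats_sync.h>. A minimal sketch of what the elided part plausibly does (the driver may use the _irq variants of these helpers):

	do {
		/* retry until the writer-side sequence count is stable so
		 * the two u64 reads are consistent on 32-bit machines
		 */
		start = u64_stats_fetch_begin(syncp);
		*pkts = stats.pkts;
		*bytes = stats.bytes;
	} while (u64_stats_fetch_retry(syncp, start));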
@@ -29,6 +29,7 @@ enum ice_protocol_type {
 	ICE_MAC_OFOS = 0,
 	ICE_MAC_IL,
 	ICE_ETYPE_OL,
+	ICE_ETYPE_IL,
 	ICE_VLAN_OFOS,
 	ICE_IPV4_OFOS,
 	ICE_IPV4_IL,
@@ -92,6 +93,7 @@ enum ice_prot_id {
 #define ICE_MAC_OFOS_HW		1
 #define ICE_MAC_IL_HW		4
 #define ICE_ETYPE_OL_HW		9
+#define ICE_ETYPE_IL_HW		10
 #define ICE_VLAN_OF_HW		16
 #define ICE_VLAN_OL_HW		17
 #define ICE_IPV4_OFOS_HW	32
......
@@ -142,6 +142,59 @@ ice_repr_get_devlink_port(struct net_device *netdev)
 	return &repr->vf->devlink_port;
 }
 
+/**
+ * ice_repr_sp_stats64 - get slow path stats for port representor
+ * @dev: network interface device structure
+ * @stats: netlink stats structure
+ *
+ * RX/TX stats are being swapped here to be consistent with VF stats. In slow
+ * path, port representor receives data when the corresponding VF is sending it
+ * (and vice versa), TX and RX bytes/packets are effectively swapped on port
+ * representor.
+ */
+static int
+ice_repr_sp_stats64(const struct net_device *dev,
+		    struct rtnl_link_stats64 *stats)
+{
+	struct ice_netdev_priv *np = netdev_priv(dev);
+	int vf_id = np->repr->vf->vf_id;
+	struct ice_tx_ring *tx_ring;
+	struct ice_rx_ring *rx_ring;
+	u64 pkts, bytes;
+
+	tx_ring = np->vsi->tx_rings[vf_id];
+	ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
+				     &pkts, &bytes);
+	stats->rx_packets = pkts;
+	stats->rx_bytes = bytes;
+
+	rx_ring = np->vsi->rx_rings[vf_id];
+	ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
+				     &pkts, &bytes);
+	stats->tx_packets = pkts;
+	stats->tx_bytes = bytes;
+	stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
+			    rx_ring->rx_stats.alloc_buf_failed;
+
+	return 0;
+}
+
+static bool
+ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
+}
+
+static int
+ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
+			       void *sp)
+{
+	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
+		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
+
+	return -EINVAL;
+}
+
 static int
 ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
 			     struct flow_cls_offload *flower)
@@ -199,6 +252,8 @@ static const struct net_device_ops ice_repr_netdev_ops = {
 	.ndo_start_xmit = ice_eswitch_port_start_xmit,
 	.ndo_get_devlink_port = ice_repr_get_devlink_port,
 	.ndo_setup_tc = ice_repr_setup_tc,
+	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
 };
 
 /**
......
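
The CPU-hit counters registered above are served over the RTM_GETSTATS netlink interface (IFLA_STATS_LINK_OFFLOAD_XSTATS). Assuming an iproute2 new enough to have the "ip stats" subcommand, they should be readable with something like the following (representor name hypothetical):

    ip stats show dev pf0vf0_rep group offload subgroup cpu_hit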
@@ -24,6 +24,9 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
 	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
 		lkups_cnt++;
 
+	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
+		lkups_cnt++;
+
 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
 		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
@@ -33,9 +36,7 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
 	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
 		lkups_cnt++;
 
-	/* currently inner etype filter isn't supported */
-	if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
-	    fltr->tunnel_type == TNL_LAST)
+	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
 		lkups_cnt++;
 
 	/* are MAC fields specified? */
@@ -64,6 +65,11 @@ static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
 	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
 }
 
+static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
+{
+	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
+}
+
 static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
 {
 	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
@@ -145,6 +151,15 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
 		}
 	}
 
+	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
+		list[i].type = ice_proto_type_from_mac(false);
+		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+				hdr->l2_key.dst_mac);
+		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+				hdr->l2_mask.dst_mac);
+		i++;
+	}
+
 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
 		list[i].type = ice_proto_type_from_ipv4(false);
@@ -224,8 +239,10 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 		headers = &tc_fltr->inner_headers;
 		inner = true;
-	} else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
-		list[i].type = ICE_ETYPE_OL;
+	}
+
+	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+		list[i].type = ice_proto_type_from_etype(inner);
 		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
 		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
 		i++;
@@ -1059,12 +1076,24 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
 	 *    this code won't do anything
 	 * 2. For non-tunnel, if user didn't specify MAC address, add implicit
 	 *    dest MAC to be lower netdev's active unicast MAC address
+	 * 3. For tunnel, as of now TC-filter through flower classifier doesn't
+	 *    have provision for user to specify outer DMAC, hence driver to
+	 *    implicitly add outer dest MAC to be lower netdev's active unicast
+	 *    MAC address.
 	 */
-	if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
-		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
-				main_vsi->netdev->dev_addr);
-		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
+	if (fltr->tunnel_type != TNL_LAST &&
+	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;
+
+	if (fltr->tunnel_type == TNL_LAST &&
+	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
 		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+
+	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
+		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
+				vsi->netdev->dev_addr);
+		memset(fltr->outer_headers.l2_mask.dst_mac, 0xff, ETH_ALEN);
 	}
 
 	/* validate specified dest MAC address, make sure either it belongs to
......
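
Usage sketch for the ADQ change above: a tunnel filter steering traffic to a hardware traffic class no longer has to carry an explicit outer destination MAC, since ice_handle_tclass_action() now sets ICE_TC_FLWR_FIELD_ENC_DST_MAC implicitly. All names, addresses, and the mqprio setup below are hypothetical:

    # assumes ADQ traffic classes were already created on eth0 via tc mqprio
    tc filter add dev eth0 ingress protocol ip prio 1 flower \
        enc_key_id 100 enc_dst_ip 192.168.1.1 enc_dst_port 4789 \
        dst_ip 10.0.0.2 skip_sw hw_tc 1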
@@ -223,8 +223,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 	struct ice_tx_buf *tx_buf;
 
 	/* get the bql data ready */
-	if (!ice_ring_is_xdp(tx_ring))
-		netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
 
 	tx_buf = &tx_ring->tx_buf[i];
 	tx_desc = ICE_TX_DESC(tx_ring, i);
@@ -313,10 +312,6 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 	tx_ring->next_to_clean = i;
 
 	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
-
-	if (ice_ring_is_xdp(tx_ring))
-		return !!budget;
-
 	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
 
 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
......