Commit a425a973 authored by Jakub Kicinski

Merge branch 'gve-add-flow-steering-support'

Ziwei Xiao says:

====================
gve: Add flow steering support

To support flow steering in the GVE driver, two adminq changes need to
be made in advance.

The first is adding an adminq mutex lock, which will allow incoming
flow steering operations to temporarily drop the rtnl_lock and thereby
reduce the latency of registering flow rules across several NICs at the
same time. This will be enabled by future changes that reduce the
drivers' dependence on the rtnl lock for particular ethtool ops. (A
sketch of the locked execute path follows the commit metadata below.)

The second is adding an extended adminq command so that we can support
adminq commands larger than 56 bytes, such as the configure_flow_rule
command. That patch introduces a function named
gve_adminq_execute_extended_cmd carrying the __maybe_unused attribute;
the attribute is removed in the third patch of this series, which makes
use of the previously unused function. (A sketch of the wrapper appears
next to struct gve_adminq_extended_command below.)

The other three patches add the actual flow steering feature support
to the driver.
====================

Link: https://patch.msgid.link/20240625001232.1476315-1-ziweixiao@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 63173885 6f3bc487
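
The mutex change referenced above would make the adminq execute path
self-serializing. A minimal sketch of that path, assuming the driver's
existing gve_adminq_issue_cmd() and gve_adminq_kick_and_wait() helpers
(the exact code is not part of this excerpt):

static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	/* Serialize adminq users directly instead of relying on rtnl_lock */
	mutex_lock(&priv->adminq_lock);
	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head) {
		err = -EINVAL;	/* earlier commands still outstanding */
		goto out;
	}

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		goto out;

	err = gve_adminq_kick_and_wait(priv);

out:
	mutex_unlock(&priv->adminq_lock);
	return err;
}
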
drivers/net/ethernet/google/gve/Makefile
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o
drivers/net/ethernet/google/gve/gve.h
/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
* Google virtual Ethernet (gve) driver
*
* Copyright (C) 2015-2024 Google LLC
*/
#ifndef _GVE_H_
@@ -60,6 +60,11 @@
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
/* Each cache holds as many entries as fit in one adminq buffer */
#define GVE_FLOW_RULES_CACHE_SIZE \
(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -678,6 +683,39 @@ enum gve_queue_format {
GVE_DQO_QPL_FORMAT = 0x4,
};
struct gve_flow_spec {
__be32 src_ip[4];
__be32 dst_ip[4];
union {
struct {
__be16 src_port;
__be16 dst_port;
};
__be32 spi;
};
union {
u8 tos;
u8 tclass;
};
};
struct gve_flow_rule {
u32 location;
u16 flow_type;
u16 action;
struct gve_flow_spec key;
struct gve_flow_spec mask;
};
struct gve_flow_rules_cache {
bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
struct gve_adminq_queried_flow_rule *rules_cache;
__be32 *rule_ids_cache;
/* The total number of queried rules stored in the caches */
u32 rules_cache_num;
u32 rule_ids_cache_num;
};
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -724,6 +762,7 @@ struct gve_priv {
union gve_adminq_command *adminq;
dma_addr_t adminq_bus_addr;
struct dma_pool *adminq_pool;
struct mutex adminq_lock; /* Protects adminq command execution */
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
@@ -743,6 +782,8 @@ struct gve_priv {
u32 adminq_report_link_speed_cnt;
u32 adminq_get_ptype_map_cnt;
u32 adminq_verify_driver_compatibility_cnt;
u32 adminq_query_flow_rules_cnt;
u32 adminq_cfg_flow_rule_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -785,6 +826,11 @@ struct gve_priv {
u16 header_buf_size; /* device configured, header-split supported if non-zero */
bool header_split_enabled; /* True if the header split is enabled by the user */
u32 max_flow_rules;
u32 num_flow_rules;
struct gve_flow_rules_cache flow_rules_cache;
};
enum gve_service_task_flags_bit {
@@ -1124,6 +1170,12 @@ int gve_adjust_config(struct gve_priv *priv,
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
drivers/net/ethernet/google/gve/gve_adminq.h
@@ -25,6 +25,19 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
/* For commands that are larger than 56 bytes */
GVE_ADMINQ_EXTENDED_COMMAND = 0xFF,
};
/* A normal adminq command is restricted to 56 bytes at maximum. A longer
 * command is wrapped in a GVE_ADMINQ_EXTENDED_COMMAND, which specifies an
 * inner opcode from gve_adminq_extended_cmd_opcodes. The inner command is
 * written to DMA memory that the GVE_ADMINQ_EXTENDED_COMMAND points at.
 */
enum gve_adminq_extended_cmd_opcodes {
GVE_ADMINQ_CONFIGURE_FLOW_RULE = 0x101,
};
/* Admin queue status codes */
@@ -143,6 +156,14 @@ struct gve_device_option_modify_ring {
static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
struct gve_device_option_flow_steering {
__be32 supported_features_mask;
__be32 reserved;
__be32 max_flow_rules;
};
static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -160,6 +181,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
};
enum gve_dev_opt_req_feat_mask {
@@ -171,12 +193,14 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
};
enum gve_sup_feature_mask {
GVE_SUP_MODIFY_RING_MASK = 1 << 0,
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -208,6 +232,14 @@ enum gve_driver_capbility {
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
struct gve_adminq_extended_command {
__be32 inner_opcode;
__be32 inner_length;
__be64 inner_command_addr;
};
static_assert(sizeof(struct gve_adminq_extended_command) == 16);
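
/* A sketch of how the wrapper might be issued (an assumption based on this
 * series' description, not verbatim code from it): copy the inner command
 * into DMA memory, then send a normal 16-byte extended command pointing at
 * it.
 */
static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
					   size_t cmd_size, void *cmd_orig)
{
	union gve_adminq_command cmd;
	dma_addr_t inner_cmd_bus;
	void *inner_cmd;
	int err;

	inner_cmd = dma_alloc_coherent(&priv->pdev->dev, cmd_size,
				       &inner_cmd_bus, GFP_KERNEL);
	if (!inner_cmd)
		return -ENOMEM;

	memcpy(inner_cmd, cmd_orig, cmd_size);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_EXTENDED_COMMAND);
	cmd.extended_command.inner_opcode = cpu_to_be32(opcode);
	cmd.extended_command.inner_length = cpu_to_be32(cmd_size);
	cmd.extended_command.inner_command_addr = cpu_to_be64(inner_cmd_bus);

	err = gve_adminq_execute_cmd(priv, &cmd);

	dma_free_coherent(&priv->pdev->dev, cmd_size, inner_cmd, inner_cmd_bus);
	return err;
}
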
struct gve_driver_info {
u8 os_type; /* 0x01 = Linux */
u8 driver_major;
@@ -412,6 +444,71 @@ struct gve_adminq_get_ptype_map {
__be64 ptype_map_addr;
};
/* Flow-steering related definitions */
enum gve_adminq_flow_rule_cfg_opcode {
GVE_FLOW_RULE_CFG_ADD = 0,
GVE_FLOW_RULE_CFG_DEL = 1,
GVE_FLOW_RULE_CFG_RESET = 2,
};
enum gve_adminq_flow_rule_query_opcode {
GVE_FLOW_RULE_QUERY_RULES = 0,
GVE_FLOW_RULE_QUERY_IDS = 1,
GVE_FLOW_RULE_QUERY_STATS = 2,
};
enum gve_adminq_flow_type {
GVE_FLOW_TYPE_TCPV4,
GVE_FLOW_TYPE_UDPV4,
GVE_FLOW_TYPE_SCTPV4,
GVE_FLOW_TYPE_AHV4,
GVE_FLOW_TYPE_ESPV4,
GVE_FLOW_TYPE_TCPV6,
GVE_FLOW_TYPE_UDPV6,
GVE_FLOW_TYPE_SCTPV6,
GVE_FLOW_TYPE_AHV6,
GVE_FLOW_TYPE_ESPV6,
};
/* Flow-steering command */
struct gve_adminq_flow_rule {
__be16 flow_type;
__be16 action; /* RX queue id */
struct gve_flow_spec key;
struct gve_flow_spec mask;
};
struct gve_adminq_configure_flow_rule {
__be16 opcode;
u8 padding[2];
struct gve_adminq_flow_rule rule;
__be32 location;
};
static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 92);
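
/* At 92 bytes this command cannot fit the 56-byte adminq slot, so rule
 * add/del/reset would go through the extended path. A hedged sketch of the
 * add case, reusing the gve_adminq_execute_extended_cmd() sketch above
 * (names are assumptions, not verbatim from this series):
 */
int gve_adminq_add_flow_rule(struct gve_priv *priv,
			     struct gve_adminq_flow_rule *rule, u32 loc)
{
	struct gve_adminq_configure_flow_rule flow_rule_cmd = {
		.opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_ADD),
		.rule = *rule,
		.location = cpu_to_be32(loc),
	};

	return gve_adminq_execute_extended_cmd(priv,
					       GVE_ADMINQ_CONFIGURE_FLOW_RULE,
					       sizeof(flow_rule_cmd),
					       &flow_rule_cmd);
}
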
struct gve_query_flow_rules_descriptor {
__be32 num_flow_rules;
__be32 max_flow_rules;
__be32 num_queried_rules;
__be32 total_length;
};
struct gve_adminq_queried_flow_rule {
__be32 location;
struct gve_adminq_flow_rule flow_rule;
};
struct gve_adminq_query_flow_rules {
__be16 opcode;
u8 padding[2];
__be32 starting_rule_id;
__be64 available_length; /* Length of the DMA buffer the driver allocated */
__be64 rule_descriptor_addr; /* DMA address of that buffer */
};
static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);
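
/* Queries still fit the normal 56-byte slot; only the result descriptor
 * lives in DMA memory. A minimal sketch, assuming the buffer is carved from
 * the existing adminq_pool and that parsing the returned descriptor into
 * the flow_rules_cache happens elsewhere:
 */
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode,
				u32 starting_loc)
{
	struct gve_query_flow_rules_descriptor *descriptor;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err;

	descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				    &descriptor_bus);
	if (!descriptor)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_FLOW_RULES);
	cmd.query_flow_rules = (struct gve_adminq_query_flow_rules) {
		.opcode = cpu_to_be16(query_opcode),
		.starting_rule_id = cpu_to_be32(starting_loc),
		.available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
		.rule_descriptor_addr = cpu_to_be64(descriptor_bus),
	};
	err = gve_adminq_execute_cmd(priv, &cmd);

	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
	return err;
}
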
union gve_adminq_command {
struct {
__be32 opcode;
@@ -432,6 +529,8 @@ union gve_adminq_command {
struct gve_adminq_get_ptype_map get_ptype_map;
struct gve_adminq_verify_driver_compatibility
verify_driver_compatibility;
struct gve_adminq_query_flow_rules query_flow_rules;
struct gve_adminq_extended_command extended_command;
};
};
u8 reserved[64];
@@ -465,6 +564,10 @@ int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
u64 driver_info_len,
dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);
int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc);
int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
drivers/net/ethernet/google/gve/gve_ethtool.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
* Copyright (C) 2015-2024 Google LLC
*/
#include <linux/rtnetlink.h>
@@ -74,7 +74,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt"
"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
"adminq_query_flow_rules", "adminq_cfg_flow_rule",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -450,6 +451,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_report_stats_cnt;
data[i++] = priv->adminq_report_link_speed_cnt;
data[i++] = priv->adminq_get_ptype_map_cnt;
data[i++] = priv->adminq_query_flow_rules_cnt;
data[i++] = priv->adminq_cfg_flow_rule_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -772,6 +775,69 @@ static int gve_set_coalesce(struct net_device *netdev,
return 0;
}
static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
int err = 0;
if (!(netdev->features & NETIF_F_NTUPLE))
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
err = gve_add_flow_rule(priv, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
err = gve_del_flow_rule(priv, cmd);
break;
case ETHTOOL_SRXFH:
err = -EOPNOTSUPP;
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct gve_priv *priv = netdev_priv(netdev);
int err = 0;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = priv->rx_cfg.num_queues;
break;
case ETHTOOL_GRXCLSRLCNT:
if (!priv->max_flow_rules)
return -EOPNOTSUPP;
err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
if (err)
return err;
cmd->rule_cnt = priv->num_flow_rules;
cmd->data = priv->max_flow_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = gve_get_flow_rule_entry(priv, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
break;
case ETHTOOL_GRXFH:
err = -EOPNOTSUPP;
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
@@ -783,6 +849,8 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_msglevel = gve_get_msglevel,
.set_channels = gve_set_channels,
.get_channels = gve_get_channels,
.set_rxnfc = gve_set_rxnfc,
.get_rxnfc = gve_get_rxnfc,
.get_link = ethtool_op_get_link,
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
drivers/net/ethernet/google/gve/gve_flow_rule.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
* Copyright (C) 2015-2024 Google LLC
*/
#include "gve.h"
#include "gve_adminq.h"
static int gve_fill_ethtool_flow_spec(struct ethtool_rx_flow_spec *fsp,
struct gve_adminq_queried_flow_rule *rule)
{
struct gve_adminq_flow_rule *flow_rule = &rule->flow_rule;
static const u16 flow_type_lut[] = {
[GVE_FLOW_TYPE_TCPV4] = TCP_V4_FLOW,
[GVE_FLOW_TYPE_UDPV4] = UDP_V4_FLOW,
[GVE_FLOW_TYPE_SCTPV4] = SCTP_V4_FLOW,
[GVE_FLOW_TYPE_AHV4] = AH_V4_FLOW,
[GVE_FLOW_TYPE_ESPV4] = ESP_V4_FLOW,
[GVE_FLOW_TYPE_TCPV6] = TCP_V6_FLOW,
[GVE_FLOW_TYPE_UDPV6] = UDP_V6_FLOW,
[GVE_FLOW_TYPE_SCTPV6] = SCTP_V6_FLOW,
[GVE_FLOW_TYPE_AHV6] = AH_V6_FLOW,
[GVE_FLOW_TYPE_ESPV6] = ESP_V6_FLOW,
};
if (be16_to_cpu(flow_rule->flow_type) >= ARRAY_SIZE(flow_type_lut))
return -EINVAL;
fsp->flow_type = flow_type_lut[be16_to_cpu(flow_rule->flow_type)];
memset(&fsp->h_u, 0, sizeof(fsp->h_u));
memset(&fsp->h_ext, 0, sizeof(fsp->h_ext));
memset(&fsp->m_u, 0, sizeof(fsp->m_u));
memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
switch (fsp->flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
fsp->h_u.tcp_ip4_spec.ip4src = flow_rule->key.src_ip[0];
fsp->h_u.tcp_ip4_spec.ip4dst = flow_rule->key.dst_ip[0];
fsp->h_u.tcp_ip4_spec.psrc = flow_rule->key.src_port;
fsp->h_u.tcp_ip4_spec.pdst = flow_rule->key.dst_port;
fsp->h_u.tcp_ip4_spec.tos = flow_rule->key.tos;
fsp->m_u.tcp_ip4_spec.ip4src = flow_rule->mask.src_ip[0];
fsp->m_u.tcp_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0];
fsp->m_u.tcp_ip4_spec.psrc = flow_rule->mask.src_port;
fsp->m_u.tcp_ip4_spec.pdst = flow_rule->mask.dst_port;
fsp->m_u.tcp_ip4_spec.tos = flow_rule->mask.tos;
break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
fsp->h_u.ah_ip4_spec.ip4src = flow_rule->key.src_ip[0];
fsp->h_u.ah_ip4_spec.ip4dst = flow_rule->key.dst_ip[0];
fsp->h_u.ah_ip4_spec.spi = flow_rule->key.spi;
fsp->h_u.ah_ip4_spec.tos = flow_rule->key.tos;
fsp->m_u.ah_ip4_spec.ip4src = flow_rule->mask.src_ip[0];
fsp->m_u.ah_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0];
fsp->m_u.ah_ip4_spec.spi = flow_rule->mask.spi;
fsp->m_u.ah_ip4_spec.tos = flow_rule->mask.tos;
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
memcpy(fsp->h_u.tcp_ip6_spec.ip6src, &flow_rule->key.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, &flow_rule->key.dst_ip,
sizeof(struct in6_addr));
fsp->h_u.tcp_ip6_spec.psrc = flow_rule->key.src_port;
fsp->h_u.tcp_ip6_spec.pdst = flow_rule->key.dst_port;
fsp->h_u.tcp_ip6_spec.tclass = flow_rule->key.tclass;
memcpy(fsp->m_u.tcp_ip6_spec.ip6src, &flow_rule->mask.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, &flow_rule->mask.dst_ip,
sizeof(struct in6_addr));
fsp->m_u.tcp_ip6_spec.psrc = flow_rule->mask.src_port;
fsp->m_u.tcp_ip6_spec.pdst = flow_rule->mask.dst_port;
fsp->m_u.tcp_ip6_spec.tclass = flow_rule->mask.tclass;
break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
memcpy(fsp->h_u.ah_ip6_spec.ip6src, &flow_rule->key.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &flow_rule->key.dst_ip,
sizeof(struct in6_addr));
fsp->h_u.ah_ip6_spec.spi = flow_rule->key.spi;
fsp->h_u.ah_ip6_spec.tclass = flow_rule->key.tclass;
memcpy(fsp->m_u.ah_ip6_spec.ip6src, &flow_rule->mask.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &flow_rule->mask.dst_ip,
sizeof(struct in6_addr));
fsp->m_u.ah_ip6_spec.spi = flow_rule->mask.spi;
fsp->m_u.ah_ip6_spec.tclass = flow_rule->mask.tclass;
break;
default:
return -EINVAL;
}
fsp->ring_cookie = be16_to_cpu(flow_rule->action);
return 0;
}
static int gve_generate_flow_rule(struct gve_priv *priv, struct ethtool_rx_flow_spec *fsp,
struct gve_adminq_flow_rule *rule)
{
static const u16 flow_type_lut[] = {
[TCP_V4_FLOW] = GVE_FLOW_TYPE_TCPV4,
[UDP_V4_FLOW] = GVE_FLOW_TYPE_UDPV4,
[SCTP_V4_FLOW] = GVE_FLOW_TYPE_SCTPV4,
[AH_V4_FLOW] = GVE_FLOW_TYPE_AHV4,
[ESP_V4_FLOW] = GVE_FLOW_TYPE_ESPV4,
[TCP_V6_FLOW] = GVE_FLOW_TYPE_TCPV6,
[UDP_V6_FLOW] = GVE_FLOW_TYPE_UDPV6,
[SCTP_V6_FLOW] = GVE_FLOW_TYPE_SCTPV6,
[AH_V6_FLOW] = GVE_FLOW_TYPE_AHV6,
[ESP_V6_FLOW] = GVE_FLOW_TYPE_ESPV6,
};
u32 flow_type;
if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
return -EOPNOTSUPP;
if (fsp->ring_cookie >= priv->rx_cfg.num_queues)
return -EINVAL;
rule->action = cpu_to_be16(fsp->ring_cookie);
flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
if (!flow_type || flow_type >= ARRAY_SIZE(flow_type_lut))
return -EINVAL;
rule->flow_type = cpu_to_be16(flow_type_lut[flow_type]);
switch (flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
rule->key.src_port = fsp->h_u.tcp_ip4_spec.psrc;
rule->key.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
rule->mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
rule->mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
rule->key.spi = fsp->h_u.ah_ip4_spec.spi;
rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
rule->mask.spi = fsp->m_u.ah_ip4_spec.spi;
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
memcpy(&rule->key.src_ip, fsp->h_u.tcp_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&rule->key.dst_ip, fsp->h_u.tcp_ip6_spec.ip6dst,
sizeof(struct in6_addr));
rule->key.src_port = fsp->h_u.tcp_ip6_spec.psrc;
rule->key.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
memcpy(&rule->mask.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&rule->mask.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
sizeof(struct in6_addr));
rule->mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
rule->mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
memcpy(&rule->key.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&rule->key.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
rule->key.spi = fsp->h_u.ah_ip6_spec.spi;
memcpy(&rule->mask.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&rule->mask.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
rule->mask.spi = fsp->m_u.ah_ip6_spec.spi;
break;
default:
/* not doing un-parsed flow types */
return -EINVAL;
}
return 0;
}
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
{
struct gve_adminq_queried_flow_rule *rules_cache = priv->flow_rules_cache.rules_cache;
struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
u32 *cache_num = &priv->flow_rules_cache.rules_cache_num;
struct gve_adminq_queried_flow_rule *rule = NULL;
int err = 0;
u32 i;
if (!priv->max_flow_rules)
return -EOPNOTSUPP;
if (!priv->flow_rules_cache.rules_cache_synced ||
fsp->location < be32_to_cpu(rules_cache[0].location) ||
fsp->location > be32_to_cpu(rules_cache[*cache_num - 1].location)) {
err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_RULES, fsp->location);
if (err)
return err;
priv->flow_rules_cache.rules_cache_synced = true;
}
for (i = 0; i < *cache_num; i++) {
if (fsp->location == be32_to_cpu(rules_cache[i].location)) {
rule = &rules_cache[i];
break;
}
}
if (!rule)
return -EINVAL;
err = gve_fill_ethtool_flow_spec(fsp, rule);
return err;
}
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
__be32 *rule_ids_cache = priv->flow_rules_cache.rule_ids_cache;
u32 *cache_num = &priv->flow_rules_cache.rule_ids_cache_num;
u32 starting_rule_id = 0;
u32 i = 0, j = 0;
int err = 0;
if (!priv->max_flow_rules)
return -EOPNOTSUPP;
do {
err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_IDS,
starting_rule_id);
if (err)
return err;
for (i = 0; i < *cache_num; i++) {
if (j >= cmd->rule_cnt)
return -EMSGSIZE;
rule_locs[j++] = be32_to_cpu(rule_ids_cache[i]);
starting_rule_id = be32_to_cpu(rule_ids_cache[i]) + 1;
}
} while (*cache_num != 0);
cmd->data = priv->max_flow_rules;
return err;
}
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
struct gve_adminq_flow_rule *rule = NULL;
int err;
if (!priv->max_flow_rules)
return -EOPNOTSUPP;
rule = kvzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
err = gve_generate_flow_rule(priv, fsp, rule);
if (err)
goto out;
err = gve_adminq_add_flow_rule(priv, rule, fsp->location);
out:
kvfree(rule);
if (err)
dev_err(&priv->pdev->dev, "Failed to add the flow rule: %u\n", fsp->location);
return err;
}
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
if (!priv->max_flow_rules)
return -EOPNOTSUPP;
return gve_adminq_del_flow_rule(priv, fsp->location);
}
drivers/net/ethernet/google/gve/gve_main.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
* Copyright (C) 2015-2024 Google LLC
*/
#include <linux/bpf.h>
@@ -141,6 +141,49 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
}
}
static int gve_alloc_flow_rule_caches(struct gve_priv *priv)
{
struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
int err = 0;
if (!priv->max_flow_rules)
return 0;
flow_rules_cache->rules_cache =
kvcalloc(GVE_FLOW_RULES_CACHE_SIZE, sizeof(*flow_rules_cache->rules_cache),
GFP_KERNEL);
if (!flow_rules_cache->rules_cache) {
dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n");
return -ENOMEM;
}
flow_rules_cache->rule_ids_cache =
kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache),
GFP_KERNEL);
if (!flow_rules_cache->rule_ids_cache) {
dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n");
err = -ENOMEM;
goto free_rules_cache;
}
return 0;
free_rules_cache:
kvfree(flow_rules_cache->rules_cache);
flow_rules_cache->rules_cache = NULL;
return err;
}
static void gve_free_flow_rule_caches(struct gve_priv *priv)
{
struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
kvfree(flow_rules_cache->rule_ids_cache);
flow_rules_cache->rule_ids_cache = NULL;
kvfree(flow_rules_cache->rules_cache);
flow_rules_cache->rules_cache = NULL;
}
static int gve_alloc_counter_array(struct gve_priv *priv)
{
priv->counter_array =
@@ -521,9 +564,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
{
int err;
err = gve_alloc_flow_rule_caches(priv);
if (err)
return err;
err = gve_alloc_counter_array(priv);
if (err)
goto abort_with_flow_rule_caches;
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
@@ -575,6 +621,8 @@ static int gve_setup_device_resources(struct gve_priv *priv)
gve_free_notify_blocks(priv);
abort_with_counter:
gve_free_counter_array(priv);
abort_with_flow_rule_caches:
gve_free_flow_rule_caches(priv);
return err;
}
@@ -587,6 +635,12 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
/* Tell device its resources are being freed */
if (gve_get_device_resources_ok(priv)) {
err = gve_flow_rules_reset(priv);
if (err) {
dev_err(&priv->pdev->dev,
"Failed to reset flow rules: err=%d\n", err);
gve_trigger_reset(priv);
}
/* detach the stats report */
err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
if (err) {
@@ -606,6 +660,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
kvfree(priv->ptype_lut_dqo);
priv->ptype_lut_dqo = NULL;
gve_free_flow_rule_caches(priv);
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
@@ -1730,6 +1785,14 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
int gve_flow_rules_reset(struct gve_priv *priv)
{
if (!priv->max_flow_rules)
return 0;
return gve_adminq_reset_flow_rules(priv);
}
int gve_adjust_config(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
@@ -2003,15 +2066,21 @@ static int gve_set_features(struct net_device *netdev,
netdev->features ^= NETIF_F_LRO;
if (netif_carrier_ok(netdev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
goto revert_features;
}
}
if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) {
err = gve_flow_rules_reset(priv);
if (err)
goto revert_features;
}
return 0;
revert_features:
netdev->features = orig_features;
return err;
}
static const struct net_device_ops gve_netdev_ops = {