Commit 3e0144ad authored by David S. Miller

Merge branch 'hns3-flow-director'

Huazhong Tan says:

====================
net: hns3: refactor and new features for flow director

This patchset refactors some functions and adds some new features to
the flow director.

patches 1~3: refactor large functions
patches 4 & 7: add traffic class and user-def field support for ethtool
patch 5: refactor flow director configuration
patch 6: clean up hns3_del_all_fd_entries()

change log:
V1->V2: modify patch 5 as Jakub suggested: keep configuring
	ethtool/tc flower rules synchronously, while aRFS rules are
	configured asynchronously.
	change the usecnt checking of the user-def rule in patch 7.
	remove previous patches 8 and 9 from this series, since
	there are issues that need further discussion.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 405a129f 67b0e142
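Background note for the user-def support added below (patches 4 and 7): the 64-bit ethtool user-def value is split into a 16-bit match data field and a 16-bit byte offset, as the comment in hclge_fd_parse_user_def_field() in the diff explains. The standalone sketch below is an illustration only, not part of the patch; the mask constants are assumptions that mirror the driver's HCLGE_FD_USER_DEF_DATA/OFFSET masks.

/* Standalone sketch, not driver code: split a 64-bit ethtool "user-def"
 * value the way hclge_fd_parse_user_def_field() describes it --
 * bits 0..15 carry the match data, bits 32..47 carry the byte offset.
 */
#include <stdint.h>
#include <stdio.h>

#define USER_DEF_DATA_MASK   0xffffu /* assumed, mirrors HCLGE_FD_USER_DEF_DATA */
#define USER_DEF_OFFSET_MASK 0xffffu /* assumed, mirrors HCLGE_FD_USER_DEF_OFFSET */

int main(void)
{
	uint64_t user_def = 0x0000002a0000beefULL;        /* offset 42, data 0xbeef */
	uint32_t hi = (uint32_t)(user_def >> 32);         /* ethtool h_ext.data[0] */
	uint32_t lo = (uint32_t)(user_def & 0xffffffffu); /* ethtool h_ext.data[1] */
	uint16_t offset = hi & USER_DEF_OFFSET_MASK;
	uint16_t data = lo & USER_DEF_DATA_MASK;

	printf("offset=%u data=0x%04x\n", offset, data);
	return 0;
}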
@@ -612,8 +612,6 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd); struct ethtool_rxnfc *cmd);
int (*del_fd_entry)(struct hnae3_handle *handle, int (*del_fd_entry)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd); struct ethtool_rxnfc *cmd);
void (*del_all_fd_entries)(struct hnae3_handle *handle,
bool clear_list);
int (*get_fd_rule_cnt)(struct hnae3_handle *handle, int (*get_fd_rule_cnt)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd); struct ethtool_rxnfc *cmd);
int (*get_fd_rule_info)(struct hnae3_handle *handle, int (*get_fd_rule_info)(struct hnae3_handle *handle,
@@ -4143,14 +4143,6 @@ static void hns3_uninit_phy(struct net_device *netdev)
h->ae_algo->ops->mac_disconnect_phy(h); h->ae_algo->ops->mac_disconnect_phy(h);
} }
static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->del_all_fd_entries)
h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}
static int hns3_client_start(struct hnae3_handle *handle) static int hns3_client_start(struct hnae3_handle *handle)
{ {
if (!handle->ae_algo->ops->client_start) if (!handle->ae_algo->ops->client_start)
@@ -4337,8 +4329,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_nic_uninit_irq(priv); hns3_nic_uninit_irq(priv);
hns3_del_all_fd_rules(netdev, true);
hns3_clear_all_ring(handle, true); hns3_clear_all_ring(handle, true);
hns3_nic_uninit_vector_data(priv); hns3_nic_uninit_vector_data(priv);
@@ -243,6 +243,7 @@ enum hclge_opcode_type {
HCLGE_OPC_FD_KEY_CONFIG = 0x1202, HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
HCLGE_OPC_FD_TCAM_OP = 0x1203, HCLGE_OPC_FD_TCAM_OP = 0x1203,
HCLGE_OPC_FD_AD_OP = 0x1204, HCLGE_OPC_FD_AD_OP = 0x1204,
HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
/* MDIO command */ /* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900, HCLGE_OPC_MDIO_CONFIG = 0x1900,
@@ -1082,6 +1083,19 @@ struct hclge_fd_ad_config_cmd {
u8 rsv2[8]; u8 rsv2[8];
}; };
#define HCLGE_FD_USER_DEF_OFT_S 0
#define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0)
#define HCLGE_FD_USER_DEF_EN_B 15
struct hclge_fd_user_def_cfg_cmd {
__le16 ol2_cfg;
__le16 l2_cfg;
__le16 ol3_cfg;
__le16 l3_cfg;
__le16 ol4_cfg;
__le16 l4_cfg;
u8 rsv[12];
};
struct hclge_get_m7_bd_cmd { struct hclge_get_m7_bd_cmd {
__le32 bd_num; __le32 bd_num;
u8 rsv[20]; u8 rsv[20];
@@ -62,7 +62,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev); static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle); static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr); unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev); static int hclge_set_default_loopback(struct hclge_dev *hdev);
@@ -70,6 +70,7 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev);
static void hclge_sync_mac_table(struct hclge_dev *hdev); static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev); static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev); static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo; static struct hnae3_ae_algo ae_algo;
@@ -384,36 +385,62 @@ static const struct key_info meta_data_key_info[] = {
 };

 static const struct key_info tuple_key_info[] = {
-	{ OUTER_DST_MAC, 48},
-	{ OUTER_SRC_MAC, 48},
-	{ OUTER_VLAN_TAG_FST, 16},
-	{ OUTER_VLAN_TAG_SEC, 16},
-	{ OUTER_ETH_TYPE, 16},
-	{ OUTER_L2_RSV, 16},
-	{ OUTER_IP_TOS, 8},
-	{ OUTER_IP_PROTO, 8},
-	{ OUTER_SRC_IP, 32},
-	{ OUTER_DST_IP, 32},
-	{ OUTER_L3_RSV, 16},
-	{ OUTER_SRC_PORT, 16},
-	{ OUTER_DST_PORT, 16},
-	{ OUTER_L4_RSV, 32},
-	{ OUTER_TUN_VNI, 24},
-	{ OUTER_TUN_FLOW_ID, 8},
-	{ INNER_DST_MAC, 48},
-	{ INNER_SRC_MAC, 48},
-	{ INNER_VLAN_TAG_FST, 16},
-	{ INNER_VLAN_TAG_SEC, 16},
-	{ INNER_ETH_TYPE, 16},
-	{ INNER_L2_RSV, 16},
-	{ INNER_IP_TOS, 8},
-	{ INNER_IP_PROTO, 8},
-	{ INNER_SRC_IP, 32},
-	{ INNER_DST_IP, 32},
-	{ INNER_L3_RSV, 16},
-	{ INNER_SRC_PORT, 16},
-	{ INNER_DST_PORT, 16},
-	{ INNER_L4_RSV, 32},
+	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
+	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
+	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
+	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
+	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
+	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
+	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
+	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
+	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
+	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
+	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
+	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
+	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
+	  offsetof(struct hclge_fd_rule, tuples.src_mac),
+	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
+	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
+	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
+	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
+	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
+	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
+	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
+	{ INNER_IP_TOS, 8, KEY_OPT_U8,
+	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
+	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
+	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
+	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
+	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
+	{ INNER_SRC_IP, 32, KEY_OPT_IP,
+	  offsetof(struct hclge_fd_rule, tuples.src_ip),
+	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
+	{ INNER_DST_IP, 32, KEY_OPT_IP,
+	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
+	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
+	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
+	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
+	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.src_port),
+	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
+	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.dst_port),
+	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
+	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
+	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
+	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
 };
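The reworked tuple_key_info[] table above records, for each inner tuple, where the value and its mask live inside struct hclge_fd_rule. A minimal standalone sketch of that table-driven pattern follows; the toy struct and names are invented for illustration and are not driver code.

/* Toy illustration of the offsetof()-driven lookup the new table enables:
 * one generic routine can fetch value and mask without a per-tuple switch.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_rule {
	uint8_t  ip_tos;
	uint16_t src_port;
	uint8_t  ip_tos_mask;
	uint16_t src_port_mask;
};

struct toy_key {
	int bits;    /* key width in bits */
	int offset;  /* offsetof() of the value inside struct toy_rule */
	int moffset; /* offsetof() of the mask inside struct toy_rule */
};

static const struct toy_key toy_keys[] = {
	{ 8,  offsetof(struct toy_rule, ip_tos),
	      offsetof(struct toy_rule, ip_tos_mask) },
	{ 16, offsetof(struct toy_rule, src_port),
	      offsetof(struct toy_rule, src_port_mask) },
};

int main(void)
{
	struct toy_rule rule = { .ip_tos = 0x10, .src_port = 443,
				 .ip_tos_mask = 0xff, .src_port_mask = 0xffff };
	const uint8_t *p = (const uint8_t *)&rule;
	uint16_t port, port_mask;

	/* generic, table-driven access: no switch over tuple types */
	memcpy(&port, p + toy_keys[1].offset, sizeof(port));
	memcpy(&port_mask, p + toy_keys[1].moffset, sizeof(port_mask));
	printf("tos=0x%02x/%02x port=%u/0x%04x\n",
	       p[toy_keys[0].offset], p[toy_keys[0].moffset], port, port_mask);
	return 0;
}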
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
@@ -4241,6 +4268,7 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
hclge_update_link_status(hdev); hclge_update_link_status(hdev);
hclge_sync_mac_table(hdev); hclge_sync_mac_table(hdev);
hclge_sync_promisc_mode(hdev); hclge_sync_promisc_mode(hdev);
hclge_sync_fd_table(hdev);
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
delta = jiffies - hdev->last_serv_processed; delta = jiffies - hdev->last_serv_processed;
@@ -5142,6 +5170,285 @@ static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
} }
static void hclge_sync_fd_state(struct hclge_dev *hdev)
{
if (hlist_empty(&hdev->fd_rule_list))
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
{
if (!test_bit(location, hdev->fd_bmap)) {
set_bit(location, hdev->fd_bmap);
hdev->hclge_fd_rule_num++;
}
}
static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
{
if (test_bit(location, hdev->fd_bmap)) {
clear_bit(location, hdev->fd_bmap);
hdev->hclge_fd_rule_num--;
}
}
static void hclge_fd_free_node(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
hlist_del(&rule->rule_node);
kfree(rule);
hclge_sync_fd_state(hdev);
}
static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
struct hclge_fd_rule *old_rule,
struct hclge_fd_rule *new_rule,
enum HCLGE_FD_NODE_STATE state)
{
switch (state) {
case HCLGE_FD_TO_ADD:
case HCLGE_FD_ACTIVE:
/* 1) if the new state is TO_ADD, just replace the old rule
* at the same location, no matter its state, because the
* new rule will be configured to the hardware.
* 2) if the new state is ACTIVE, the new rule has already
* been configured to the hardware, so just replace the old
* rule node at the same location.
* 3) in both cases no new node is added to the list, so it is
* unnecessary to update the rule number and fd_bmap.
*/
new_rule->rule_node.next = old_rule->rule_node.next;
new_rule->rule_node.pprev = old_rule->rule_node.pprev;
memcpy(old_rule, new_rule, sizeof(*old_rule));
kfree(new_rule);
break;
case HCLGE_FD_DELETED:
hclge_fd_dec_rule_cnt(hdev, old_rule->location);
hclge_fd_free_node(hdev, old_rule);
break;
case HCLGE_FD_TO_DEL:
/* if the new request is TO_DEL and the old rule exists:
* 1) if the old rule's state is TO_DEL, nothing needs to be done,
* because rules are deleted by location and the rest of the rule
* content is irrelevant.
* 2) if the old rule's state is ACTIVE, change it to TO_DEL, so the
* rule will be deleted when the periodic task is next scheduled.
* 3) if the old rule's state is TO_ADD, the rule has not been added
* to hardware yet, so just delete the rule node from fd_rule_list
* directly.
*/
if (old_rule->state == HCLGE_FD_TO_ADD) {
hclge_fd_dec_rule_cnt(hdev, old_rule->location);
hclge_fd_free_node(hdev, old_rule);
return;
}
old_rule->state = HCLGE_FD_TO_DEL;
break;
}
}
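The block comments in hclge_update_fd_rule_node() above describe how rule nodes move between states now that ethtool/tc flower rules are written synchronously while aRFS rules are flushed to hardware by the periodic service task. The toy model below is an illustration only, with invented names, compiled in userspace; it is not driver code.

/* Toy model of the rule lifecycle described above. */
#include <stdio.h>

enum toy_fd_state { TOY_FD_TO_ADD, TOY_FD_ACTIVE, TOY_FD_TO_DEL, TOY_FD_DELETED };

/* what one periodic sync pass does with a rule node in each state */
static enum toy_fd_state toy_sync_step(enum toy_fd_state state)
{
	switch (state) {
	case TOY_FD_TO_ADD:
		return TOY_FD_ACTIVE;  /* rule written to hardware */
	case TOY_FD_TO_DEL:
		return TOY_FD_DELETED; /* rule removed, node freed */
	default:
		return state;          /* ACTIVE rules need no work */
	}
}

int main(void)
{
	printf("%d %d\n", toy_sync_step(TOY_FD_TO_ADD), toy_sync_step(TOY_FD_TO_DEL));
	return 0;
}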
static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
u16 location,
struct hclge_fd_rule **parent)
{
struct hclge_fd_rule *rule;
struct hlist_node *node;
hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
if (rule->location == location)
return rule;
else if (rule->location > location)
return NULL;
/* record the parent node, used to keep the nodes in fd_rule_list
* in ascending order.
*/
*parent = rule;
}
return NULL;
}
/* insert fd rule node in ascending order according to rule->location */
static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
struct hclge_fd_rule *rule,
struct hclge_fd_rule *parent)
{
INIT_HLIST_NODE(&rule->rule_node);
if (parent)
hlist_add_behind(&rule->rule_node, &parent->rule_node);
else
hlist_add_head(&rule->rule_node, hlist);
}
static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
struct hclge_fd_user_def_cfg *cfg)
{
struct hclge_fd_user_def_cfg_cmd *req;
struct hclge_desc desc;
u16 data = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
req->ol2_cfg = cpu_to_le16(data);
data = 0;
hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
req->ol3_cfg = cpu_to_le16(data);
data = 0;
hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
req->ol4_cfg = cpu_to_le16(data);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to set fd user def data, ret= %d\n", ret);
return ret;
}
static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
{
int ret;
if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
return;
if (!locked)
spin_lock_bh(&hdev->fd_rule_lock);
ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
if (ret)
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
if (!locked)
spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
struct hlist_head *hlist = &hdev->fd_rule_list;
struct hclge_fd_rule *fd_rule, *parent = NULL;
struct hclge_fd_user_def_info *info, *old_info;
struct hclge_fd_user_def_cfg *cfg;
if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
return 0;
/* valid layers start from 1, so subtract 1 to index the cfg array */
cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
info = &rule->ep.user_def;
if (!cfg->ref_cnt || cfg->offset == info->offset)
return 0;
if (cfg->ref_cnt > 1)
goto error;
fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
if (fd_rule) {
old_info = &fd_rule->ep.user_def;
if (info->layer == old_info->layer)
return 0;
}
error:
dev_err(&hdev->pdev->dev,
"No available offset for layer%d fd rule, each layer only support one user def offset.\n",
info->layer + 1);
return -ENOSPC;
}
static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
struct hclge_fd_user_def_cfg *cfg;
if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
return;
cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
if (!cfg->ref_cnt) {
cfg->offset = rule->ep.user_def.offset;
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
}
cfg->ref_cnt++;
}
static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
struct hclge_fd_user_def_cfg *cfg;
if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
return;
cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
if (!cfg->ref_cnt)
return;
cfg->ref_cnt--;
if (!cfg->ref_cnt) {
cfg->offset = 0;
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
}
}
static void hclge_update_fd_list(struct hclge_dev *hdev,
enum HCLGE_FD_NODE_STATE state, u16 location,
struct hclge_fd_rule *new_rule)
{
struct hlist_head *hlist = &hdev->fd_rule_list;
struct hclge_fd_rule *fd_rule, *parent = NULL;
fd_rule = hclge_find_fd_rule(hlist, location, &parent);
if (fd_rule) {
hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
if (state == HCLGE_FD_ACTIVE)
hclge_fd_inc_user_def_refcnt(hdev, new_rule);
hclge_sync_fd_user_def_cfg(hdev, true);
hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
return;
}
/* it's unlikely to fail here, because we have already checked
* that the rule exists.
*/
if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
dev_warn(&hdev->pdev->dev,
"failed to delete fd rule %u, it's inexistent\n",
location);
return;
}
hclge_fd_inc_user_def_refcnt(hdev, new_rule);
hclge_sync_fd_user_def_cfg(hdev, true);
hclge_fd_insert_rule_node(hlist, new_rule, parent);
hclge_fd_inc_rule_cnt(hdev, new_rule->location);
if (state == HCLGE_FD_TO_ADD) {
set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
hclge_task_schedule(hdev, 0);
}
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{ {
struct hclge_get_fd_mode_cmd *req; struct hclge_get_fd_mode_cmd *req;
@@ -5220,6 +5527,17 @@ static int hclge_set_fd_key_config(struct hclge_dev *hdev,
return ret; return ret;
} }
static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
{
struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
spin_lock_bh(&hdev->fd_rule_lock);
memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
spin_unlock_bh(&hdev->fd_rule_lock);
hclge_fd_set_user_def_cmd(hdev, cfg);
}
static int hclge_init_fd_config(struct hclge_dev *hdev) static int hclge_init_fd_config(struct hclge_dev *hdev)
{ {
#define LOW_2_WORDS 0x03 #define LOW_2_WORDS 0x03
@@ -5260,9 +5578,12 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If use max 400bit key, we can support tuples for ether type */ /* If use max 400bit key, we can support tuples for ether type */
if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
key_cfg->tuple_active |= key_cfg->tuple_active |=
BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
}
/* roce_type is used to filter roce frames /* roce_type is used to filter roce frames
* dst_vport is used to specify the rule * dst_vport is used to specify the rule
@@ -5371,96 +5692,57 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
 				   struct hclge_fd_rule *rule)
 {
+	int offset, moffset, ip_offset;
+	enum HCLGE_FD_KEY_OPT key_opt;
 	u16 tmp_x_s, tmp_y_s;
 	u32 tmp_x_l, tmp_y_l;
+	u8 *p = (u8 *)rule;
 	int i;
-	if (rule->unused_tuple & tuple_bit)
+	if (rule->unused_tuple & BIT(tuple_bit))
 		return true;
-	switch (tuple_bit) {
-	case BIT(INNER_DST_MAC):
-		for (i = 0; i < ETH_ALEN; i++) {
-			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
-			       rule->tuples_mask.dst_mac[i]);
-			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
-			       rule->tuples_mask.dst_mac[i]);
-		}
-		return true;
-	case BIT(INNER_SRC_MAC):
-		for (i = 0; i < ETH_ALEN; i++) {
-			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
-			       rule->tuples_mask.src_mac[i]);
-			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
-			       rule->tuples_mask.src_mac[i]);
-		}
-		return true;
-	case BIT(INNER_VLAN_TAG_FST):
-		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
-		       rule->tuples_mask.vlan_tag1);
-		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
-		       rule->tuples_mask.vlan_tag1);
-		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+	key_opt = tuple_key_info[tuple_bit].key_opt;
+	offset = tuple_key_info[tuple_bit].offset;
+	moffset = tuple_key_info[tuple_bit].moffset;
+	switch (key_opt) {
+	case KEY_OPT_U8:
+		calc_x(*key_x, p[offset], p[moffset]);
+		calc_y(*key_y, p[offset], p[moffset]);
 		return true;
-	case BIT(INNER_ETH_TYPE):
-		calc_x(tmp_x_s, rule->tuples.ether_proto,
-		       rule->tuples_mask.ether_proto);
-		calc_y(tmp_y_s, rule->tuples.ether_proto,
-		       rule->tuples_mask.ether_proto);
+	case KEY_OPT_LE16:
+		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
+		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
 		return true;
-	case BIT(INNER_IP_TOS):
-		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
-		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
-		return true;
-	case BIT(INNER_IP_PROTO):
-		calc_x(*key_x, rule->tuples.ip_proto,
-		       rule->tuples_mask.ip_proto);
-		calc_y(*key_y, rule->tuples.ip_proto,
-		       rule->tuples_mask.ip_proto);
-		return true;
-	case BIT(INNER_SRC_IP):
-		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
-		       rule->tuples_mask.src_ip[IPV4_INDEX]);
-		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
-		       rule->tuples_mask.src_ip[IPV4_INDEX]);
-		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
-		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
-		return true;
-	case BIT(INNER_DST_IP):
-		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
-		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
-		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
-		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
+	case KEY_OPT_LE32:
+		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
+		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
 		return true;
-	case BIT(INNER_SRC_PORT):
-		calc_x(tmp_x_s, rule->tuples.src_port,
-		       rule->tuples_mask.src_port);
-		calc_y(tmp_y_s, rule->tuples.src_port,
-		       rule->tuples_mask.src_port);
-		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+	case KEY_OPT_MAC:
+		for (i = 0; i < ETH_ALEN; i++) {
+			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
+			       p[moffset + i]);
+			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
+			       p[moffset + i]);
+		}
 		return true;
-	case BIT(INNER_DST_PORT):
-		calc_x(tmp_x_s, rule->tuples.dst_port,
-		       rule->tuples_mask.dst_port);
-		calc_y(tmp_y_s, rule->tuples.dst_port,
-		       rule->tuples_mask.dst_port);
-		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+	case KEY_OPT_IP:
+		ip_offset = IPV4_INDEX * sizeof(u32);
+		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
+		       *(u32 *)(&p[moffset + ip_offset]));
+		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
+		       *(u32 *)(&p[moffset + ip_offset]));
+		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
 		return true;
 	default:
@@ -5548,12 +5830,12 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
 	for (i = 0 ; i < MAX_TUPLE; i++) {
 		bool tuple_valid;
-		u32 check_tuple;
 		tuple_size = tuple_key_info[i].key_length / 8;
-		check_tuple = key_cfg->tuple_active & BIT(i);
-		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+		if (!(key_cfg->tuple_active & BIT(i)))
+			continue;
+		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
 						     cur_key_y, rule);
 		if (tuple_valid) {
 			cur_key_x += tuple_size;
@@ -5684,8 +5966,7 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
 	if (!spec || !unused_tuple)
 		return -EINVAL;
-	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
-		BIT(INNER_IP_TOS);
+	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
 	/* check whether src/dst ip address used */
 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
@@ -5700,8 +5981,8 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
 	if (!spec->pdst)
 		*unused_tuple |= BIT(INNER_DST_PORT);
-	if (spec->tclass)
-		return -EOPNOTSUPP;
+	if (!spec->tclass)
+		*unused_tuple |= BIT(INNER_IP_TOS);
 	return 0;
 }
@@ -5713,7 +5994,7 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
 		return -EINVAL;
 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
-		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
 	/* check whether src/dst ip address used */
 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
@@ -5725,8 +6006,8 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
 	if (!spec->l4_proto)
 		*unused_tuple |= BIT(INNER_IP_PROTO);
-	if (spec->tclass)
-		return -EOPNOTSUPP;
+	if (!spec->tclass)
+		*unused_tuple |= BIT(INNER_IP_TOS);
 	if (spec->l4_4_bytes)
 		return -EOPNOTSUPP;
@@ -5796,9 +6077,98 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
return 0; return 0;
} }
static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
struct hclge_fd_user_def_info *info)
{
switch (flow_type) {
case ETHER_FLOW:
info->layer = HCLGE_FD_USER_DEF_L2;
*unused_tuple &= ~BIT(INNER_L2_RSV);
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
info->layer = HCLGE_FD_USER_DEF_L3;
*unused_tuple &= ~BIT(INNER_L3_RSV);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
info->layer = HCLGE_FD_USER_DEF_L4;
*unused_tuple &= ~BIT(INNER_L4_RSV);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
{
return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
}
static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
struct ethtool_rx_flow_spec *fs,
u32 *unused_tuple,
struct hclge_fd_user_def_info *info)
{
u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
u16 data, offset, data_mask, offset_mask;
int ret;
info->layer = HCLGE_FD_USER_DEF_NONE;
*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
return 0;
/* the user-def data from ethtool is a 64 bit value; bits 0~15 are used
 * for the data, and bits 32~47 are used for the offset.
 */
data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
return -EOPNOTSUPP;
}
if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
dev_err(&hdev->pdev->dev,
"user-def offset[%u] should be no more than %u\n",
offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
return -EINVAL;
}
if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
return -EINVAL;
}
ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
if (ret) {
dev_err(&hdev->pdev->dev,
"unsupported flow type for user-def bytes, ret = %d\n",
ret);
return ret;
}
info->data = data;
info->data_mask = data_mask;
info->offset = offset;
return 0;
}
 static int hclge_fd_check_spec(struct hclge_dev *hdev,
 			       struct ethtool_rx_flow_spec *fs,
-			       u32 *unused_tuple)
+			       u32 *unused_tuple,
+			       struct hclge_fd_user_def_info *info)
 {
 	u32 flow_type;
 	int ret;
@@ -5811,11 +6181,9 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
 		return -EINVAL;
 	}
-	if ((fs->flow_type & FLOW_EXT) &&
-	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
-		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
-		return -EOPNOTSUPP;
-	}
+	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
+	if (ret)
+		return ret;
 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
 	switch (flow_type) {
@@ -5867,84 +6235,10 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
} }
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
{
struct hclge_fd_rule *rule = NULL;
struct hlist_node *node2;
spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
if (rule->location >= location)
break;
}
spin_unlock_bh(&hdev->fd_rule_lock);
return rule && rule->location == location;
}
/* make sure being called after lock up with fd_rule_lock */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
struct hclge_fd_rule *new_rule,
u16 location,
bool is_add)
{
struct hclge_fd_rule *rule = NULL, *parent = NULL;
struct hlist_node *node2;
if (is_add && !new_rule)
return -EINVAL;
hlist_for_each_entry_safe(rule, node2,
&hdev->fd_rule_list, rule_node) {
if (rule->location >= location)
break;
parent = rule;
}
if (rule && rule->location == location) {
hlist_del(&rule->rule_node);
kfree(rule);
hdev->hclge_fd_rule_num--;
if (!is_add) {
if (!hdev->hclge_fd_rule_num)
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
clear_bit(location, hdev->fd_bmap);
return 0;
}
} else if (!is_add) {
dev_err(&hdev->pdev->dev,
"delete fail, rule %u is inexistent\n",
location);
return -EINVAL;
}
INIT_HLIST_NODE(&new_rule->rule_node);
if (parent)
hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
else
hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
set_bit(location, hdev->fd_bmap);
hdev->hclge_fd_rule_num++;
hdev->fd_active_type = new_rule->rule_type;
return 0;
}
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
struct ethtool_rx_flow_spec *fs, struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule) struct hclge_fd_rule *rule, u8 ip_proto)
{ {
u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
switch (flow_type) {
case SCTP_V4_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
rule->tuples.src_ip[IPV4_INDEX] = rule->tuples.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
rule->tuples_mask.src_ip[IPV4_INDEX] = rule->tuples_mask.src_ip[IPV4_INDEX] =
@@ -5956,12 +6250,10 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
rule->tuples_mask.src_port = rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
rule->tuples_mask.dst_port = rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
@@ -5969,8 +6261,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
rule->tuples.ether_proto = ETH_P_IP; rule->tuples.ether_proto = ETH_P_IP;
rule->tuples_mask.ether_proto = 0xFFFF; rule->tuples_mask.ether_proto = 0xFFFF;
break; rule->tuples.ip_proto = ip_proto;
case IP_USER_FLOW: rule->tuples_mask.ip_proto = 0xFF;
}
static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
rule->tuples.src_ip[IPV4_INDEX] = rule->tuples.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
rule->tuples_mask.src_ip[IPV4_INDEX] = rule->tuples_mask.src_ip[IPV4_INDEX] =
@@ -5989,95 +6287,142 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
rule->tuples.ether_proto = ETH_P_IP; rule->tuples.ether_proto = ETH_P_IP;
rule->tuples_mask.ether_proto = 0xFFFF; rule->tuples_mask.ether_proto = 0xFFFF;
}
break; static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
case SCTP_V6_FLOW: struct ethtool_rx_flow_spec *fs,
case TCP_V6_FLOW: struct hclge_fd_rule *rule, u8 ip_proto)
case UDP_V6_FLOW: {
be32_to_cpu_array(rule->tuples.src_ip, be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE); IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.src_ip, be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE); IPV6_SIZE);
be32_to_cpu_array(rule->tuples.dst_ip, be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.dst_ip, be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); IPV6_SIZE);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
rule->tuples_mask.src_port = rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
rule->tuples_mask.dst_port = rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
rule->tuples.ether_proto = ETH_P_IPV6; rule->tuples.ether_proto = ETH_P_IPV6;
rule->tuples_mask.ether_proto = 0xFFFF; rule->tuples_mask.ether_proto = 0xFFFF;
break; rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
case IPV6_USER_FLOW: rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
be32_to_cpu_array(rule->tuples.src_ip,
fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.src_ip,
fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples.dst_ip, rule->tuples.ip_proto = ip_proto;
fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE); rule->tuples_mask.ip_proto = 0xFF;
be32_to_cpu_array(rule->tuples_mask.dst_ip, }
fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
IPV6_SIZE);
be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
IPV6_SIZE);
rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
rule->tuples.ether_proto = ETH_P_IPV6; rule->tuples.ether_proto = ETH_P_IPV6;
rule->tuples_mask.ether_proto = 0xFFFF; rule->tuples_mask.ether_proto = 0xFFFF;
}
break; static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
case ETHER_FLOW: struct ethtool_rx_flow_spec *fs,
ether_addr_copy(rule->tuples.src_mac, struct hclge_fd_rule *rule)
fs->h_u.ether_spec.h_source); {
ether_addr_copy(rule->tuples_mask.src_mac, ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
fs->m_u.ether_spec.h_source); ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
ether_addr_copy(rule->tuples.dst_mac, ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
fs->h_u.ether_spec.h_dest); ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
ether_addr_copy(rule->tuples_mask.dst_mac,
fs->m_u.ether_spec.h_dest);
rule->tuples.ether_proto = rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
be16_to_cpu(fs->h_u.ether_spec.h_proto); rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
rule->tuples_mask.ether_proto = }
be16_to_cpu(fs->m_u.ether_spec.h_proto);
static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
struct hclge_fd_rule *rule)
{
switch (info->layer) {
case HCLGE_FD_USER_DEF_L2:
rule->tuples.l2_user_def = info->data;
rule->tuples_mask.l2_user_def = info->data_mask;
break;
case HCLGE_FD_USER_DEF_L3:
rule->tuples.l3_user_def = info->data;
rule->tuples_mask.l3_user_def = info->data_mask;
break;
case HCLGE_FD_USER_DEF_L4:
rule->tuples.l4_user_def = (u32)info->data << 16;
rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
break; break;
default: default:
return -EOPNOTSUPP; break;
} }
rule->ep.user_def = *info;
}
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule,
struct hclge_fd_user_def_info *info)
{
u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
switch (flow_type) { switch (flow_type) {
case SCTP_V4_FLOW: case SCTP_V4_FLOW:
hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
break;
case TCP_V4_FLOW:
hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
break;
case UDP_V4_FLOW:
hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
break;
case IP_USER_FLOW:
hclge_fd_get_ip4_tuple(hdev, fs, rule);
break;
case SCTP_V6_FLOW: case SCTP_V6_FLOW:
rule->tuples.ip_proto = IPPROTO_SCTP; hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
rule->tuples_mask.ip_proto = 0xFF;
break; break;
case TCP_V4_FLOW:
case TCP_V6_FLOW: case TCP_V6_FLOW:
rule->tuples.ip_proto = IPPROTO_TCP; hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
rule->tuples_mask.ip_proto = 0xFF;
break; break;
case UDP_V4_FLOW:
case UDP_V6_FLOW: case UDP_V6_FLOW:
rule->tuples.ip_proto = IPPROTO_UDP; hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
rule->tuples_mask.ip_proto = 0xFF;
break; break;
default: case IPV6_USER_FLOW:
hclge_fd_get_ip6_tuple(hdev, fs, rule);
break;
case ETHER_FLOW:
hclge_fd_get_ether_tuple(hdev, fs, rule);
break; break;
default:
return -EOPNOTSUPP;
} }
if (fs->flow_type & FLOW_EXT) { if (fs->flow_type & FLOW_EXT) {
rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
hclge_fd_get_user_def_tuple(info, rule);
} }
if (fs->flow_type & FLOW_MAC_EXT) { if (fs->flow_type & FLOW_MAC_EXT) {
@@ -6088,33 +6433,52 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
return 0; return 0;
} }
/* make sure being called after lock up with fd_rule_lock */
static int hclge_fd_config_rule(struct hclge_dev *hdev, static int hclge_fd_config_rule(struct hclge_dev *hdev,
struct hclge_fd_rule *rule) struct hclge_fd_rule *rule)
{ {
int ret; int ret;
if (!rule) { ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
if (ret)
return ret;
return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
}
static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
int ret;
spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type != rule->rule_type &&
(hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"The flow director rule is NULL\n"); "mode conflict(new type %d, active type %d), please delete existent rules first\n",
rule->rule_type, hdev->fd_active_type);
spin_unlock_bh(&hdev->fd_rule_lock);
return -EINVAL; return -EINVAL;
} }
/* it will never fail here, so needn't to check return value */ ret = hclge_fd_check_user_def_refcnt(hdev, rule);
hclge_fd_update_rule_list(hdev, rule, rule->location, true); if (ret)
goto out;
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); ret = hclge_clear_arfs_rules(hdev);
if (ret) if (ret)
goto clear_rule; goto out;
ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); ret = hclge_fd_config_rule(hdev, rule);
if (ret) if (ret)
goto clear_rule; goto out;
return 0; hclge_update_fd_list(hdev, HCLGE_FD_ACTIVE, rule->location, rule);
hdev->fd_active_type = rule->rule_type;
clear_rule: out:
hclge_fd_update_rule_list(hdev, rule, rule->location, false); spin_unlock_bh(&hdev->fd_rule_lock);
return ret; return ret;
} }
@@ -6126,11 +6490,48 @@ static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
} }
static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
u16 *vport_id, u8 *action, u16 *queue_id)
{
struct hclge_vport *vport = hdev->vport;
if (ring_cookie == RX_CLS_FLOW_DISC) {
*action = HCLGE_FD_ACTION_DROP_PACKET;
} else {
u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
u16 tqps;
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
"Error: vf id (%u) > max vf num (%u)\n",
vf, hdev->num_req_vfs);
return -EINVAL;
}
*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
tqps = hdev->vport[vf].nic.kinfo.num_tqps;
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
"Error: queue id (%u) > max tqp num (%u)\n",
ring, tqps - 1);
return -EINVAL;
}
*action = HCLGE_FD_ACTION_SELECT_QUEUE;
*queue_id = ring;
}
return 0;
}
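The new hclge_fd_parse_ring_cookie() above splits ethtool's 64-bit ring_cookie into a VF index and a queue id. The standalone sketch below is an illustration only; the mask and shift values are assumptions mirroring the ethtool ring_cookie layout (low 32 bits select the queue, the next 8 bits select the VF, 0 meaning the PF).

/* Toy split of an ethtool ring_cookie, not driver code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ring_cookie = ((uint64_t)2 << 32) | 5; /* vf field 2, queue 5 */
	uint32_t queue = (uint32_t)(ring_cookie & 0xffffffffULL);
	uint8_t vf = (uint8_t)((ring_cookie >> 32) & 0xff);

	printf("vf=%u queue=%u\n", vf, queue);
	return 0;
}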
static int hclge_add_fd_entry(struct hnae3_handle *handle, static int hclge_add_fd_entry(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd) struct ethtool_rxnfc *cmd)
{ {
struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
struct hclge_fd_user_def_info info;
u16 dst_vport_id = 0, q_index = 0; u16 dst_vport_id = 0, q_index = 0;
struct ethtool_rx_flow_spec *fs; struct ethtool_rx_flow_spec *fs;
struct hclge_fd_rule *rule; struct hclge_fd_rule *rule;
@@ -6150,51 +6551,22 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (hclge_is_cls_flower_active(handle)) {
dev_err(&hdev->pdev->dev,
"please delete all exist cls flower rules first\n");
return -EINVAL;
}
fs = (struct ethtool_rx_flow_spec *)&cmd->fs; fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
ret = hclge_fd_check_spec(hdev, fs, &unused); ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
if (ret) if (ret)
return ret; return ret;
if (fs->ring_cookie == RX_CLS_FLOW_DISC) { ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
action = HCLGE_FD_ACTION_DROP_PACKET; &action, &q_index);
} else { if (ret)
u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); return ret;
u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
u16 tqps;
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
"Error: vf id (%u) > max vf num (%u)\n",
vf, hdev->num_req_vfs);
return -EINVAL;
}
dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
"Error: queue id (%u) > max tqp num (%u)\n",
ring, tqps - 1);
return -EINVAL;
}
action = HCLGE_FD_ACTION_SELECT_QUEUE;
q_index = ring;
}
rule = kzalloc(sizeof(*rule), GFP_KERNEL); rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule) if (!rule)
return -ENOMEM; return -ENOMEM;
ret = hclge_fd_get_tuple(hdev, fs, rule); ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
if (ret) { if (ret) {
kfree(rule); kfree(rule);
return ret; return ret;
@@ -6208,15 +6580,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
rule->action = action; rule->action = action;
rule->rule_type = HCLGE_FD_EP_ACTIVE; rule->rule_type = HCLGE_FD_EP_ACTIVE;
/* to avoid rule conflict, when user configure rule by ethtool, ret = hclge_add_fd_entry_common(hdev, rule);
* we need to clear all arfs rules if (ret)
*/ kfree(rule);
spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
ret = hclge_fd_config_rule(hdev, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
return ret; return ret;
} }
@@ -6237,32 +6603,30 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
return -EINVAL; return -EINVAL;
if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num || spin_lock_bh(&hdev->fd_rule_lock);
!hclge_fd_rule_exist(hdev, fs->location)) { if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
!test_bit(fs->location, hdev->fd_bmap)) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Delete fail, rule %u is inexistent\n", fs->location); "Delete fail, rule %u is inexistent\n", fs->location);
spin_unlock_bh(&hdev->fd_rule_lock);
return -ENOENT; return -ENOENT;
} }
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
NULL, false); NULL, false);
if (ret) if (ret)
return ret; goto out;
spin_lock_bh(&hdev->fd_rule_lock); hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
out:
spin_unlock_bh(&hdev->fd_rule_lock); spin_unlock_bh(&hdev->fd_rule_lock);
return ret; return ret;
} }
/* make sure being called after lock up with fd_rule_lock */ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bool clear_list) bool clear_list)
{ {
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule; struct hclge_fd_rule *rule;
struct hlist_node *node; struct hlist_node *node;
u16 location; u16 location;
@@ -6270,6 +6634,8 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev)) if (!hnae3_dev_fd_supported(hdev))
return; return;
spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap, for_each_set_bit(location, hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -6286,6 +6652,14 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bitmap_zero(hdev->fd_bmap, bitmap_zero(hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
} }
spin_unlock_bh(&hdev->fd_rule_lock);
}
static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
hclge_clear_fd_rules_in_list(hdev, true);
hclge_fd_disable_user_def(hdev);
} }
static int hclge_restore_fd_entries(struct hnae3_handle *handle) static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6294,7 +6668,6 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule; struct hclge_fd_rule *rule;
struct hlist_node *node; struct hlist_node *node;
int ret;
/* Return ok here, because reset error handling will check this /* Return ok here, because reset error handling will check this
* return value. If error is returned here, the reset process will * return value. If error is returned here, the reset process will
@@ -6309,25 +6682,11 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
spin_lock_bh(&hdev->fd_rule_lock); spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); if (rule->state == HCLGE_FD_ACTIVE)
if (!ret) rule->state = HCLGE_FD_TO_ADD;
ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
if (ret) {
dev_warn(&hdev->pdev->dev,
"Restore rule %u failed, remove it\n",
rule->location);
clear_bit(rule->location, hdev->fd_bmap);
hlist_del(&rule->rule_node);
kfree(rule);
hdev->hclge_fd_rule_num--;
}
} }
if (hdev->hclge_fd_rule_num)
hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
spin_unlock_bh(&hdev->fd_rule_lock); spin_unlock_bh(&hdev->fd_rule_lock);
set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
return 0; return 0;
} }
@@ -6415,6 +6774,10 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
IPV6_SIZE); IPV6_SIZE);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
0 : rule->tuples_mask.ip_tos;
spec->psrc = cpu_to_be16(rule->tuples.src_port); spec->psrc = cpu_to_be16(rule->tuples.src_port);
spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.src_port); 0 : cpu_to_be16(rule->tuples_mask.src_port);
@@ -6442,6 +6805,10 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
cpu_to_be32_array(spec_mask->ip6dst, cpu_to_be32_array(spec_mask->ip6dst,
rule->tuples_mask.dst_ip, IPV6_SIZE); rule->tuples_mask.dst_ip, IPV6_SIZE);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
0 : rule->tuples_mask.ip_tos;
spec->l4_proto = rule->tuples.ip_proto; spec->l4_proto = rule->tuples.ip_proto;
spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
0 : rule->tuples_mask.ip_proto; 0 : rule->tuples_mask.ip_proto;
@@ -6469,6 +6836,24 @@ static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
0 : cpu_to_be16(rule->tuples_mask.ether_proto); 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
} }
static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
fs->h_ext.data[0] = 0;
fs->h_ext.data[1] = 0;
fs->m_ext.data[0] = 0;
fs->m_ext.data[1] = 0;
} else {
fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
fs->m_ext.data[0] =
cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
}
}
static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule) struct hclge_fd_rule *rule)
{ {
@@ -6477,6 +6862,8 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
fs->m_ext.vlan_tci = fs->m_ext.vlan_tci =
rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
hclge_fd_get_user_def_info(fs, rule);
} }
if (fs->flow_type & FLOW_MAC_EXT) { if (fs->flow_type & FLOW_MAC_EXT) {
@@ -6588,6 +6975,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
return -EMSGSIZE; return -EMSGSIZE;
} }
if (rule->state == HCLGE_FD_TO_DEL)
continue;
rule_locs[cnt] = rule->location; rule_locs[cnt] = rule->location;
cnt++; cnt++;
} }
@@ -6669,9 +7059,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
struct hclge_fd_rule_tuples new_tuples = {}; struct hclge_fd_rule_tuples new_tuples = {};
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule; struct hclge_fd_rule *rule;
u16 tmp_queue_id;
u16 bit_id; u16 bit_id;
int ret;
if (!hnae3_dev_fd_supported(hdev)) if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP; return -EOPNOTSUPP;
@@ -6707,34 +7095,20 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
return -ENOMEM; return -ENOMEM;
} }
set_bit(bit_id, hdev->fd_bmap);
rule->location = bit_id; rule->location = bit_id;
rule->arfs.flow_id = flow_id; rule->arfs.flow_id = flow_id;
rule->queue_id = queue_id; rule->queue_id = queue_id;
hclge_fd_build_arfs_rule(&new_tuples, rule); hclge_fd_build_arfs_rule(&new_tuples, rule);
ret = hclge_fd_config_rule(hdev, rule); hclge_update_fd_list(hdev, HCLGE_FD_TO_ADD, rule->location,
rule);
spin_unlock_bh(&hdev->fd_rule_lock); hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
} else if (rule->queue_id != queue_id) {
if (ret)
return ret;
return rule->location;
}
spin_unlock_bh(&hdev->fd_rule_lock);
if (rule->queue_id == queue_id)
return rule->location;
tmp_queue_id = rule->queue_id;
rule->queue_id = queue_id; rule->queue_id = queue_id;
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); rule->state = HCLGE_FD_TO_ADD;
if (ret) { set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
rule->queue_id = tmp_queue_id; hclge_task_schedule(hdev, 0);
return ret;
} }
spin_unlock_bh(&hdev->fd_rule_lock);
return rule->location; return rule->location;
} }
@@ -6744,7 +7118,6 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
struct hnae3_handle *handle = &hdev->vport[0].nic; struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_fd_rule *rule; struct hclge_fd_rule *rule;
struct hlist_node *node; struct hlist_node *node;
HLIST_HEAD(del_list);
spin_lock_bh(&hdev->fd_rule_lock); spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
@@ -6752,33 +7125,50 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
                return;
        }
        hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+               if (rule->state != HCLGE_FD_ACTIVE)
+                       continue;
+
                if (rps_may_expire_flow(handle->netdev, rule->queue_id,
                                        rule->arfs.flow_id, rule->location)) {
-                       hlist_del_init(&rule->rule_node);
-                       hlist_add_head(&rule->rule_node, &del_list);
-                       hdev->hclge_fd_rule_num--;
-                       clear_bit(rule->location, hdev->fd_bmap);
+                       rule->state = HCLGE_FD_TO_DEL;
+                       set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
                }
        }
        spin_unlock_bh(&hdev->fd_rule_lock);
-
-       hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
-               hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
-                                    rule->location, NULL, false);
-               kfree(rule);
-       }
#endif
}

/* make sure being called after lock up with fd_rule_lock */
-static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
-       struct hclge_vport *vport = hclge_get_vport(handle);
-       struct hclge_dev *hdev = vport->back;
-
-       if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
-               hclge_del_all_fd_entries(handle, true);
+       struct hclge_fd_rule *rule;
+       struct hlist_node *node;
+       int ret;
+
+       if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
+               return 0;
+
+       hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+               switch (rule->state) {
+               case HCLGE_FD_TO_DEL:
+               case HCLGE_FD_ACTIVE:
+                       ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+                                                  rule->location, NULL, false);
+                       if (ret)
+                               return ret;
+                       fallthrough;
+               case HCLGE_FD_TO_ADD:
+                       hclge_fd_dec_rule_cnt(hdev, rule->location);
+                       hlist_del(&rule->rule_node);
+                       kfree(rule);
+                       break;
+               default:
+                       break;
+               }
+       }
+       hclge_sync_fd_state(hdev);
+
+       return 0;
#endif
}
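hclge_clear_arfs_rules() now takes the hclge_dev directly, returns an error code, and relies on the caller to hold fd_rule_lock. A short sketch of the expected calling pattern (hclge_ae_stop() later in this diff uses exactly this shape, ignoring the return value):

        spin_lock_bh(&hdev->fd_rule_lock);
        hclge_clear_arfs_rules(hdev);   /* flush aRFS entries from TCAM and list */
        spin_unlock_bh(&hdev->fd_rule_lock);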
@@ -6961,12 +7351,6 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
        struct hclge_fd_rule *rule;
        int ret;

-       if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
-               dev_err(&hdev->pdev->dev,
-                       "please remove all exist fd rules via ethtool first\n");
-               return -EINVAL;
-       }
-
        ret = hclge_check_cls_flower(hdev, cls_flower, tc);
        if (ret) {
                dev_err(&hdev->pdev->dev,
@@ -6979,8 +7363,10 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
                return -ENOMEM;

        ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
-       if (ret)
-               goto err;
+       if (ret) {
+               kfree(rule);
+               return ret;
+       }

        rule->action = HCLGE_FD_ACTION_SELECT_TC;
        rule->cls_flower.tc = tc;
@@ -6989,22 +7375,10 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
        rule->cls_flower.cookie = cls_flower->cookie;
        rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;

-       spin_lock_bh(&hdev->fd_rule_lock);
-       hclge_clear_arfs_rules(handle);
-
-       ret = hclge_fd_config_rule(hdev, rule);
-
-       spin_unlock_bh(&hdev->fd_rule_lock);
-
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "failed to add cls flower rule, ret = %d\n", ret);
-               goto err;
-       }
-
-       return 0;
-
-err:
+       ret = hclge_add_fd_entry_common(hdev, rule);
+       if (ret)
                kfree(rule);

        return ret;
}
@@ -7041,25 +7415,66 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
        ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
                                   NULL, false);
        if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "failed to delete cls flower rule %u, ret = %d\n",
-                       rule->location, ret);
                spin_unlock_bh(&hdev->fd_rule_lock);
                return ret;
        }

-       ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "failed to delete cls flower rule %u in list, ret = %d\n",
-                       rule->location, ret);
-               spin_unlock_bh(&hdev->fd_rule_lock);
-               return ret;
-       }
+       hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
        spin_unlock_bh(&hdev->fd_rule_lock);

        return 0;
}
+static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
+{
+       struct hclge_fd_rule *rule;
+       struct hlist_node *node;
+       int ret = 0;
+
+       if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
+               return;
+
+       spin_lock_bh(&hdev->fd_rule_lock);
+
+       hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
+               switch (rule->state) {
+               case HCLGE_FD_TO_ADD:
+                       ret = hclge_fd_config_rule(hdev, rule);
+                       if (ret)
+                               goto out;
+                       rule->state = HCLGE_FD_ACTIVE;
+                       break;
+               case HCLGE_FD_TO_DEL:
+                       ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+                                                  rule->location, NULL, false);
+                       if (ret)
+                               goto out;
+                       hclge_fd_dec_rule_cnt(hdev, rule->location);
+                       hclge_fd_free_node(hdev, rule);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+out:
+       if (ret)
+               set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+
+       spin_unlock_bh(&hdev->fd_rule_lock);
+}
+
+static void hclge_sync_fd_table(struct hclge_dev *hdev)
+{
+       if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
+               bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
+
+               hclge_clear_fd_rules_in_list(hdev, clear_list);
+       }
+
+       hclge_sync_fd_user_def_cfg(hdev, false);
+
+       hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
+}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
@@ -7099,18 +7514,15 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
-       bool clear;

        hdev->fd_en = enable;
-       clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
-       if (!enable) {
-               spin_lock_bh(&hdev->fd_rule_lock);
-               hclge_del_all_fd_entries(handle, clear);
-               spin_unlock_bh(&hdev->fd_rule_lock);
-       } else {
+
+       if (!enable)
+               set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
+       else
                hclge_restore_fd_entries(handle);
-       }
+
+       hclge_task_schedule(hdev, 0);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -7581,7 +7993,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
        set_bit(HCLGE_STATE_DOWN, &hdev->state);
        spin_lock_bh(&hdev->fd_rule_lock);
-       hclge_clear_arfs_rules(handle);
+       hclge_clear_arfs_rules(hdev);
        spin_unlock_bh(&hdev->fd_rule_lock);

        /* If it is not PF reset, the firmware will disable the MAC,
@@ -9617,7 +10029,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev)
        hclge_restore_mac_table_common(vport);
        hclge_restore_vport_vlan_table(vport);
        set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+       set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
        hclge_restore_fd_entries(handle);
}
@@ -11306,6 +11718,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        hclge_misc_affinity_teardown(hdev);
        hclge_state_uninit(hdev);
        hclge_uninit_mac_table(hdev);
+       hclge_del_all_fd_entries(hdev);

        if (mac->phydev)
                mdiobus_unregister(mac->mdio_bus);
@@ -12129,7 +12542,6 @@ static const struct hnae3_ae_ops hclge_ops = {
        .get_link_mode = hclge_get_link_mode,
        .add_fd_entry = hclge_add_fd_entry,
        .del_fd_entry = hclge_del_fd_entry,
-       .del_all_fd_entries = hclge_del_all_fd_entries,
        .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
        .get_fd_rule_info = hclge_get_fd_rule_info,
        .get_fd_all_rules = hclge_get_all_rules,
......
@@ -223,6 +223,9 @@ enum HCLGE_DEV_STATE {
        HCLGE_STATE_LINK_UPDATING,
        HCLGE_STATE_PROMISC_CHANGED,
        HCLGE_STATE_RST_FAIL,
+       HCLGE_STATE_FD_TBL_CHANGED,
+       HCLGE_STATE_FD_CLEAR_ALL,
+       HCLGE_STATE_FD_USER_DEF_CHANGED,
        HCLGE_STATE_MAX
};
@@ -536,6 +539,9 @@ enum HCLGE_FD_TUPLE {
        MAX_TUPLE,
};

+#define HCLGE_FD_TUPLE_USER_DEF_TUPLES \
+       (BIT(INNER_L2_RSV) | BIT(INNER_L3_RSV) | BIT(INNER_L4_RSV))
+
enum HCLGE_FD_META_DATA {
        PACKET_TYPE_ID,
        IP_FRAGEMENT,
@@ -548,9 +554,21 @@ enum HCLGE_FD_META_DATA {
        MAX_META_DATA,
};

+enum HCLGE_FD_KEY_OPT {
+       KEY_OPT_U8,
+       KEY_OPT_LE16,
+       KEY_OPT_LE32,
+       KEY_OPT_MAC,
+       KEY_OPT_IP,
+       KEY_OPT_VNI,
+};
+
struct key_info {
        u8 key_type;
        u8 key_length; /* use bit as unit */
+       enum HCLGE_FD_KEY_OPT key_opt;
+       int offset;
+       int moffset;
};
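struct key_info now records how each tuple is encoded (key_opt) together with the offsets of the value and its mask inside struct hclge_fd_rule_tuples, which lets the key-conversion code be table-driven instead of one switch case per tuple. An illustrative entry under that assumption (not a verbatim copy of the driver's tuple_key_info[] table) might look like:

/* Illustrative only; the real table lives in hclge_main.c and may differ. */
static const struct key_info example_tuple_key_info[] = {
        [INNER_DST_PORT] = {
                .key_type   = INNER_DST_PORT,
                .key_length = 16,
                .key_opt    = KEY_OPT_LE16,
                .offset     = offsetof(struct hclge_fd_rule_tuples, dst_port),
                .moffset    = offsetof(struct hclge_fd_rule_tuples, dst_port),
        },
};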
#define MAX_KEY_LENGTH 400
@@ -558,6 +576,11 @@ struct key_info {
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32

+#define HCLGE_FD_MAX_USER_DEF_OFFSET 9000
+#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0)
+#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0)
+#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0)
+
/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
@@ -580,6 +603,33 @@ enum HCLGE_FD_ACTION {
        HCLGE_FD_ACTION_SELECT_TC,
};

+enum HCLGE_FD_NODE_STATE {
+       HCLGE_FD_TO_ADD,
+       HCLGE_FD_TO_DEL,
+       HCLGE_FD_ACTIVE,
+       HCLGE_FD_DELETED,
+};
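These states carry the deferred model used throughout the series: a rule enters the list as HCLGE_FD_TO_ADD and is promoted to HCLGE_FD_ACTIVE once hclge_sync_fd_list() has written it to the TCAM; an expiring or removed rule becomes HCLGE_FD_TO_DEL and is freed by the same task after its TCAM entry is cleared; HCLGE_FD_DELETED is used by callers that have already removed the hardware entry themselves (as hclge_del_cls_flower() does above), so the list update only drops the node.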
+enum HCLGE_FD_USER_DEF_LAYER {
+       HCLGE_FD_USER_DEF_NONE,
+       HCLGE_FD_USER_DEF_L2,
+       HCLGE_FD_USER_DEF_L3,
+       HCLGE_FD_USER_DEF_L4,
+};
+
+#define HCLGE_FD_USER_DEF_LAYER_NUM 3
+
+struct hclge_fd_user_def_cfg {
+       u16 ref_cnt;
+       u16 offset;
+};
+
+struct hclge_fd_user_def_info {
+       enum HCLGE_FD_USER_DEF_LAYER layer;
+       u16 data;
+       u16 data_mask;
+       u16 offset;
+};
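struct hclge_fd_user_def_info is what an ethtool user-def value gets parsed into. The retrieval path earlier in this diff stores the offset mask in fs->m_ext.data[0] and the data mask in fs->m_ext.data[1], so the parse direction presumably splits the 64-bit user-def the same way; a hedged sketch with a hypothetical helper name (layer selection from the flow type is omitted):

/* Hypothetical sketch mirroring the get side shown earlier in this diff;
 * the real parser in hclge_main.c may differ in detail.
 */
static void example_parse_user_def(const struct ethtool_rx_flow_spec *fs,
                                   struct hclge_fd_user_def_info *info)
{
        info->offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
        info->data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
        info->data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
}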
struct hclge_fd_key_cfg {
        u8 key_sel;
        u8 inner_sipv6_word_en;
@@ -596,6 +646,7 @@ struct hclge_fd_cfg {
        u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
        u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
        struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
+       struct hclge_fd_user_def_cfg user_def_cfg[HCLGE_FD_USER_DEF_LAYER_NUM];
};

#define IPV4_INDEX 3
@@ -612,6 +663,9 @@ struct hclge_fd_rule_tuples {
        u16 dst_port;
        u16 vlan_tag1;
        u16 ether_proto;
+       u16 l2_user_def;
+       u16 l3_user_def;
+       u32 l4_user_def;
        u8 ip_tos;
        u8 ip_proto;
};
@@ -630,11 +684,15 @@ struct hclge_fd_rule {
        struct {
                u16 flow_id; /* only used for arfs */
        } arfs;
+       struct {
+               struct hclge_fd_user_def_info user_def;
+       } ep;
        };
        u16 queue_id;
        u16 vf_id;
        u16 location;
        enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
+       enum HCLGE_FD_NODE_STATE state;
        u8 action;
};
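Each rule node now carries per-source private data in the anonymous union (arfs for entries created via aRFS, ep for ethtool rules carrying the user-def info) plus a state field, which is what lets ethtool/tc rules, aRFS rules and the background sync task share a single fd_rule_list.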
......