Commit fb72699d authored by Jian Shen, committed by David S. Miller

net: hns3: refactor for function hclge_fd_convert_tuple

Currently, there are too many branches in hclge_fd_convert_tuple(),
and even more would be needed as new tuples are added. Refactor it by
sorting the tuples according to their length, so only a few KEY_OPT
handlers are needed now, and new tuples can be added flexibly.
Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 74b755d1
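
Before the diff itself, a short illustration of the technique described above may help. Each tuple_key_info entry now records a conversion type (key_opt) together with the offsetof() of the tuple value and of its mask, so one generic handler per data width can fetch any field through a byte pointer into the rule, instead of one switch case per field. The sketch below is a minimal, standalone demonstration of that table-driven offsetof pattern; the struct, field, and function names are invented for illustration and are not the driver's code.

/* Illustrative only: a table-driven field lookup using offsetof(), mirroring
 * the key_opt/offset/moffset idea from this patch. All names are made up.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_rule {
	uint8_t  proto;
	uint16_t port;
	uint8_t  proto_mask;
	uint16_t port_mask;
};

enum demo_opt { OPT_U8, OPT_U16 };

struct demo_key_info {
	enum demo_opt opt;	/* how wide the field is */
	size_t offset;		/* offsetof() of the value */
	size_t moffset;		/* offsetof() of the mask */
};

static const struct demo_key_info demo_tuples[] = {
	{ OPT_U8,  offsetof(struct demo_rule, proto),
		   offsetof(struct demo_rule, proto_mask) },
	{ OPT_U16, offsetof(struct demo_rule, port),
		   offsetof(struct demo_rule, port_mask) },
};

/* One handler per width replaces one switch case per field. */
static void demo_convert(const struct demo_rule *rule, size_t idx)
{
	const struct demo_key_info *info = &demo_tuples[idx];
	const uint8_t *p = (const uint8_t *)rule;

	switch (info->opt) {
	case OPT_U8:
		printf("u8  value=%u mask=%u\n",
		       (unsigned)p[info->offset], (unsigned)p[info->moffset]);
		break;
	case OPT_U16: {
		uint16_t v, m;

		memcpy(&v, p + info->offset, sizeof(v));	/* generic fetch by offset */
		memcpy(&m, p + info->moffset, sizeof(m));
		printf("u16 value=%u mask=%u\n", (unsigned)v, (unsigned)m);
		break;
	}
	}
}

int main(void)
{
	struct demo_rule rule = {
		.proto = 6, .port = 443,
		.proto_mask = 0xff, .port_mask = 0xffff,
	};
	size_t i;

	for (i = 0; i < sizeof(demo_tuples) / sizeof(demo_tuples[0]); i++)
		demo_convert(&rule, i);
	return 0;
}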
@@ -384,36 +384,56 @@ static const struct key_info meta_data_key_info[] = {
 };
 
 static const struct key_info tuple_key_info[] = {
-	{ OUTER_DST_MAC, 48},
-	{ OUTER_SRC_MAC, 48},
-	{ OUTER_VLAN_TAG_FST, 16},
-	{ OUTER_VLAN_TAG_SEC, 16},
-	{ OUTER_ETH_TYPE, 16},
-	{ OUTER_L2_RSV, 16},
-	{ OUTER_IP_TOS, 8},
-	{ OUTER_IP_PROTO, 8},
-	{ OUTER_SRC_IP, 32},
-	{ OUTER_DST_IP, 32},
-	{ OUTER_L3_RSV, 16},
-	{ OUTER_SRC_PORT, 16},
-	{ OUTER_DST_PORT, 16},
-	{ OUTER_L4_RSV, 32},
-	{ OUTER_TUN_VNI, 24},
-	{ OUTER_TUN_FLOW_ID, 8},
-	{ INNER_DST_MAC, 48},
-	{ INNER_SRC_MAC, 48},
-	{ INNER_VLAN_TAG_FST, 16},
-	{ INNER_VLAN_TAG_SEC, 16},
-	{ INNER_ETH_TYPE, 16},
-	{ INNER_L2_RSV, 16},
-	{ INNER_IP_TOS, 8},
-	{ INNER_IP_PROTO, 8},
-	{ INNER_SRC_IP, 32},
-	{ INNER_DST_IP, 32},
-	{ INNER_L3_RSV, 16},
-	{ INNER_SRC_PORT, 16},
-	{ INNER_DST_PORT, 16},
-	{ INNER_L4_RSV, 32},
+	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
+	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
+	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
+	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
+	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
+	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
+	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
+	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
+	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
+	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
+	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
+	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
+	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
+	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
+	  offsetof(struct hclge_fd_rule, tuples.src_mac),
+	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
+	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
+	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
+	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
+	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
+	{ INNER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
+	{ INNER_IP_TOS, 8, KEY_OPT_U8,
+	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
+	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
+	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
+	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
+	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
+	{ INNER_SRC_IP, 32, KEY_OPT_IP,
+	  offsetof(struct hclge_fd_rule, tuples.src_ip),
+	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
+	{ INNER_DST_IP, 32, KEY_OPT_IP,
+	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
+	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
+	{ INNER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
+	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.src_port),
+	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
+	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
+	  offsetof(struct hclge_fd_rule, tuples.dst_port),
+	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
+	{ INNER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
 };
 
 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
@@ -5371,96 +5391,57 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
 				   struct hclge_fd_rule *rule)
 {
+	int offset, moffset, ip_offset;
+	enum HCLGE_FD_KEY_OPT key_opt;
 	u16 tmp_x_s, tmp_y_s;
 	u32 tmp_x_l, tmp_y_l;
+	u8 *p = (u8 *)rule;
 	int i;
 
-	if (rule->unused_tuple & tuple_bit)
+	if (rule->unused_tuple & BIT(tuple_bit))
 		return true;
 
-	switch (tuple_bit) {
-	case BIT(INNER_DST_MAC):
-		for (i = 0; i < ETH_ALEN; i++) {
-			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
-			       rule->tuples_mask.dst_mac[i]);
-			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
-			       rule->tuples_mask.dst_mac[i]);
-		}
-
-		return true;
-	case BIT(INNER_SRC_MAC):
-		for (i = 0; i < ETH_ALEN; i++) {
-			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
-			       rule->tuples_mask.src_mac[i]);
-			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
-			       rule->tuples_mask.src_mac[i]);
-		}
-
-		return true;
-	case BIT(INNER_VLAN_TAG_FST):
-		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
-		       rule->tuples_mask.vlan_tag1);
-		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
-		       rule->tuples_mask.vlan_tag1);
-		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
-
-		return true;
-	case BIT(INNER_ETH_TYPE):
-		calc_x(tmp_x_s, rule->tuples.ether_proto,
-		       rule->tuples_mask.ether_proto);
-		calc_y(tmp_y_s, rule->tuples.ether_proto,
-		       rule->tuples_mask.ether_proto);
-		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
-
-		return true;
-	case BIT(INNER_IP_TOS):
-		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
-		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
-
-		return true;
-	case BIT(INNER_IP_PROTO):
-		calc_x(*key_x, rule->tuples.ip_proto,
-		       rule->tuples_mask.ip_proto);
-		calc_y(*key_y, rule->tuples.ip_proto,
-		       rule->tuples_mask.ip_proto);
-
-		return true;
-	case BIT(INNER_SRC_IP):
-		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
-		       rule->tuples_mask.src_ip[IPV4_INDEX]);
-		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
-		       rule->tuples_mask.src_ip[IPV4_INDEX]);
-		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
-		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
-
-		return true;
-	case BIT(INNER_DST_IP):
-		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
-		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
-		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
-		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
-		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
-		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
-
-		return true;
-	case BIT(INNER_SRC_PORT):
-		calc_x(tmp_x_s, rule->tuples.src_port,
-		       rule->tuples_mask.src_port);
-		calc_y(tmp_y_s, rule->tuples.src_port,
-		       rule->tuples_mask.src_port);
-		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
-
-		return true;
-	case BIT(INNER_DST_PORT):
-		calc_x(tmp_x_s, rule->tuples.dst_port,
-		       rule->tuples_mask.dst_port);
-		calc_y(tmp_y_s, rule->tuples.dst_port,
-		       rule->tuples_mask.dst_port);
-		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+	key_opt = tuple_key_info[tuple_bit].key_opt;
+	offset = tuple_key_info[tuple_bit].offset;
+	moffset = tuple_key_info[tuple_bit].moffset;
+
+	switch (key_opt) {
+	case KEY_OPT_U8:
+		calc_x(*key_x, p[offset], p[moffset]);
+		calc_y(*key_y, p[offset], p[moffset]);
+
+		return true;
+	case KEY_OPT_LE16:
+		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
+		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
+		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+		return true;
+	case KEY_OPT_LE32:
+		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
+		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
+		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+		return true;
+	case KEY_OPT_MAC:
+		for (i = 0; i < ETH_ALEN; i++) {
+			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
+			       p[moffset + i]);
+			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
+			       p[moffset + i]);
+		}
+
+		return true;
+	case KEY_OPT_IP:
+		ip_offset = IPV4_INDEX * sizeof(u32);
+		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
+		       *(u32 *)(&p[moffset + ip_offset]));
+		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
+		       *(u32 *)(&p[moffset + ip_offset]));
+		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
 
 		return true;
 	default:
@@ -5548,12 +5529,12 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
 	for (i = 0 ; i < MAX_TUPLE; i++) {
 		bool tuple_valid;
-		u32 check_tuple;
 
 		tuple_size = tuple_key_info[i].key_length / 8;
-		check_tuple = key_cfg->tuple_active & BIT(i);
+		if (!(key_cfg->tuple_active & BIT(i)))
+			continue;
 
-		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
 						     cur_key_y, rule);
 		if (tuple_valid) {
 			cur_key_x += tuple_size;
@@ -548,9 +548,21 @@ enum HCLGE_FD_META_DATA {
 	MAX_META_DATA,
 };
 
+enum HCLGE_FD_KEY_OPT {
+	KEY_OPT_U8,
+	KEY_OPT_LE16,
+	KEY_OPT_LE32,
+	KEY_OPT_MAC,
+	KEY_OPT_IP,
+	KEY_OPT_VNI,
+};
+
 struct key_info {
 	u8 key_type;
 	u8 key_length; /* use bit as unit */
+	enum HCLGE_FD_KEY_OPT key_opt;
+	int offset;
+	int moffset;
 };
 
 #define MAX_KEY_LENGTH 400
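
As a closing note, one detail of the KEY_OPT_MAC case in the hclge_fd_convert_tuple() hunk above is easy to miss: the six MAC bytes are fetched generically through the byte pointer and offset, but are still written into the key buffer in reverse order (key_x[ETH_ALEN - 1 - i]), just as the old per-field loops did. The standalone sketch below reproduces only that indexing; calc_x()/calc_y() are replaced by a plain copy here, since their definitions are outside this diff, and all names are illustrative rather than the driver's.

/* Standalone sketch (not driver code) of the byte handling in the
 * KEY_OPT_MAC case: fetch bytes by offsetof() through a byte pointer
 * and store them into the key in reverse order.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ETH_ALEN 6

struct demo_rule {
	uint8_t dst_mac[DEMO_ETH_ALEN];
};

int main(void)
{
	struct demo_rule rule = {
		.dst_mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	const uint8_t *p = (const uint8_t *)&rule;
	size_t offset = offsetof(struct demo_rule, dst_mac);
	uint8_t key_x[DEMO_ETH_ALEN];
	int i;

	for (i = 0; i < DEMO_ETH_ALEN; i++)
		key_x[DEMO_ETH_ALEN - 1 - i] = p[offset + i];	/* reversed order */

	for (i = 0; i < DEMO_ETH_ALEN; i++)
		printf("%02x ", (unsigned)key_x[i]);	/* prints 55 44 33 22 11 00 */
	printf("\n");
	return 0;
}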