Commit 0a6ce1e3 authored by David S. Miller

Merge tag 'shared-for-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma

Saeed Mahameed says:

====================
Mellanox mlx5 core driver updates 2016-10-25

This series contains updates and fixes for the mlx5 core and IB drivers,
along with two new features that require new low-level commands and
infrastructure updates:
 - SRIOV VF max rate limit support
 - mlx5e tc support for FWD rules with counter.

Needed for both net and rdma subsystems.

Updates and Fixes:
From Saeed Mahameed (2):
  - mlx5 IB: Skip handling unknown mlx5 events
  - Add ConnectX-5 PCIe 4.0 VF device ID

From Artemy Kovalyov (2):
  - Update struct mlx5_ifc_xrqc_bits
  - Ensure SRQ physical address structure endianness

From Eugenia Emantayev (1):
  - Fix length of async_event_mask

New Features:
From Mohamad Haj Yahia (3): mlx5 SRIOV VF max rate limit support (a condensed sketch follows this list)
  - Introduce TSAR manipulation firmware commands
  - Introduce E-switch QoS management
  - Add SRIOV VF max rate configuration support
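
  In condensed form (trimmed from the en_main.c and eswitch.c changes further
  down; error handling and locking omitted), the new VF rate path looks like
  this. Only a maximum rate is supported, so a nonzero min_tx_rate is rejected:

    static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
                                 int max_tx_rate)
    {
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (min_tx_rate)
            return -EOPNOTSUPP;

        /* VF n is e-switch vport n + 1; vport 0 is the PF */
        return mlx5_eswitch_set_vport_rate(priv->mdev->priv.eswitch, vf + 1,
                                           max_tx_rate);
    }

  mlx5_eswitch_set_vport_rate() lands in esw_vport_qos_config(), which issues
  a MODIFY_SCHEDULING_ELEMENT command to update max_average_bw on the vport's
  scheduling element under the root TSAR. From userspace the limit is applied
  with "ip link set <pf> vf <n> max_tx_rate <rate>".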

From Mark Bloch (7): mlx5e tc support for FWD rule with counter
  - Don't unlock fte while still using it
  - Use fte status to decide on firmware command
  - Refactor find_flow_rule
  - Group similar rules under the same fte
  - Add multi dest support
  - Add option to add fwd rule with counter
  - mlx5e tc support for FWD rule with counter
  Here Mark fixed two trivial issues in the flow steering core and refactored
  the flow steering API to support adding multi-destination rules to the same
  hardware flow table entry at once. The last two patches add the ability to
  attach a flow counter to that same flow entry, as sketched below.
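
As a rough sketch of the reworked API (function and flag names are taken from
the diff below; the counter-destination fields follow the mlx5 code of this
period and may differ in detail), one flow table entry can now forward and
count at the same time:

    struct mlx5_flow_destination dest[2] = {};
    struct mlx5_flow_handle *rule;
    struct mlx5_fc *counter;

    counter = mlx5_fc_create(dev, true);      /* flow counter with aging */

    dest[0].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
    dest[0].vport_num = vport;
    dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
    dest[1].counter = counter;

    rule = mlx5_add_flow_rules(ft, spec,
                               MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                               MLX5_FLOW_CONTEXT_ACTION_COUNT,
                               MLX5_FS_DEFAULT_FLOW_TAG,
                               dest, 2);
    ...
    mlx5_del_flow_rules(rule);        /* one handle covers all destinations */

mlx5_add_flow_rules() groups rules with identical match values under a single
FTE and returns one mlx5_flow_handle for all of its destinations, which is why
mlx5_flow_rule is replaced by mlx5_flow_handle throughout the diff.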

V2: Dropped some patches that added new structures without adding any usage of them.
    Added SRIOV VF max rate configuration support patch that introduces
    the usage of the TSAR infrastructure.
    Added flow steering fixes and refactoring in addition to mlx5 tc
    support for forward rule with counter.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d46b6349 e37a79e5
@@ -1771,13 +1771,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
     mutex_lock(&dev->flow_db.lock);
     list_for_each_entry_safe(iter, tmp, &handler->list, list) {
-        mlx5_del_flow_rule(iter->rule);
+        mlx5_del_flow_rules(iter->rule);
         put_flow_table(dev, iter->prio, true);
         list_del(&iter->list);
         kfree(iter);
     }
-    mlx5_del_flow_rule(handler->rule);
+    mlx5_del_flow_rules(handler->rule);
     put_flow_table(dev, handler->prio, true);
     mutex_unlock(&dev->flow_db.lock);
@@ -1907,10 +1907,10 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
     spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
     action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
         MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
-    handler->rule = mlx5_add_flow_rule(ft, spec,
+    handler->rule = mlx5_add_flow_rules(ft, spec,
                        action,
                        MLX5_FS_DEFAULT_FLOW_TAG,
-                       dst);
+                       dst, 1);
     if (IS_ERR(handler->rule)) {
         err = PTR_ERR(handler->rule);
@@ -1941,7 +1941,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
         handler_dst = create_flow_rule(dev, ft_prio,
                        flow_attr, dst);
         if (IS_ERR(handler_dst)) {
-            mlx5_del_flow_rule(handler->rule);
+            mlx5_del_flow_rules(handler->rule);
             ft_prio->refcount--;
             kfree(handler);
             handler = handler_dst;
@@ -2004,7 +2004,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
                  &leftovers_specs[LEFTOVERS_UC].flow_attr,
                  dst);
         if (IS_ERR(handler_ucast)) {
-            mlx5_del_flow_rule(handler->rule);
+            mlx5_del_flow_rules(handler->rule);
             ft_prio->refcount--;
             kfree(handler);
             handler = handler_ucast;
@@ -2046,7 +2046,7 @@ static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
     return handler_rx;

 err_tx:
-    mlx5_del_flow_rule(handler_rx->rule);
+    mlx5_del_flow_rules(handler_rx->rule);
     ft_rx->refcount--;
     kfree(handler_rx);
 err:
@@ -2358,6 +2358,8 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
         ibev.event = IB_EVENT_CLIENT_REREGISTER;
         port = (u8)param;
         break;
+    default:
+        return;
     }

     ibev.device = &ibdev->ib_dev;
...
@@ -153,7 +153,7 @@ struct mlx5_ib_flow_handler {
     struct list_head list;
     struct ib_flow ibflow;
     struct mlx5_ib_flow_prio *prio;
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;
 };

 struct mlx5_ib_flow_db {
...
@@ -318,6 +318,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
     case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
     case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
     case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+    case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+    case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
         return MLX5_CMD_STAT_OK;

     case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -419,11 +421,14 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
     case MLX5_CMD_OP_QUERY_FLOW_TABLE:
     case MLX5_CMD_OP_CREATE_FLOW_GROUP:
     case MLX5_CMD_OP_QUERY_FLOW_GROUP:
     case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
     case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
     case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
     case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+    case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+    case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+    case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+    case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
         *status = MLX5_DRIVER_STATUS_ABORTED;
         *synd = MLX5_DRIVER_SYND;
         return -EIO;
@@ -580,6 +585,12 @@ const char *mlx5_command_str(int command)
     MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
     MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
     MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
+    MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+    MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+    MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+    MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+    MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+    MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
     default: return "unknown command opcode";
     }
 }
...
@@ -524,7 +524,7 @@ struct mlx5e_vxlan_db {
 struct mlx5e_l2_rule {
     u8 addr[ETH_ALEN + 2];
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;
 };

 struct mlx5e_flow_table {
@@ -545,10 +545,10 @@ struct mlx5e_tc_table {
 struct mlx5e_vlan_table {
     struct mlx5e_flow_table ft;
     unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-    struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID];
-    struct mlx5_flow_rule *untagged_rule;
-    struct mlx5_flow_rule *any_vlan_rule;
+    struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
+    struct mlx5_flow_handle *untagged_rule;
+    struct mlx5_flow_handle *any_vlan_rule;
     bool filter_disabled;
 };

 struct mlx5e_l2_table {
@@ -566,14 +566,14 @@ struct mlx5e_l2_table {
 /* L3/L4 traffic type classifier */
 struct mlx5e_ttc_table {
     struct mlx5e_flow_table ft;
-    struct mlx5_flow_rule *rules[MLX5E_NUM_TT];
+    struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
 };

 #define ARFS_HASH_SHIFT BITS_PER_BYTE
 #define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

 struct arfs_table {
     struct mlx5e_flow_table ft;
-    struct mlx5_flow_rule *default_rule;
+    struct mlx5_flow_handle *default_rule;
     struct hlist_head rules_hash[ARFS_HASH_SIZE];
 };
...
@@ -56,7 +56,7 @@ struct arfs_tuple {
 struct arfs_rule {
     struct mlx5e_priv *priv;
     struct work_struct arfs_work;
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;
     struct hlist_node hlist;
     int rxq;
     /* Flow ID passed to ndo_rx_flow_steer */
@@ -104,7 +104,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
         tt = arfs_get_tt(i);
         /* Modify ttc rules destination to bypass the aRFS tables*/
         err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
-                           &dest);
+                           &dest, NULL);
         if (err) {
             netdev_err(priv->netdev,
                    "%s: modify ttc destination failed\n",
@@ -137,7 +137,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
         tt = arfs_get_tt(i);
         /* Modify ttc rules destination to point on the aRFS FTs */
         err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
-                           &dest);
+                           &dest, NULL);
         if (err) {
             netdev_err(priv->netdev,
                    "%s: modify ttc destination failed err=%d\n",
@@ -151,7 +151,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 static void arfs_destroy_table(struct arfs_table *arfs_t)
 {
-    mlx5_del_flow_rule(arfs_t->default_rule);
+    mlx5_del_flow_rules(arfs_t->default_rule);
     mlx5e_destroy_flow_table(&arfs_t->ft);
 }
@@ -205,10 +205,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
         goto out;
     }

-    arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
+    arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                            MLX5_FS_DEFAULT_FLOW_TAG,
-                           &dest);
+                           &dest, 1);
     if (IS_ERR(arfs_t->default_rule)) {
         err = PTR_ERR(arfs_t->default_rule);
         arfs_t->default_rule = NULL;
@@ -396,7 +396,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
     spin_unlock_bh(&priv->fs.arfs.arfs_lock);
     hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
         if (arfs_rule->rule)
-            mlx5_del_flow_rule(arfs_rule->rule);
+            mlx5_del_flow_rules(arfs_rule->rule);
         hlist_del(&arfs_rule->hlist);
         kfree(arfs_rule);
     }
@@ -420,7 +420,7 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
     hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
         cancel_work_sync(&rule->arfs_work);
         if (rule->rule)
-            mlx5_del_flow_rule(rule->rule);
+            mlx5_del_flow_rules(rule->rule);
         hlist_del(&rule->hlist);
         kfree(rule);
     }
@@ -462,12 +462,12 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
     return NULL;
 }

-static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
+static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
                         struct arfs_rule *arfs_rule)
 {
     struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
     struct arfs_tuple *tuple = &arfs_rule->tuple;
-    struct mlx5_flow_rule *rule = NULL;
+    struct mlx5_flow_handle *rule = NULL;
     struct mlx5_flow_destination dest;
     struct arfs_table *arfs_table;
     struct mlx5_flow_spec *spec;
@@ -544,9 +544,9 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
     }
     dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
     dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
-    rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                  MLX5_FS_DEFAULT_FLOW_TAG,
-                  &dest);
+    rule = mlx5_add_flow_rules(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                   MLX5_FS_DEFAULT_FLOW_TAG,
+                   &dest, 1);
     if (IS_ERR(rule)) {
         err = PTR_ERR(rule);
         netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
@@ -559,14 +559,14 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
 }

 static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
-                struct mlx5_flow_rule *rule, u16 rxq)
+                struct mlx5_flow_handle *rule, u16 rxq)
 {
     struct mlx5_flow_destination dst;
     int err = 0;

     dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
     dst.tir_num = priv->direct_tir[rxq].tirn;
-    err = mlx5_modify_rule_destination(rule, &dst);
+    err = mlx5_modify_rule_destination(rule, &dst, NULL);
     if (err)
         netdev_warn(priv->netdev,
                 "Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
@@ -578,7 +578,7 @@ static void arfs_handle_work(struct work_struct *work)
                     struct arfs_rule,
                     arfs_work);
     struct mlx5e_priv *priv = arfs_rule->priv;
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;

     mutex_lock(&priv->state_lock);
     if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
...
@@ -160,7 +160,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 {
     struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
     struct mlx5_flow_destination dest;
-    struct mlx5_flow_rule **rule_p;
+    struct mlx5_flow_handle **rule_p;
     int err = 0;

     dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -187,10 +187,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
         break;
     }

-    *rule_p = mlx5_add_flow_rule(ft, spec,
+    *rule_p = mlx5_add_flow_rules(ft, spec,
                      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                      MLX5_FS_DEFAULT_FLOW_TAG,
-                     &dest);
+                     &dest, 1);
     if (IS_ERR(*rule_p)) {
         err = PTR_ERR(*rule_p);
@@ -229,20 +229,20 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
     switch (rule_type) {
     case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
         if (priv->fs.vlan.untagged_rule) {
-            mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
+            mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
             priv->fs.vlan.untagged_rule = NULL;
         }
         break;
     case MLX5E_VLAN_RULE_TYPE_ANY_VID:
         if (priv->fs.vlan.any_vlan_rule) {
-            mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
+            mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
             priv->fs.vlan.any_vlan_rule = NULL;
         }
         break;
     case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
         mlx5e_vport_context_update_vlans(priv);
         if (priv->fs.vlan.active_vlans_rule[vid]) {
-            mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
+            mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
             priv->fs.vlan.active_vlans_rule[vid] = NULL;
         }
         mlx5e_vport_context_update_vlans(priv);
@@ -560,7 +560,7 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
     for (i = 0; i < MLX5E_NUM_TT; i++) {
         if (!IS_ERR_OR_NULL(ttc->rules[i])) {
-            mlx5_del_flow_rule(ttc->rules[i]);
+            mlx5_del_flow_rules(ttc->rules[i]);
             ttc->rules[i] = NULL;
         }
     }
@@ -616,13 +616,14 @@ static struct {
     },
 };

-static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
-                          struct mlx5_flow_table *ft,
-                          struct mlx5_flow_destination *dest,
-                          u16 etype,
-                          u8 proto)
+static struct mlx5_flow_handle *
+mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+            struct mlx5_flow_table *ft,
+            struct mlx5_flow_destination *dest,
+            u16 etype,
+            u8 proto)
 {
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;
     struct mlx5_flow_spec *spec;
     int err = 0;
@@ -643,10 +644,10 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
         MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
     }

-    rule = mlx5_add_flow_rule(ft, spec,
+    rule = mlx5_add_flow_rules(ft, spec,
                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                   MLX5_FS_DEFAULT_FLOW_TAG,
-                  dest);
+                  dest, 1);
     if (IS_ERR(rule)) {
         err = PTR_ERR(rule);
         netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
@@ -660,7 +661,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
 {
     struct mlx5_flow_destination dest;
     struct mlx5e_ttc_table *ttc;
-    struct mlx5_flow_rule **rules;
+    struct mlx5_flow_handle **rules;
     struct mlx5_flow_table *ft;
     int tt;
     int err;
@@ -801,7 +802,7 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                    struct mlx5e_l2_rule *ai)
 {
     if (!IS_ERR_OR_NULL(ai->rule)) {
-        mlx5_del_flow_rule(ai->rule);
+        mlx5_del_flow_rules(ai->rule);
         ai->rule = NULL;
     }
 }
@@ -847,9 +848,9 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
         break;
     }

-    ai->rule = mlx5_add_flow_rule(ft, spec,
+    ai->rule = mlx5_add_flow_rules(ft, spec,
                       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                      MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+                      MLX5_FS_DEFAULT_FLOW_TAG, &dest, 1);
     if (IS_ERR(ai->rule)) {
         netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
                __func__, mv_dmac);
...
@@ -36,7 +36,7 @@
 struct mlx5e_ethtool_rule {
     struct list_head list;
     struct ethtool_rx_flow_spec flow_spec;
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;
     struct mlx5e_ethtool_table *eth_ft;
 };
@@ -284,13 +284,14 @@ static bool outer_header_zero(u32 *match_criteria)
                       size - 1);
 }

-static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
-                        struct mlx5_flow_table *ft,
-                        struct ethtool_rx_flow_spec *fs)
+static struct mlx5_flow_handle *
+add_ethtool_flow_rule(struct mlx5e_priv *priv,
+              struct mlx5_flow_table *ft,
+              struct ethtool_rx_flow_spec *fs)
 {
     struct mlx5_flow_destination *dst = NULL;
     struct mlx5_flow_spec *spec;
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;
     int err = 0;
     u32 action;
@@ -317,8 +318,8 @@ static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
     }
     spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
-    rule = mlx5_add_flow_rule(ft, spec, action,
-                  MLX5_FS_DEFAULT_FLOW_TAG, dst);
+    rule = mlx5_add_flow_rules(ft, spec, action,
+                   MLX5_FS_DEFAULT_FLOW_TAG, dst, 1);
     if (IS_ERR(rule)) {
         err = PTR_ERR(rule);
         netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
@@ -335,7 +336,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
                  struct mlx5e_ethtool_rule *eth_rule)
 {
     if (eth_rule->rule)
-        mlx5_del_flow_rule(eth_rule->rule);
+        mlx5_del_flow_rules(eth_rule->rule);
     list_del(&eth_rule->list);
     priv->fs.ethtool.tot_num_rules--;
     put_flow_table(eth_rule->eth_ft);
@@ -475,7 +476,7 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
 {
     struct mlx5e_ethtool_table *eth_ft;
     struct mlx5e_ethtool_rule *eth_rule;
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;
     int num_tuples;
     int err;
...
@@ -2925,6 +2925,20 @@ static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
     return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
 }

+static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+                 int max_tx_rate)
+{
+    struct mlx5e_priv *priv = netdev_priv(dev);
+    struct mlx5_core_dev *mdev = priv->mdev;
+
+    if (min_tx_rate)
+        return -EOPNOTSUPP;
+
+    return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
+                       max_tx_rate);
+}
+
 static int mlx5_vport_link2ifla(u8 esw_link)
 {
     switch (esw_link) {
@@ -3232,6 +3246,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
     .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
     .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
     .ndo_set_vf_trust = mlx5e_set_vf_trust,
+    .ndo_set_vf_rate = mlx5e_set_vf_rate,
     .ndo_get_vf_config = mlx5e_get_vf_config,
     .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
     .ndo_get_vf_stats = mlx5e_get_vf_stats,
...
@@ -328,7 +328,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
     struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
     struct mlx5_eswitch_rep *rep = priv->ppriv;
     struct mlx5_core_dev *mdev = priv->mdev;
-    struct mlx5_flow_rule *flow_rule;
+    struct mlx5_flow_handle *flow_rule;
     int err;
     int i;
@@ -360,7 +360,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
     return 0;

 err_del_flow_rule:
-    mlx5_del_flow_rule(rep->vport_rx_rule);
+    mlx5_del_flow_rules(rep->vport_rx_rule);
 err_destroy_direct_tirs:
     mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
@@ -375,7 +375,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
     int i;

     mlx5e_tc_cleanup(priv);
-    mlx5_del_flow_rule(rep->vport_rx_rule);
+    mlx5_del_flow_rules(rep->vport_rx_rule);
     mlx5e_destroy_direct_tirs(priv);
     for (i = 0; i < priv->params.num_channels; i++)
         mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
...
@@ -47,21 +47,22 @@
 struct mlx5e_tc_flow {
     struct rhash_head node;
     u64 cookie;
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;
     struct mlx5_esw_flow_attr *attr;
 };

 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
 #define MLX5E_TC_TABLE_NUM_GROUPS 4

-static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
-                        struct mlx5_flow_spec *spec,
-                        u32 action, u32 flow_tag)
+static struct mlx5_flow_handle *
+mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+              struct mlx5_flow_spec *spec,
+              u32 action, u32 flow_tag)
 {
     struct mlx5_core_dev *dev = priv->mdev;
     struct mlx5_flow_destination dest = { 0 };
     struct mlx5_fc *counter = NULL;
-    struct mlx5_flow_rule *rule;
+    struct mlx5_flow_handle *rule;
     bool table_created = false;

     if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
@@ -94,9 +95,9 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
     }

     spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-    rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
+    rule = mlx5_add_flow_rules(priv->fs.tc.t, spec,
                   action, flow_tag,
-                  &dest);
+                  &dest, 1);
     if (IS_ERR(rule))
         goto err_add_rule;
@@ -114,9 +115,10 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
     return rule;
 }

-static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
-                        struct mlx5_flow_spec *spec,
-                        struct mlx5_esw_flow_attr *attr)
+static struct mlx5_flow_handle *
+mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+              struct mlx5_flow_spec *spec,
+              struct mlx5_esw_flow_attr *attr)
 {
     struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
     int err;
@@ -129,7 +131,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 }

 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
-                  struct mlx5_flow_rule *rule,
+                  struct mlx5_flow_handle *rule,
                   struct mlx5_esw_flow_attr *attr)
 {
     struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -140,7 +142,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
     if (esw && esw->mode == SRIOV_OFFLOADS)
         mlx5_eswitch_del_vlan_action(esw, attr);

-    mlx5_del_flow_rule(rule);
+    mlx5_del_flow_rules(rule);

     mlx5_fc_destroy(priv->mdev, counter);
@@ -417,7 +419,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                 return -EINVAL;
             }

-            attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+            attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+                MLX5_FLOW_CONTEXT_ACTION_COUNT;
             out_priv = netdev_priv(out_dev);
             attr->out_rep = out_priv->ppriv;
             continue;
@@ -450,7 +453,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
     u32 flow_tag, action;
     struct mlx5e_tc_flow *flow;
     struct mlx5_flow_spec *spec;
-    struct mlx5_flow_rule *old = NULL;
+    struct mlx5_flow_handle *old = NULL;
     struct mlx5_esw_flow_attr *old_attr = NULL;
     struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -511,7 +514,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
     goto out;

 err_del_rule:
-    mlx5_del_flow_rule(flow->rule);
+    mlx5_del_flow_rules(flow->rule);

 err_free:
     if (!old)
...
@@ -469,7 +469,7 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
 int mlx5_start_eqs(struct mlx5_core_dev *dev)
 {
     struct mlx5_eq_table *table = &dev->priv.eq_table;
-    u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+    u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
     int err;

     if (MLX5_CAP_GEN(dev, pg))
...
@@ -56,7 +56,7 @@ struct esw_uc_addr {
 /* E-Switch MC FDB table hash node */
 struct esw_mc_addr { /* SRIOV only */
     struct l2addr_node node;
-    struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
+    struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
     u32 refcnt;
 };
@@ -65,7 +65,7 @@ struct vport_addr {
     struct l2addr_node node;
     u8 action;
     u32 vport;
-    struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+    struct mlx5_flow_handle *flow_rule; /* SRIOV only */
     /* A flag indicating that mac was added due to mc promiscuous vport */
     bool mc_promisc;
 };
@@ -237,13 +237,13 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
 }

 /* E-Switch FDB */
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
              u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
 {
     int match_header = (is_zero_ether_addr(mac_c) ? 0 :
                 MLX5_MATCH_OUTER_HEADERS);
-    struct mlx5_flow_rule *flow_rule = NULL;
+    struct mlx5_flow_handle *flow_rule = NULL;
     struct mlx5_flow_destination dest;
     struct mlx5_flow_spec *spec;
     void *mv_misc = NULL;
@@ -286,9 +286,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
           dmac_v, dmac_c, vport);
     spec->match_criteria_enable = match_header;
     flow_rule =
-        mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
-                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                   0, &dest);
+        mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
+                    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                    0, &dest, 1);
     if (IS_ERR(flow_rule)) {
         esw_warn(esw->dev,
              "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
@@ -300,7 +300,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
     return flow_rule;
 }

-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
 {
     u8 mac_c[ETH_ALEN];
@@ -309,7 +309,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
     return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
 }

-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
 {
     u8 mac_c[ETH_ALEN];
@@ -322,7 +322,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
     return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
 }

-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
 {
     u8 mac_c[ETH_ALEN];
@@ -515,7 +515,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
     del_l2_table_entry(esw->dev, esw_uc->table_index);

     if (vaddr->flow_rule)
-        mlx5_del_flow_rule(vaddr->flow_rule);
+        mlx5_del_flow_rules(vaddr->flow_rule);
     vaddr->flow_rule = NULL;

     l2addr_hash_del(esw_uc);
@@ -562,7 +562,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
         case MLX5_ACTION_DEL:
             if (!iter_vaddr)
                 continue;
-            mlx5_del_flow_rule(iter_vaddr->flow_rule);
+            mlx5_del_flow_rules(iter_vaddr->flow_rule);
             l2addr_hash_del(iter_vaddr);
             break;
         }
@@ -632,7 +632,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
           esw_mc->uplink_rule);

     if (vaddr->flow_rule)
-        mlx5_del_flow_rule(vaddr->flow_rule);
+        mlx5_del_flow_rules(vaddr->flow_rule);
     vaddr->flow_rule = NULL;

     /* If the multicast mac is added as a result of mc promiscuous vport,
@@ -645,7 +645,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
     update_allmulti_vports(esw, vaddr, esw_mc);

     if (esw_mc->uplink_rule)
-        mlx5_del_flow_rule(esw_mc->uplink_rule);
+        mlx5_del_flow_rules(esw_mc->uplink_rule);

     l2addr_hash_del(esw_mc);
     return 0;
@@ -828,14 +828,14 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
                 UPLINK_VPORT);
         allmulti_addr->refcnt++;
     } else if (vport->allmulti_rule) {
-        mlx5_del_flow_rule(vport->allmulti_rule);
+        mlx5_del_flow_rules(vport->allmulti_rule);
         vport->allmulti_rule = NULL;

         if (--allmulti_addr->refcnt > 0)
             goto promisc;

         if (allmulti_addr->uplink_rule)
-            mlx5_del_flow_rule(allmulti_addr->uplink_rule);
+            mlx5_del_flow_rules(allmulti_addr->uplink_rule);
         allmulti_addr->uplink_rule = NULL;
     }
@@ -847,7 +847,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
         vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
                                      vport_num);
     } else if (vport->promisc_rule) {
-        mlx5_del_flow_rule(vport->promisc_rule);
+        mlx5_del_flow_rules(vport->promisc_rule);
         vport->promisc_rule = NULL;
     }
 }
@@ -1018,10 +1018,10 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
                        struct mlx5_vport *vport)
 {
     if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
-        mlx5_del_flow_rule(vport->egress.allowed_vlan);
+        mlx5_del_flow_rules(vport->egress.allowed_vlan);

     if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
-        mlx5_del_flow_rule(vport->egress.drop_rule);
+        mlx5_del_flow_rules(vport->egress.drop_rule);

     vport->egress.allowed_vlan = NULL;
     vport->egress.drop_rule = NULL;
@@ -1179,10 +1179,10 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
                         struct mlx5_vport *vport)
 {
     if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
-        mlx5_del_flow_rule(vport->ingress.drop_rule);
+        mlx5_del_flow_rules(vport->ingress.drop_rule);

     if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
-        mlx5_del_flow_rule(vport->ingress.allow_rule);
+        mlx5_del_flow_rules(vport->ingress.allow_rule);

     vport->ingress.drop_rule = NULL;
     vport->ingress.allow_rule = NULL;
@@ -1265,9 +1265,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
     spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
     vport->ingress.allow_rule =
-        mlx5_add_flow_rule(vport->ingress.acl, spec,
-                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
-                   0, NULL);
+        mlx5_add_flow_rules(vport->ingress.acl, spec,
+                    MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+                    0, NULL, 0);
     if (IS_ERR(vport->ingress.allow_rule)) {
         err = PTR_ERR(vport->ingress.allow_rule);
         esw_warn(esw->dev,
@@ -1279,9 +1279,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
     memset(spec, 0, sizeof(*spec));
     vport->ingress.drop_rule =
-        mlx5_add_flow_rule(vport->ingress.acl, spec,
-                   MLX5_FLOW_CONTEXT_ACTION_DROP,
-                   0, NULL);
+        mlx5_add_flow_rules(vport->ingress.acl, spec,
+                    MLX5_FLOW_CONTEXT_ACTION_DROP,
+                    0, NULL, 0);
     if (IS_ERR(vport->ingress.drop_rule)) {
         err = PTR_ERR(vport->ingress.drop_rule);
         esw_warn(esw->dev,
@@ -1339,9 +1339,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
     spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
     vport->egress.allowed_vlan =
-        mlx5_add_flow_rule(vport->egress.acl, spec,
-                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
-                   0, NULL);
+        mlx5_add_flow_rules(vport->egress.acl, spec,
+                    MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+                    0, NULL, 0);
     if (IS_ERR(vport->egress.allowed_vlan)) {
         err = PTR_ERR(vport->egress.allowed_vlan);
         esw_warn(esw->dev,
@@ -1354,9 +1354,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
     /* Drop others rule (star rule) */
     memset(spec, 0, sizeof(*spec));
     vport->egress.drop_rule =
-        mlx5_add_flow_rule(vport->egress.acl, spec,
-                   MLX5_FLOW_CONTEXT_ACTION_DROP,
-                   0, NULL);
+        mlx5_add_flow_rules(vport->egress.acl, spec,
+                    MLX5_FLOW_CONTEXT_ACTION_DROP,
+                    0, NULL, 0);
     if (IS_ERR(vport->egress.drop_rule)) {
         err = PTR_ERR(vport->egress.drop_rule);
         esw_warn(esw->dev,
@@ -1369,6 +1369,147 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
     return err;
 }
+/* Vport QoS management */
+static int esw_create_tsar(struct mlx5_eswitch *esw)
+{
+    u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+    struct mlx5_core_dev *dev = esw->dev;
+    int err;
+
+    if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+        return 0;
+
+    if (esw->qos.enabled)
+        return -EEXIST;
+
+    err = mlx5_create_scheduling_element_cmd(dev,
+                         SCHEDULING_HIERARCHY_E_SWITCH,
+                         &tsar_ctx,
+                         &esw->qos.root_tsar_id);
+    if (err) {
+        esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
+        return err;
+    }
+
+    esw->qos.enabled = true;
+    return 0;
+}
+
+static void esw_destroy_tsar(struct mlx5_eswitch *esw)
+{
+    int err;
+
+    if (!esw->qos.enabled)
+        return;
+
+    err = mlx5_destroy_scheduling_element_cmd(esw->dev,
+                          SCHEDULING_HIERARCHY_E_SWITCH,
+                          esw->qos.root_tsar_id);
+    if (err)
+        esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
+
+    esw->qos.enabled = false;
+}
+
+static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
+                u32 initial_max_rate)
+{
+    u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+    struct mlx5_vport *vport = &esw->vports[vport_num];
+    struct mlx5_core_dev *dev = esw->dev;
+    void *vport_elem;
+    int err = 0;
+
+    if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
+        !MLX5_CAP_QOS(dev, esw_scheduling))
+        return 0;
+
+    if (vport->qos.enabled)
+        return -EEXIST;
+
+    MLX5_SET(scheduling_context, &sched_ctx, element_type,
+         SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+    vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+                  element_attributes);
+    MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+    MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+         esw->qos.root_tsar_id);
+    MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+         initial_max_rate);
+
+    err = mlx5_create_scheduling_element_cmd(dev,
+                         SCHEDULING_HIERARCHY_E_SWITCH,
+                         &sched_ctx,
+                         &vport->qos.esw_tsar_ix);
+    if (err) {
+        esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
+             vport_num, err);
+        return err;
+    }
+
+    vport->qos.enabled = true;
+    return 0;
+}
+
+static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
+{
+    struct mlx5_vport *vport = &esw->vports[vport_num];
+    int err = 0;
+
+    if (!vport->qos.enabled)
+        return;
+
+    err = mlx5_destroy_scheduling_element_cmd(esw->dev,
+                          SCHEDULING_HIERARCHY_E_SWITCH,
+                          vport->qos.esw_tsar_ix);
+    if (err)
+        esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
+             vport_num, err);
+
+    vport->qos.enabled = false;
+}
+
+static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
+                u32 max_rate)
+{
+    u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+    struct mlx5_vport *vport = &esw->vports[vport_num];
+    struct mlx5_core_dev *dev = esw->dev;
+    void *vport_elem;
+    u32 bitmask = 0;
+    int err = 0;
+
+    if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+        return -EOPNOTSUPP;
+
+    if (!vport->qos.enabled)
+        return -EIO;
+
+    MLX5_SET(scheduling_context, &sched_ctx, element_type,
+         SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+    vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+                  element_attributes);
+    MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+    MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+         esw->qos.root_tsar_id);
+    MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+         max_rate);
+    bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+
+    err = mlx5_modify_scheduling_element_cmd(dev,
+                         SCHEDULING_HIERARCHY_E_SWITCH,
+                         &sched_ctx,
+                         vport->qos.esw_tsar_ix,
+                         bitmask);
+    if (err) {
+        esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
+             vport_num, err);
+        return err;
+    }
+
+    return 0;
+}
+
 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
 {
     ((u8 *)node_guid)[7] = mac[0];
@@ -1404,6 +1545,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
             esw_vport_egress_config(esw, vport);
     }
 }
+
 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
                  int enable_events)
 {
@@ -1417,6 +1559,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
     /* Restore old vport configuration */
     esw_apply_vport_conf(esw, vport);

+    /* Attach vport to the eswitch rate limiter */
+    if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate))
+        esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
+
     /* Sync with current vport context */
     vport->enabled_events = enable_events;
     vport->enabled = true;
@@ -1455,7 +1601,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
      */
     esw_vport_change_handle_locked(vport);
     vport->enabled_events = 0;
-
+    esw_vport_disable_qos(esw, vport_num);
     if (vport_num && esw->mode == SRIOV_LEGACY) {
         mlx5_modify_vport_admin_state(esw->dev,
                           MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
@@ -1501,6 +1647,10 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
     if (err)
         goto abort;

+    err = esw_create_tsar(esw);
+    if (err)
+        esw_warn(esw->dev, "Failed to create eswitch TSAR");
+
     enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
     for (i = 0; i <= nvfs; i++)
         esw_enable_vport(esw, i, enabled_events);
@@ -1535,7 +1685,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
         esw_disable_vport(esw, i);

     if (mc_promisc && mc_promisc->uplink_rule)
-        mlx5_del_flow_rule(mc_promisc->uplink_rule);
+        mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+    esw_destroy_tsar(esw);

     if (esw->mode == SRIOV_LEGACY)
         esw_destroy_legacy_fdb_table(esw);
@@ -1795,6 +1947,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
     ivi->qos = evport->info.qos;
     ivi->spoofchk = evport->info.spoofchk;
     ivi->trusted = evport->info.trusted;
+    ivi->max_tx_rate = evport->info.max_rate;
     mutex_unlock(&esw->state_lock);

     return 0;
@@ -1888,6 +2041,27 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
     return 0;
 }

+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
+                int vport, u32 max_rate)
+{
+    struct mlx5_vport *evport;
+    int err = 0;
+
+    if (!ESW_ALLOWED(esw))
+        return -EPERM;
+    if (!LEGAL_VPORT(esw, vport))
+        return -EINVAL;
+
+    mutex_lock(&esw->state_lock);
+    evport = &esw->vports[vport];
+    err = esw_vport_qos_config(esw, vport, max_rate);
+    if (!err)
+        evport->info.max_rate = max_rate;
+
+    mutex_unlock(&esw->state_lock);
+    return err;
+}
+
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                  int vport,
                  struct ifla_vf_stats *vf_stats)
...
@@ -97,16 +97,16 @@ struct vport_ingress {
     struct mlx5_flow_group *allow_spoofchk_only_grp;
     struct mlx5_flow_group *allow_untagged_only_grp;
     struct mlx5_flow_group *drop_grp;
-    struct mlx5_flow_rule *allow_rule;
-    struct mlx5_flow_rule *drop_rule;
+    struct mlx5_flow_handle *allow_rule;
+    struct mlx5_flow_handle *drop_rule;
 };

 struct vport_egress {
     struct mlx5_flow_table *acl;
     struct mlx5_flow_group *allowed_vlans_grp;
     struct mlx5_flow_group *drop_grp;
-    struct mlx5_flow_rule *allowed_vlan;
-    struct mlx5_flow_rule *drop_rule;
+    struct mlx5_flow_handle *allowed_vlan;
+    struct mlx5_flow_handle *drop_rule;
 };

 struct mlx5_vport_info {
@@ -115,6 +115,7 @@ struct mlx5_vport_info {
     u8 qos;
     u64 node_guid;
     int link_state;
+    u32 max_rate;
     bool spoofchk;
     bool trusted;
 };
@@ -124,8 +125,8 @@ struct mlx5_vport {
     int vport;
     struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
    struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
-    struct mlx5_flow_rule *promisc_rule;
-    struct mlx5_flow_rule *allmulti_rule;
+    struct mlx5_flow_handle *promisc_rule;
+    struct mlx5_flow_handle *allmulti_rule;
     struct work_struct vport_change_handler;

     struct vport_ingress ingress;
@@ -133,6 +134,11 @@ struct mlx5_vport {
     struct mlx5_vport_info info;

+    struct {
+        bool enabled;
+        u32 esw_tsar_ix;
+    } qos;
+
     bool enabled;
     u16 enabled_events;
 };
@@ -156,7 +162,7 @@ struct mlx5_eswitch_fdb {
         struct mlx5_flow_table *fdb;
         struct mlx5_flow_group *send_to_vport_grp;
         struct mlx5_flow_group *miss_grp;
-        struct mlx5_flow_rule *miss_rule;
+        struct mlx5_flow_handle *miss_rule;
         int vlan_push_pop_refcount;
     } offloads;
 };
@@ -169,7 +175,7 @@ enum {
 };

 struct mlx5_esw_sq {
-    struct mlx5_flow_rule *send_to_vport_rule;
+    struct mlx5_flow_handle *send_to_vport_rule;
     struct list_head list;
 };
@@ -182,7 +188,7 @@ struct mlx5_eswitch_rep {
     u8 hw_id[ETH_ALEN];
     void *priv_data;

-    struct mlx5_flow_rule *vport_rx_rule;
+    struct mlx5_flow_handle *vport_rx_rule;
     struct list_head vport_sqs_list;
     u16 vlan;
     u32 vlan_refcount;
@@ -209,6 +215,12 @@ struct mlx5_eswitch {
      */
     struct mutex state_lock;
     struct esw_mc_addr *mc_promisc;

+    struct {
+        bool enabled;
+        u32 root_tsar_id;
+    } qos;
+
     struct mlx5_esw_offload offloads;
     int mode;
 };
@@ -234,6 +246,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
                     int vport, bool spoofchk);
 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
                  int vport_num, bool setting);
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
+                int vport, u32 max_rate);
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
                   int vport, struct ifla_vf_info *ivi);
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -243,11 +257,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;

-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                 struct mlx5_flow_spec *spec,
                 struct mlx5_esw_flow_attr *attr);
-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);

 enum {
...
@@ -43,16 +43,17 @@ enum {
 	FDB_SLOW_PATH
 };

-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr)
 {
-	struct mlx5_flow_destination dest = { 0 };
+	struct mlx5_flow_destination dest[2] = {};
 	struct mlx5_fc *counter = NULL;
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;
 	void *misc;
 	int action;
+	int i = 0;

 	if (esw->mode != SRIOV_OFFLOADS)
 		return ERR_PTR(-EOPNOTSUPP);
@@ -60,15 +61,17 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	action = attr->action;

 	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-		dest.vport_num = attr->out_rep->vport;
-		action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+		dest[i].vport_num = attr->out_rep->vport;
+		i++;
+	}
+	if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
 		counter = mlx5_fc_create(esw->dev, true);
 		if (IS_ERR(counter))
 			return ERR_CAST(counter);
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-		dest.counter = counter;
+		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest[i].counter = counter;
+		i++;
 	}

 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
@@ -80,9 +83,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
 				      MLX5_MATCH_MISC_PARAMETERS;

-	rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb,
-				  spec, action, 0, &dest);
+	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
+				   spec, action, 0, dest, i);
 	if (IS_ERR(rule))
 		mlx5_fc_destroy(esw->dev, counter);
@@ -269,11 +271,11 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 	return err;
 }

-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
 {
 	struct mlx5_flow_destination dest;
-	struct mlx5_flow_rule *flow_rule;
+	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_flow_spec *spec;
 	void *misc;
@@ -296,9 +298,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport_num = vport;

-	flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
-				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				       0, &dest);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					0, &dest, 1);
 	if (IS_ERR(flow_rule))
 		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
 out:
@@ -315,7 +317,7 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
 		return;

 	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
-		mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
+		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
 		list_del(&esw_sq->list);
 		kfree(esw_sq);
 	}
@@ -325,7 +327,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
 				 struct mlx5_eswitch_rep *rep,
 				 u16 *sqns_array, int sqns_num)
 {
-	struct mlx5_flow_rule *flow_rule;
+	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_esw_sq *esw_sq;
 	int err;
 	int i;
@@ -362,7 +364,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 {
 	struct mlx5_flow_destination dest;
-	struct mlx5_flow_rule *flow_rule = NULL;
+	struct mlx5_flow_handle *flow_rule = NULL;
 	struct mlx5_flow_spec *spec;
 	int err = 0;
@@ -376,9 +378,9 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport_num = 0;

-	flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
-				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				       0, &dest);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					0, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		err = PTR_ERR(flow_rule);
 		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
@@ -501,7 +503,7 @@ static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
 		return;

 	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
-	mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
+	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
@@ -585,11 +587,11 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
 	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
 }

-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
 {
 	struct mlx5_flow_destination dest;
-	struct mlx5_flow_rule *flow_rule;
+	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_flow_spec *spec;
 	void *misc;
@@ -610,9 +612,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dest.tir_num = tirn;

-	flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
-				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				       0, &dest);
+	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
+					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					0, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
 		goto out;
......
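With destinations passed as an array, one FTE can forward to a vport and feed a counter at the same time. A hedged sketch of the calling pattern the last two patches rely on (dev, ft, spec and out_vport are assumed to be set up elsewhere):

```c
/* Sketch: one flow entry that both forwards and counts. */
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_handle *handle;
struct mlx5_fc *counter;

counter = mlx5_fc_create(dev, true);	/* aging counter */
if (IS_ERR(counter))
	return PTR_ERR(counter);

dest[0].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[0].vport_num = out_vport;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter = counter;

handle = mlx5_add_flow_rules(ft, spec,
			     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			     MLX5_FLOW_CONTEXT_ACTION_COUNT,
			     0, dest, 2);
if (IS_ERR(handle)) {
	mlx5_fc_destroy(dev, counter);
	return PTR_ERR(handle);
}
```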
@@ -153,6 +153,11 @@ static void del_rule(struct fs_node *node);
 static void del_flow_table(struct fs_node *node);
 static void del_flow_group(struct fs_node *node);
 static void del_fte(struct fs_node *node);
+static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+				struct mlx5_flow_destination *d2);
+static struct mlx5_flow_rule *
+find_flow_rule(struct fs_fte *fte,
+	       struct mlx5_flow_destination *dest);

 static void tree_init_node(struct fs_node *node,
 			   unsigned int refcount,
@@ -369,6 +374,7 @@ static void del_rule(struct fs_node *node)
 	struct mlx5_core_dev *dev = get_dev(node);
 	int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
 	int err;
+	bool update_fte = false;

 	match_value = mlx5_vzalloc(match_len);
 	if (!match_value) {
@@ -387,13 +393,23 @@ static void del_rule(struct fs_node *node)
 		list_del(&rule->next_ft);
 		mutex_unlock(&rule->dest_attr.ft->lock);
 	}
+
+	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
+	    --fte->dests_size) {
+		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+		fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+		update_fte = true;
+		goto out;
+	}
+
 	if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
 	    --fte->dests_size) {
 		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
-		err = mlx5_cmd_update_fte(dev, ft,
-					  fg->id,
-					  modify_mask,
-					  fte);
+		update_fte = true;
+	}
+out:
+	if (update_fte && fte->dests_size) {
+		err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte);
 		if (err)
 			mlx5_core_warn(dev,
 				       "%s can't del rule fg id=%d fte_index=%d\n",
@@ -641,8 +657,8 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
 	return err;
 }

-int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
-				 struct mlx5_flow_destination *dest)
+static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+					 struct mlx5_flow_destination *dest)
 {
 	struct mlx5_flow_table *ft;
 	struct mlx5_flow_group *fg;
@@ -667,6 +683,28 @@ int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
 	return err;
 }

+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
+				 struct mlx5_flow_destination *new_dest,
+				 struct mlx5_flow_destination *old_dest)
+{
+	int i;
+
+	if (!old_dest) {
+		if (handle->num_rules != 1)
+			return -EINVAL;
+		return _mlx5_modify_rule_destination(handle->rule[0],
+						     new_dest);
+	}
+
+	for (i = 0; i < handle->num_rules; i++) {
+		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
+			return _mlx5_modify_rule_destination(handle->rule[i],
+							     new_dest);
+	}
+
+	return -EINVAL;
+}
+
 /* Modify/set FWD rules that point on old_next_ft to point on new_next_ft  */
 static int connect_fwd_rules(struct mlx5_core_dev *dev,
 			     struct mlx5_flow_table *new_next_ft,
@@ -689,7 +727,7 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
 	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
 	mutex_unlock(&old_next_ft->lock);
 	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
-		err = mlx5_modify_rule_destination(iter, &dest);
+		err = _mlx5_modify_rule_destination(iter, &dest);
 		if (err)
 			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
 			       new_next_ft->id);
@@ -918,55 +956,133 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
 	return rule;
 }

-/* fte should not be deleted while calling this function */
-static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
-					   struct mlx5_flow_group *fg,
-					   struct mlx5_flow_destination *dest)
-{
-	struct mlx5_flow_table *ft;
-	struct mlx5_flow_rule *rule;
-	int modify_mask = 0;
-	int err;
-
-	rule = alloc_rule(dest);
-	if (!rule)
-		return ERR_PTR(-ENOMEM);
-
-	fs_get_obj(ft, fg->node.parent);
-	/* Add dest to dests list- we need flow tables to be in the
-	 * end of the list for forward to next prio rules.
-	 */
-	tree_init_node(&rule->node, 1, del_rule);
-	if (dest && dest->type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
-		list_add(&rule->node.list, &fte->node.children);
-	else
-		list_add_tail(&rule->node.list, &fte->node.children);
-	if (dest) {
-		fte->dests_size++;
-
-		modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ?
-			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) :
-			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
-	}
-
-	if (fte->dests_size == 1 || !dest)
-		err = mlx5_cmd_create_fte(get_dev(&ft->node),
-					  ft, fg->id, fte);
-	else
-		err = mlx5_cmd_update_fte(get_dev(&ft->node),
-					  ft, fg->id, modify_mask, fte);
-	if (err)
-		goto free_rule;
-
-	fte->status |= FS_FTE_STATUS_EXISTING;
-
-	return rule;
-
-free_rule:
-	list_del(&rule->node.list);
-	kfree(rule);
-	if (dest)
-		fte->dests_size--;
-	return ERR_PTR(err);
-}
+static struct mlx5_flow_handle *alloc_handle(int num_rules)
+{
+	struct mlx5_flow_handle *handle;
+
+	handle = kzalloc(sizeof(*handle) + sizeof(handle->rule[0]) *
+			  num_rules, GFP_KERNEL);
+	if (!handle)
+		return NULL;
+
+	handle->num_rules = num_rules;
+
+	return handle;
+}
+
+static void destroy_flow_handle(struct fs_fte *fte,
+				struct mlx5_flow_handle *handle,
+				struct mlx5_flow_destination *dest,
+				int i)
+{
+	for (; --i >= 0;) {
+		if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
+			fte->dests_size--;
+			list_del(&handle->rule[i]->node.list);
+			kfree(handle->rule[i]);
+		}
+	}
+	kfree(handle);
+}
+
+static struct mlx5_flow_handle *
+create_flow_handle(struct fs_fte *fte,
+		   struct mlx5_flow_destination *dest,
+		   int dest_num,
+		   int *modify_mask,
+		   bool *new_rule)
+{
+	struct mlx5_flow_handle *handle;
+	struct mlx5_flow_rule *rule = NULL;
+	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+	int type;
+	int i = 0;
+
+	handle = alloc_handle((dest_num) ? dest_num : 1);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+
+	do {
+		if (dest) {
+			rule = find_flow_rule(fte, dest + i);
+			if (rule) {
+				atomic_inc(&rule->node.refcount);
+				goto rule_found;
+			}
+		}
+
+		*new_rule = true;
+		rule = alloc_rule(dest + i);
+		if (!rule)
+			goto free_rules;
+
+		/* Add dest to dests list- we need flow tables to be in the
+		 * end of the list for forward to next prio rules.
+		 */
+		tree_init_node(&rule->node, 1, del_rule);
+		if (dest &&
+		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+			list_add(&rule->node.list, &fte->node.children);
+		else
+			list_add_tail(&rule->node.list, &fte->node.children);
+		if (dest) {
+			fte->dests_size++;
+
+			type = dest[i].type ==
+				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+			*modify_mask |= type ? count : dst;
+		}
+rule_found:
+		handle->rule[i] = rule;
+	} while (++i < dest_num);
+
+	return handle;
+
+free_rules:
+	destroy_flow_handle(fte, handle, dest, i);
+	return ERR_PTR(-ENOMEM);
+}
+
+/* fte should not be deleted while calling this function */
+static struct mlx5_flow_handle *
+add_rule_fte(struct fs_fte *fte,
+	     struct mlx5_flow_group *fg,
+	     struct mlx5_flow_destination *dest,
+	     int dest_num,
+	     bool update_action)
+{
+	struct mlx5_flow_handle *handle;
+	struct mlx5_flow_table *ft;
+	int modify_mask = 0;
+	int err;
+	bool new_rule = false;
+
+	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
+				    &new_rule);
+	if (IS_ERR(handle) || !new_rule)
+		goto out;
+
+	if (update_action)
+		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+
+	fs_get_obj(ft, fg->node.parent);
+	if (!(fte->status & FS_FTE_STATUS_EXISTING))
+		err = mlx5_cmd_create_fte(get_dev(&ft->node),
+					  ft, fg->id, fte);
+	else
+		err = mlx5_cmd_update_fte(get_dev(&ft->node),
+					  ft, fg->id, modify_mask, fte);
+	if (err)
+		goto free_handle;
+
+	fte->status |= FS_FTE_STATUS_EXISTING;
+
+out:
+	return handle;
+
+free_handle:
+	destroy_flow_handle(fte, handle, dest, handle->num_rules);
+	return ERR_PTR(err);
+}
@@ -1067,71 +1183,81 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
 	return fg;
 }

+static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+				struct mlx5_flow_destination *d2)
+{
+	if (d1->type == d2->type) {
+		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+		     d1->vport_num == d2->vport_num) ||
+		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+		     d1->ft == d2->ft) ||
+		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+		     d1->tir_num == d2->tir_num))
+			return true;
+	}
+
+	return false;
+}
+
 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
 					     struct mlx5_flow_destination *dest)
 {
 	struct mlx5_flow_rule *rule;

 	list_for_each_entry(rule, &fte->node.children, node.list) {
-		if (rule->dest_attr.type == dest->type) {
-			if ((dest->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
-			     dest->vport_num == rule->dest_attr.vport_num) ||
-			    (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
-			     dest->ft == rule->dest_attr.ft) ||
-			    (dest->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
-			     dest->tir_num == rule->dest_attr.tir_num))
-				return rule;
-		}
+		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
+			return rule;
 	}

 	return NULL;
 }

-static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
-					  u32 *match_value,
-					  u8 action,
-					  u32 flow_tag,
-					  struct mlx5_flow_destination *dest)
+static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
+					    u32 *match_value,
+					    u8 action,
+					    u32 flow_tag,
+					    struct mlx5_flow_destination *dest,
+					    int dest_num)
 {
-	struct fs_fte *fte;
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *handle;
 	struct mlx5_flow_table *ft;
 	struct list_head *prev;
+	struct fs_fte *fte;
+	int i;

 	nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT);
 	fs_for_each_fte(fte, fg) {
 		nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
 		if (compare_match_value(&fg->mask, match_value, &fte->val) &&
-		    action == fte->action && flow_tag == fte->flow_tag) {
-			rule = find_flow_rule(fte, dest);
-			if (rule) {
-				atomic_inc(&rule->node.refcount);
-				unlock_ref_node(&fte->node);
-				unlock_ref_node(&fg->node);
-				return rule;
+		    (action & fte->action) && flow_tag == fte->flow_tag) {
+			int old_action = fte->action;
+
+			fte->action |= action;
+			handle = add_rule_fte(fte, fg, dest, dest_num,
+					      old_action != action);
+			if (IS_ERR(handle)) {
+				fte->action = old_action;
+				goto unlock_fte;
+			} else {
+				goto add_rules;
 			}
-			rule = add_rule_fte(fte, fg, dest);
-			unlock_ref_node(&fte->node);
-			if (IS_ERR(rule))
-				goto unlock_fg;
-			else
-				goto add_rule;
 		}
 		unlock_ref_node(&fte->node);
 	}
 	fs_get_obj(ft, fg->node.parent);
 	if (fg->num_ftes >= fg->max_ftes) {
-		rule = ERR_PTR(-ENOSPC);
+		handle = ERR_PTR(-ENOSPC);
 		goto unlock_fg;
 	}

 	fte = create_fte(fg, match_value, action, flow_tag, &prev);
 	if (IS_ERR(fte)) {
-		rule = (void *)fte;
+		handle = (void *)fte;
 		goto unlock_fg;
 	}
 	tree_init_node(&fte->node, 0, del_fte);
-	rule = add_rule_fte(fte, fg, dest);
-	if (IS_ERR(rule)) {
+	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
+	handle = add_rule_fte(fte, fg, dest, dest_num, false);
+	if (IS_ERR(handle)) {
 		kfree(fte);
 		goto unlock_fg;
 	}
@@ -1140,19 +1266,24 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,

 	tree_add_node(&fte->node, &fg->node);
 	list_add(&fte->node.list, prev);
-add_rule:
-	tree_add_node(&rule->node, &fte->node);
+add_rules:
+	for (i = 0; i < handle->num_rules; i++) {
+		if (atomic_read(&handle->rule[i]->node.refcount) == 1)
+			tree_add_node(&handle->rule[i]->node, &fte->node);
+	}
+unlock_fte:
+	unlock_ref_node(&fte->node);
 unlock_fg:
 	unlock_ref_node(&fg->node);
-	return rule;
+	return handle;
 }

-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule)
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_rule *dst;
 	struct fs_fte *fte;

-	fs_get_obj(fte, rule->node.parent);
+	fs_get_obj(fte, handle->rule[0]->node.parent);
 	fs_for_each_dst(dst, fte) {
 		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
@@ -1170,8 +1301,9 @@ static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
 	if (!counter)
 		return false;

-	/* Hardware support counter for a drop action only */
-	return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT);
+	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
+			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) &&
+	       (action & MLX5_FLOW_CONTEXT_ACTION_COUNT);
 }

 static bool dest_is_valid(struct mlx5_flow_destination *dest,
@@ -1191,18 +1323,22 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
 	return true;
 }

-static struct mlx5_flow_rule *
-_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
-		    struct mlx5_flow_spec *spec,
-		    u32 action,
-		    u32 flow_tag,
-		    struct mlx5_flow_destination *dest)
+static struct mlx5_flow_handle *
+_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+		     struct mlx5_flow_spec *spec,
+		     u32 action,
+		     u32 flow_tag,
+		     struct mlx5_flow_destination *dest,
+		     int dest_num)
 {
 	struct mlx5_flow_group *g;
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;
+	int i;

-	if (!dest_is_valid(dest, action, ft))
-		return ERR_PTR(-EINVAL);
+	for (i = 0; i < dest_num; i++) {
+		if (!dest_is_valid(&dest[i], action, ft))
+			return ERR_PTR(-EINVAL);
+	}

 	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
 	fs_for_each_fg(g, ft)
@@ -1211,7 +1347,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 			   g->mask.match_criteria,
 			   spec->match_criteria)) {
 			rule = add_rule_fg(g, spec->match_value,
-					   action, flow_tag, dest);
+					   action, flow_tag, dest, dest_num);
 			if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
 				goto unlock;
 		}
@@ -1224,7 +1360,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	}

 	rule = add_rule_fg(g, spec->match_value,
-			   action, flow_tag, dest);
+			   action, flow_tag, dest, dest_num);
 	if (IS_ERR(rule)) {
 		/* Remove assumes refcount > 0 and autogroup creates a group
 		 * with a refcount = 0.
@@ -1245,17 +1381,18 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
 		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
 }

-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
-		   struct mlx5_flow_spec *spec,
-		   u32 action,
-		   u32 flow_tag,
-		   struct mlx5_flow_destination *dest)
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+		    struct mlx5_flow_spec *spec,
+		    u32 action,
+		    u32 flow_tag,
+		    struct mlx5_flow_destination *dest,
+		    int dest_num)
 {
 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
 	struct mlx5_flow_destination gen_dest;
 	struct mlx5_flow_table *next_ft = NULL;
-	struct mlx5_flow_rule *rule = NULL;
+	struct mlx5_flow_handle *handle = NULL;
 	u32 sw_action = action;
 	struct fs_prio *prio;
@@ -1271,6 +1408,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 			gen_dest.ft = next_ft;
 			dest = &gen_dest;
+			dest_num = 1;
 			action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 		} else {
 			mutex_unlock(&root->chain_lock);
@@ -1278,27 +1416,33 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 		}
 	}

-	rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest);
+	handle = _mlx5_add_flow_rules(ft, spec, action, flow_tag, dest,
+				      dest_num);

 	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
-		if (!IS_ERR_OR_NULL(rule) &&
-		    (list_empty(&rule->next_ft))) {
+		if (!IS_ERR_OR_NULL(handle) &&
+		    (list_empty(&handle->rule[0]->next_ft))) {
 			mutex_lock(&next_ft->lock);
-			list_add(&rule->next_ft, &next_ft->fwd_rules);
+			list_add(&handle->rule[0]->next_ft,
+				 &next_ft->fwd_rules);
 			mutex_unlock(&next_ft->lock);
-			rule->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
 		}
 		mutex_unlock(&root->chain_lock);
 	}
-	return rule;
+	return handle;
 }
-EXPORT_SYMBOL(mlx5_add_flow_rule);
+EXPORT_SYMBOL(mlx5_add_flow_rules);

-void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
+void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
 {
-	tree_remove_node(&rule->node);
+	int i;
+
+	for (i = handle->num_rules - 1; i >= 0; i--)
+		tree_remove_node(&handle->rule[i]->node);
+	kfree(handle);
 }
-EXPORT_SYMBOL(mlx5_del_flow_rule);
+EXPORT_SYMBOL(mlx5_del_flow_rules);

 /* Assuming prio->node.children(flow tables) is sorted by level */
 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
......
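Since a handle can now fan out to several hardware rules, the reworked mlx5_modify_rule_destination() needs to know which destination to replace; a NULL old_dest is only legal for single-rule handles. A hedged usage sketch (handle and new_tirn are illustrative):

```c
/* Sketch: repoint a single-rule RX steering handle at another TIR. */
struct mlx5_flow_destination new_dest = {
	.type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
	.tir_num = new_tirn,
};
int err;

/* old_dest == NULL is only valid when handle->num_rules == 1 */
err = mlx5_modify_rule_destination(handle, &new_dest, NULL);
```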
@@ -94,6 +94,11 @@ struct mlx5_flow_rule {
 	u32					sw_action;
 };

+struct mlx5_flow_handle {
+	int num_rules;
+	struct mlx5_flow_rule *rule[];
+};
+
 /* Type of children is mlx5_flow_group */
 struct mlx5_flow_table {
 	struct fs_node			node;
......
@@ -1423,6 +1423,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
 	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
 	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5, PCIe 4.0 */
+	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5, PCIe 4.0 VF */
 	{ 0, }
 };
......
@@ -92,6 +92,13 @@ int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
 bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
 int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *context, u32 *element_id);
+int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *context, u32 element_id,
+				       u32 modify_bitmask);
+int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+					u32 element_id);
 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
 cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
 u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
......
@@ -36,6 +36,71 @@
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"

+/* Scheduling element fw management */
+int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *ctx, u32 *element_id)
+{
+	u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
+	void *schedc;
+	int err;
+
+	schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
+			      scheduling_context);
+	MLX5_SET(create_scheduling_element_in, in, opcode,
+		 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
+	MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
+		 hierarchy);
+	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	*element_id = MLX5_GET(create_scheduling_element_out, out,
+			       scheduling_element_id);
+	return 0;
+}
+
+int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *ctx, u32 element_id,
+				       u32 modify_bitmask)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
+	void *schedc;
+
+	schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
+			      scheduling_context);
+	MLX5_SET(modify_scheduling_element_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
+	MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
+		 element_id);
+	MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
+		 modify_bitmask);
+	MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
+		 hierarchy);
+	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+					u32 element_id)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
+
+	MLX5_SET(destroy_scheduling_element_in, in, opcode,
+		 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+	MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
+		 element_id);
+	MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
+		 hierarchy);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
 /* Finds an entry where we can register the given rate
  * If the rate already exists, return the entry where it is registered,
  * otherwise return the first available entry.
......
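These three wrappers are the low-level plumbing the E-switch QoS patches build on. A hedged sketch of creating the root TSAR on the E-switch hierarchy, roughly mirroring what the series' TSAR setup does (locals are illustrative, error handling trimmed):

```c
/* Sketch: create a TSAR at the root of the E-switch scheduling
 * hierarchy, then tear it down again. */
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
u32 root_tsar_id;
int err;

MLX5_SET(scheduling_context, tsar_ctx, element_type,
	 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);

err = mlx5_create_scheduling_element_cmd(dev,
					 SCHEDULING_HIERARCHY_E_SWITCH,
					 tsar_ctx, &root_tsar_id);
if (err)
	return err;

/* ... hang per-vport elements under root_tsar_id ... */

mlx5_destroy_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH,
				    root_tsar_id);
```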
@@ -69,8 +69,8 @@ enum mlx5_flow_namespace_type {

 struct mlx5_flow_table;
 struct mlx5_flow_group;
-struct mlx5_flow_rule;
 struct mlx5_flow_namespace;
+struct mlx5_flow_handle;

 struct mlx5_flow_spec {
 	u8   match_criteria_enable;
@@ -127,18 +127,20 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
 /* Single destination per rule.
  * Group ID is implied by the match criteria.
  */
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
-		   struct mlx5_flow_spec *spec,
-		   u32 action,
-		   u32 flow_tag,
-		   struct mlx5_flow_destination *dest);
-void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
-
-int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
-				 struct mlx5_flow_destination *dest);
-
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule);
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+		    struct mlx5_flow_spec *spec,
+		    u32 action,
+		    u32 flow_tag,
+		    struct mlx5_flow_destination *dest,
+		    int dest_num);
+void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
+
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
+				 struct mlx5_flow_destination *new_dest,
+				 struct mlx5_flow_destination *old_dest);
+
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);

 struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
......
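Counters attached through a handle are read back the same way as before. A hedged sketch of the query path (the trailing parameters of mlx5_fc_query_cached() are recalled from the mlx5 tree rather than shown in this hunk):

```c
/* Sketch: pull cached stats for the counter bound to a flow handle,
 * then tear the flow down; rules go first, the counter last. */
struct mlx5_fc *counter = mlx5_flow_rule_counter(handle);
u64 bytes, packets, lastuse;

if (counter)
	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

mlx5_del_flow_rules(handle);
mlx5_fc_destroy(dev, counter);
```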
@@ -145,6 +145,12 @@ enum {
 	MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
 	MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
 	MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
+	MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT     = 0x782,
+	MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT    = 0x783,
+	MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT      = 0x784,
+	MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT     = 0x785,
+	MLX5_CMD_OP_CREATE_QOS_PARA_VPORT         = 0x786,
+	MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT        = 0x787,
 	MLX5_CMD_OP_ALLOC_PD                      = 0x800,
 	MLX5_CMD_OP_DEALLOC_PD                    = 0x801,
 	MLX5_CMD_OP_ALLOC_UAR                     = 0x802,
@@ -537,13 +543,27 @@ struct mlx5_ifc_e_switch_cap_bits {

 struct mlx5_ifc_qos_cap_bits {
 	u8         packet_pacing[0x1];
-	u8         reserved_0[0x1f];
-	u8         reserved_1[0x20];
+	u8         esw_scheduling[0x1];
+	u8         reserved_at_2[0x1e];
+
+	u8         reserved_at_20[0x20];

 	u8         packet_pacing_max_rate[0x20];

 	u8         packet_pacing_min_rate[0x20];

-	u8         reserved_2[0x10];
+	u8         reserved_at_80[0x10];
 	u8         packet_pacing_rate_table_size[0x10];

-	u8         reserved_3[0x760];
+	u8         esw_element_type[0x10];
+	u8         esw_tsar_type[0x10];
+
+	u8         reserved_at_c0[0x10];
+	u8         max_qos_para_vport[0x10];
+
+	u8         max_tsar_bw_share[0x20];
+
+	u8         reserved_at_100[0x700];
 };

 struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -2333,6 +2353,30 @@ struct mlx5_ifc_sqc_bits {
 	struct mlx5_ifc_wq_bits wq;
 };

+enum {
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0,
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+};
+
+struct mlx5_ifc_scheduling_context_bits {
+	u8         element_type[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         element_attributes[0x20];
+
+	u8         parent_element_id[0x20];
+
+	u8         reserved_at_60[0x40];
+
+	u8         bw_share[0x20];
+
+	u8         max_average_bw[0x20];
+
+	u8         reserved_at_e0[0x120];
+};
+
 struct mlx5_ifc_rqtc_bits {
 	u8         reserved_at_0[0xa0];
@@ -2844,7 +2888,7 @@ struct mlx5_ifc_xrqc_bits {

 	struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;

-	u8         reserved_at_180[0x200];
+	u8         reserved_at_180[0x880];

 	struct mlx5_ifc_wq_bits wq;
 };
@@ -2920,6 +2964,29 @@ struct mlx5_ifc_register_loopback_control_bits {
 	u8         reserved_at_20[0x60];
 };

+struct mlx5_ifc_vport_tc_element_bits {
+	u8         traffic_class[0x4];
+	u8         reserved_at_4[0xc];
+	u8         vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_element_bits {
+	u8         reserved_at_0[0x10];
+	u8         vport_number[0x10];
+};
+
+enum {
+	TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+	TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+	TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+};
+
+struct mlx5_ifc_tsar_element_bits {
+	u8         reserved_at_0[0x8];
+	u8         tsar_type[0x8];
+	u8         reserved_at_10[0x10];
+};
+
 struct mlx5_ifc_teardown_hca_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
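element_attributes is a per-type overlay described by the vport/vport_tc/tsar structs above. A hedged sketch of queueing a vport under the root TSAR with a bandwidth cap (vport_num, max_rate and root_tsar_id are illustrative; this mirrors what the E-switch QoS code does):

```c
/* Sketch: a vport scheduling element with a max average bandwidth cap;
 * field names come from the ifc structs above. */
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
void *attr = MLX5_ADDR_OF(scheduling_context, sched_ctx,
			  element_attributes);
u32 sched_elem_ix;
int err;

MLX5_SET(scheduling_context, sched_ctx, element_type,
	 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
MLX5_SET(vport_element, attr, vport_number, vport_num);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, root_tsar_id);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);

err = mlx5_create_scheduling_element_cmd(dev,
					 SCHEDULING_HIERARCHY_E_SWITCH,
					 sched_ctx, &sched_elem_ix);
```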
@@ -3540,6 +3607,39 @@ struct mlx5_ifc_query_special_contexts_in_bits {
 	u8         reserved_at_40[0x40];
 };

+struct mlx5_ifc_query_scheduling_element_out_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+	u8         reserved_at_300[0x100];
+};
+
+enum {
+	SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+};
+
+struct mlx5_ifc_query_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_80[0x180];
+};
+
 struct mlx5_ifc_query_rqt_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
@@ -4725,6 +4825,43 @@ struct mlx5_ifc_modify_sq_in_bits {
 	struct mlx5_ifc_sqc_bits ctx;
 };

+struct mlx5_ifc_modify_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x1c0];
+};
+
+enum {
+	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1,
+	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2,
+};
+
+struct mlx5_ifc_modify_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_80[0x20];
+
+	u8         modify_bitmask[0x20];
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+	u8         reserved_at_300[0x100];
+};
+
 struct mlx5_ifc_modify_rqt_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
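Retuning just the rate cap of a live element takes the modify command plus the matching bitmask bit, so firmware applies only the masked field. A hedged sketch (sched_elem_ix is the id returned at create time; new_max_rate is illustrative):

```c
/* Sketch: change max_average_bw without touching bw_share. */
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
u32 bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
int err;

MLX5_SET(scheduling_context, sched_ctx, element_type,
	 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw, new_max_rate);

err = mlx5_modify_scheduling_element_cmd(dev,
					 SCHEDULING_HIERARCHY_E_SWITCH,
					 sched_ctx, sched_elem_ix,
					 bitmask);
```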
@@ -5390,6 +5527,30 @@ struct mlx5_ifc_destroy_sq_in_bits {
 	u8         reserved_at_60[0x20];
 };

+struct mlx5_ifc_destroy_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x1c0];
+};
+
+struct mlx5_ifc_destroy_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_80[0x180];
+};
+
 struct mlx5_ifc_destroy_rqt_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
@@ -6017,6 +6178,36 @@ struct mlx5_ifc_create_sq_in_bits {
 	struct mlx5_ifc_sqc_bits ctx;
 };

+struct mlx5_ifc_create_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_a0[0x160];
+};
+
+struct mlx5_ifc_create_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         reserved_at_60[0xa0];
+
+	struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+	u8         reserved_at_300[0x100];
+};
+
 struct mlx5_ifc_create_rqt_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
......
@@ -55,7 +55,7 @@ struct mlx5_srq_attr {
 	u32 lwm;
 	u32 user_index;
 	u64 db_record;
-	u64 *pas;
+	__be64 *pas;
 };

 struct mlx5_core_dev;
......
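Typing pas as __be64 makes the wire endianness of the SRQ physical address array visible to sparse, so a missing byte swap becomes a build-time warning instead of a silent bug. A hedged illustration of a correctly typed fill (helper name and locals are invented for the example):

```c
/* Sketch: populating an SRQ PAS array; cpu_to_be64() is now demanded
 * by the type system rather than by convention alone. */
static void fill_srq_pas(__be64 *pas, dma_addr_t *frags, int npages)
{
	int i;

	for (i = 0; i < npages; i++)
		pas[i] = cpu_to_be64(frags[i]);	/* device expects big endian */
}
```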