Commit 0a6ce1e3 authored by David S. Miller

Merge tag 'shared-for-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma

Saeed Mahameed says:

====================
Mellanox mlx5 core driver updates 2016-10-25

This series contains updates and fixes to the mlx5 core and
IB drivers, along with two new features that require new low-level
commands and infrastructure updates:
 - SRIOV VF max rate limit support
 - mlx5e TC support for FWD rules with counter.

Needed for both net and rdma subsystems.

Updates and Fixes:
From Saeed Mahameed (2):
  - mlx5 IB: Skip handling unknown mlx5 events
  - Add ConnectX-5 PCIe 4.0 VF device ID

From Artemy Kovalyov (2):
  - Update struct mlx5_ifc_xrqc_bits
  - Ensure SRQ physical address structure endianness

From Eugenia Emantayev (1):
  - Fix length of async_event_mask

New Features:
From Mohamad Haj Yahia (3): mlx5 SRIOV VF max rate limit support (a usage sketch follows the list)
  - Introduce TSAR manipulation firmware commands
  - Introduce E-switch QoS management
  - Add SRIOV VF max rate configuration support
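  From userspace this surfaces through the standard .ndo_set_vf_rate
  hook (e.g. "ip link set <dev> vf <n> max_tx_rate <Mbps>"); min_tx_rate
  requests are rejected since only a max rate is supported here.  As a
  rough illustration of how the new pieces fit together (a minimal
  sketch, not the exact eswitch code added in this series; the TSAR
  index is assumed to come from the per-vport qos state introduced
  below), a VF vport's max rate maps to the max_average_bw field of its
  scheduling context:

    u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
    u32 bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;

    MLX5_SET(scheduling_context, ctx, element_type,
             SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
    MLX5_SET(scheduling_context, ctx, max_average_bw, max_rate);

    /* element_id is the vport's TSAR index under the E-switch hierarchy */
    err = mlx5_modify_scheduling_element_cmd(dev,
                                             SCHEDULING_HIERARCHY_E_SWITCH,
                                             ctx, vport->qos.esw_tsar_ix,
                                             bitmask);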

From Mark Bloch (7): mlx5e TC support for FWD rule with counter
  - Don't unlock fte while still using it
  - Use fte status to decide on firmware command
  - Refactor find_flow_rule
  - Group similar rules under the same fte
  - Add multi dest support
  - Add option to add fwd rule with counter
  - mlx5e tc support for FWD rule with counter
  Here Mark fixed two trivial issues in the flow steering core and
  refactored the flow steering API so that multiple destinations can be
  attached to the same hardware flow table entry at once.  The last two
  patches use this to attach a flow counter to a forwarding rule on the
  same flow entry, as sketched below.
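  A minimal sketch of the reworked call (based on the
  mlx5_eswitch_add_offloaded_rule() hunk in this diff; table setup,
  match spec population and error handling are elided):

    struct mlx5_flow_destination dest[2] = {};
    struct mlx5_flow_handle *rule;
    struct mlx5_fc *counter;
    int i = 0;

    /* destination 1: forward to the output vport */
    dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
    dest[i].vport_num = attr->out_rep->vport;
    i++;

    /* destination 2: a flow counter on the same FTE (aging enabled) */
    counter = mlx5_fc_create(esw->dev, true);
    dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
    dest[i].counter = counter;
    i++;

    rule = mlx5_add_flow_rules(fdb, spec,    /* fdb: the offloads FDB table */
                               MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                               MLX5_FLOW_CONTEXT_ACTION_COUNT,
                               0 /* flow tag */, dest, i);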

V2: Dropped some patches that added new structures without adding any usage of them.
    Added SRIOV VF max rate configuration support patch that introduces
    the usage of the TSAR infrastructure.
    Added flow steering fixes and refactoring in addition to mlx5 tc
    support for forward rule with counter.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d46b6349 e37a79e5
@@ -1771,13 +1771,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
 	mutex_lock(&dev->flow_db.lock);
 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
-		mlx5_del_flow_rule(iter->rule);
+		mlx5_del_flow_rules(iter->rule);
 		put_flow_table(dev, iter->prio, true);
 		list_del(&iter->list);
 		kfree(iter);
 	}
-	mlx5_del_flow_rule(handler->rule);
+	mlx5_del_flow_rules(handler->rule);
 	put_flow_table(dev, handler->prio, true);
 	mutex_unlock(&dev->flow_db.lock);
@@ -1907,10 +1907,10 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
 	action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
 		MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
-	handler->rule = mlx5_add_flow_rule(ft, spec,
-					   action,
-					   MLX5_FS_DEFAULT_FLOW_TAG,
-					   dst);
+	handler->rule = mlx5_add_flow_rules(ft, spec,
+					    action,
+					    MLX5_FS_DEFAULT_FLOW_TAG,
+					    dst, 1);
 	if (IS_ERR(handler->rule)) {
 		err = PTR_ERR(handler->rule);
@@ -1941,7 +1941,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de
 		handler_dst = create_flow_rule(dev, ft_prio,
 					       flow_attr, dst);
 		if (IS_ERR(handler_dst)) {
-			mlx5_del_flow_rule(handler->rule);
+			mlx5_del_flow_rules(handler->rule);
 			ft_prio->refcount--;
 			kfree(handler);
 			handler = handler_dst;
@@ -2004,7 +2004,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
 					&leftovers_specs[LEFTOVERS_UC].flow_attr,
 					dst);
 		if (IS_ERR(handler_ucast)) {
-			mlx5_del_flow_rule(handler->rule);
+			mlx5_del_flow_rules(handler->rule);
 			ft_prio->refcount--;
 			kfree(handler);
 			handler = handler_ucast;
@@ -2046,7 +2046,7 @@ static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
 	return handler_rx;

 err_tx:
-	mlx5_del_flow_rule(handler_rx->rule);
+	mlx5_del_flow_rules(handler_rx->rule);
 	ft_rx->refcount--;
 	kfree(handler_rx);
 err:
@@ -2358,6 +2358,8 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
 		port = (u8)param;
 		break;
+	default:
+		return;
 	}

 	ibev.device = &ibdev->ib_dev;
......
@@ -153,7 +153,7 @@ struct mlx5_ib_flow_handler {
 	struct list_head		list;
 	struct ib_flow			ibflow;
 	struct mlx5_ib_flow_prio	*prio;
-	struct mlx5_flow_rule	*rule;
+	struct mlx5_flow_handle	*rule;
 };

 struct mlx5_ib_flow_db {
......
@@ -318,6 +318,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
 	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
 		return MLX5_CMD_STAT_OK;

 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -419,11 +421,14 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
 	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
 	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
 	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
 		*status = MLX5_DRIVER_STATUS_ABORTED;
 		*synd   = MLX5_DRIVER_SYND;
 		return -EIO;
@@ -580,6 +585,12 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
 	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
+	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
 	default: return "unknown command opcode";
 	}
 }
......
@@ -524,7 +524,7 @@ struct mlx5e_vxlan_db {
 struct mlx5e_l2_rule {
 	u8  addr[ETH_ALEN + 2];
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;
 };

 struct mlx5e_flow_table {
@@ -545,9 +545,9 @@ struct mlx5e_tc_table {
 struct mlx5e_vlan_table {
 	struct mlx5e_flow_table	ft;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	struct mlx5_flow_rule	*active_vlans_rule[VLAN_N_VID];
-	struct mlx5_flow_rule	*untagged_rule;
-	struct mlx5_flow_rule	*any_vlan_rule;
+	struct mlx5_flow_handle	*active_vlans_rule[VLAN_N_VID];
+	struct mlx5_flow_handle	*untagged_rule;
+	struct mlx5_flow_handle	*any_vlan_rule;
 	bool	filter_disabled;
 };
@@ -566,14 +566,14 @@ struct mlx5e_l2_table {
 /* L3/L4 traffic type classifier */
 struct mlx5e_ttc_table {
 	struct mlx5e_flow_table  ft;
-	struct mlx5_flow_rule	 *rules[MLX5E_NUM_TT];
+	struct mlx5_flow_handle	 *rules[MLX5E_NUM_TT];
 };

 #define ARFS_HASH_SHIFT BITS_PER_BYTE
 #define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
 struct arfs_table {
 	struct mlx5e_flow_table  ft;
-	struct mlx5_flow_rule    *default_rule;
+	struct mlx5_flow_handle	 *default_rule;
 	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
 };
......
@@ -56,7 +56,7 @@ struct arfs_tuple {
 struct arfs_rule {
 	struct mlx5e_priv	*priv;
 	struct work_struct      arfs_work;
-	struct mlx5_flow_rule   *rule;
+	struct mlx5_flow_handle *rule;
 	struct hlist_node	hlist;
 	int			rxq;
 	/* Flow ID passed to ndo_rx_flow_steer */
@@ -104,7 +104,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
 		tt = arfs_get_tt(i);
 		/* Modify ttc rules destination to bypass the aRFS tables*/
 		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
-						   &dest);
+						   &dest, NULL);
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc destination failed\n",
@@ -137,7 +137,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 		tt = arfs_get_tt(i);
 		/* Modify ttc rules destination to point on the aRFS FTs */
 		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
-						   &dest);
+						   &dest, NULL);
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc destination failed err=%d\n",
@@ -151,7 +151,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 static void arfs_destroy_table(struct arfs_table *arfs_t)
 {
-	mlx5_del_flow_rule(arfs_t->default_rule);
+	mlx5_del_flow_rules(arfs_t->default_rule);
 	mlx5e_destroy_flow_table(&arfs_t->ft);
 }
@@ -205,10 +205,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
 		goto out;
 	}

-	arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
-						  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-						  MLX5_FS_DEFAULT_FLOW_TAG,
-						  &dest);
+	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
+						   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+						   MLX5_FS_DEFAULT_FLOW_TAG,
+						   &dest, 1);
 	if (IS_ERR(arfs_t->default_rule)) {
 		err = PTR_ERR(arfs_t->default_rule);
 		arfs_t->default_rule = NULL;
@@ -396,7 +396,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
 	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
 		if (arfs_rule->rule)
-			mlx5_del_flow_rule(arfs_rule->rule);
+			mlx5_del_flow_rules(arfs_rule->rule);
 		hlist_del(&arfs_rule->hlist);
 		kfree(arfs_rule);
 	}
@@ -420,7 +420,7 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
 	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
 		cancel_work_sync(&rule->arfs_work);
 		if (rule->rule)
-			mlx5_del_flow_rule(rule->rule);
+			mlx5_del_flow_rules(rule->rule);
 		hlist_del(&rule->hlist);
 		kfree(rule);
 	}
@@ -462,12 +462,12 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 	return NULL;
 }

-static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
-					    struct arfs_rule *arfs_rule)
+static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
+					      struct arfs_rule *arfs_rule)
 {
 	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
 	struct arfs_tuple *tuple = &arfs_rule->tuple;
-	struct mlx5_flow_rule *rule = NULL;
+	struct mlx5_flow_handle *rule = NULL;
 	struct mlx5_flow_destination dest;
 	struct arfs_table *arfs_table;
 	struct mlx5_flow_spec *spec;
@@ -544,9 +544,9 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
 	}
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
-	rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				  MLX5_FS_DEFAULT_FLOW_TAG,
-				  &dest);
+	rule = mlx5_add_flow_rules(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				   MLX5_FS_DEFAULT_FLOW_TAG,
+				   &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
@@ -559,14 +559,14 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
 }

 static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
-				struct mlx5_flow_rule *rule, u16 rxq)
+				struct mlx5_flow_handle *rule, u16 rxq)
 {
 	struct mlx5_flow_destination dst;
 	int err = 0;

 	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dst.tir_num = priv->direct_tir[rxq].tirn;
-	err = mlx5_modify_rule_destination(rule, &dst);
+	err = mlx5_modify_rule_destination(rule, &dst, NULL);
 	if (err)
 		netdev_warn(priv->netdev,
 			    "Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
@@ -578,7 +578,7 @@ static void arfs_handle_work(struct work_struct *work)
 					struct arfs_rule,
 					arfs_work);
 	struct mlx5e_priv *priv = arfs_rule->priv;
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;

 	mutex_lock(&priv->state_lock);
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
......
@@ -160,7 +160,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 {
 	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
 	struct mlx5_flow_destination dest;
-	struct mlx5_flow_rule **rule_p;
+	struct mlx5_flow_handle **rule_p;
 	int err = 0;

 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -187,10 +187,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 		break;
 	}

-	*rule_p = mlx5_add_flow_rule(ft, spec,
-				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				     MLX5_FS_DEFAULT_FLOW_TAG,
-				     &dest);
+	*rule_p = mlx5_add_flow_rules(ft, spec,
+				      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				      MLX5_FS_DEFAULT_FLOW_TAG,
+				      &dest, 1);

 	if (IS_ERR(*rule_p)) {
 		err = PTR_ERR(*rule_p);
@@ -229,20 +229,20 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
 		if (priv->fs.vlan.untagged_rule) {
-			mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
+			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
 			priv->fs.vlan.untagged_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
 		if (priv->fs.vlan.any_vlan_rule) {
-			mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
+			mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
 			priv->fs.vlan.any_vlan_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
 		mlx5e_vport_context_update_vlans(priv);
 		if (priv->fs.vlan.active_vlans_rule[vid]) {
-			mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
+			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
 			priv->fs.vlan.active_vlans_rule[vid] = NULL;
 		}
 		mlx5e_vport_context_update_vlans(priv);
@@ -560,7 +560,7 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
 	for (i = 0; i < MLX5E_NUM_TT; i++) {
 		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
-			mlx5_del_flow_rule(ttc->rules[i]);
+			mlx5_del_flow_rules(ttc->rules[i]);
 			ttc->rules[i] = NULL;
 		}
 	}
@@ -616,13 +616,14 @@ static struct {
 	},
 };

-static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+static struct mlx5_flow_handle *
+mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
 			struct mlx5_flow_table *ft,
 			struct mlx5_flow_destination *dest,
 			u16 etype,
 			u8 proto)
 {
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
 	int err = 0;
@@ -643,10 +644,10 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
 	}

-	rule = mlx5_add_flow_rule(ft, spec,
-				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				  MLX5_FS_DEFAULT_FLOW_TAG,
-				  dest);
+	rule = mlx5_add_flow_rules(ft, spec,
+				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				   MLX5_FS_DEFAULT_FLOW_TAG,
+				   dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
@@ -660,7 +661,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
 {
 	struct mlx5_flow_destination dest;
 	struct mlx5e_ttc_table *ttc;
-	struct mlx5_flow_rule **rules;
+	struct mlx5_flow_handle **rules;
 	struct mlx5_flow_table *ft;
 	int tt;
 	int err;
@@ -801,7 +802,7 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
 				   struct mlx5e_l2_rule *ai)
 {
 	if (!IS_ERR_OR_NULL(ai->rule)) {
-		mlx5_del_flow_rule(ai->rule);
+		mlx5_del_flow_rules(ai->rule);
 		ai->rule = NULL;
 	}
 }
@@ -847,9 +848,9 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
 		break;
 	}

-	ai->rule = mlx5_add_flow_rule(ft, spec,
-				      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				      MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+	ai->rule = mlx5_add_flow_rules(ft, spec,
+				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				       MLX5_FS_DEFAULT_FLOW_TAG, &dest, 1);
 	if (IS_ERR(ai->rule)) {
 		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
 			   __func__, mv_dmac);
......
@@ -36,7 +36,7 @@
 struct mlx5e_ethtool_rule {
 	struct list_head             list;
 	struct ethtool_rx_flow_spec  flow_spec;
-	struct mlx5_flow_rule        *rule;
+	struct mlx5_flow_handle      *rule;
 	struct mlx5e_ethtool_table   *eth_ft;
 };
@@ -284,13 +284,14 @@ static bool outer_header_zero(u32 *match_criteria)
 						  size - 1);
 }

-static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
+static struct mlx5_flow_handle *
+add_ethtool_flow_rule(struct mlx5e_priv *priv,
 		      struct mlx5_flow_table *ft,
 		      struct ethtool_rx_flow_spec *fs)
 {
 	struct mlx5_flow_destination *dst = NULL;
 	struct mlx5_flow_spec *spec;
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;
 	int err = 0;
 	u32 action;
@@ -317,8 +318,8 @@ static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
 	}

 	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
-	rule = mlx5_add_flow_rule(ft, spec, action,
-				  MLX5_FS_DEFAULT_FLOW_TAG, dst);
+	rule = mlx5_add_flow_rules(ft, spec, action,
+				   MLX5_FS_DEFAULT_FLOW_TAG, dst, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
@@ -335,7 +336,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
 			     struct mlx5e_ethtool_rule *eth_rule)
 {
 	if (eth_rule->rule)
-		mlx5_del_flow_rule(eth_rule->rule);
+		mlx5_del_flow_rules(eth_rule->rule);
 	list_del(&eth_rule->list);
 	priv->fs.ethtool.tot_num_rules--;
 	put_flow_table(eth_rule->eth_ft);
@@ -475,7 +476,7 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
 {
 	struct mlx5e_ethtool_table *eth_ft;
 	struct mlx5e_ethtool_rule *eth_rule;
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;
 	int num_tuples;
 	int err;
......
@@ -2925,6 +2925,20 @@ static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
 	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
 }

+static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+			     int max_tx_rate)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	if (min_tx_rate)
+		return -EOPNOTSUPP;
+
+	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
+					   max_tx_rate);
+}
+
 static int mlx5_vport_link2ifla(u8 esw_link)
 {
 	switch (esw_link) {
@@ -3232,6 +3246,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
 	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
 	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
+	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
 	.ndo_get_vf_config       = mlx5e_get_vf_config,
 	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
 	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
......
@@ -328,7 +328,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_eswitch_rep *rep = priv->ppriv;
 	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_flow_rule *flow_rule;
+	struct mlx5_flow_handle *flow_rule;
 	int err;
 	int i;
@@ -360,7 +360,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	return 0;

 err_del_flow_rule:
-	mlx5_del_flow_rule(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rep->vport_rx_rule);
 err_destroy_direct_tirs:
 	mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
@@ -375,7 +375,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 	int i;

 	mlx5e_tc_cleanup(priv);
-	mlx5_del_flow_rule(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rep->vport_rx_rule);
 	mlx5e_destroy_direct_tirs(priv);
 	for (i = 0; i < priv->params.num_channels; i++)
 		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
......
@@ -47,21 +47,22 @@
 struct mlx5e_tc_flow {
 	struct rhash_head	node;
 	u64			cookie;
-	struct mlx5_flow_rule	*rule;
+	struct mlx5_flow_handle *rule;
 	struct mlx5_esw_flow_attr *attr;
 };

 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
 #define MLX5E_TC_TABLE_NUM_GROUPS 4

-static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+static struct mlx5_flow_handle *
+mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 		      struct mlx5_flow_spec *spec,
 		      u32 action, u32 flow_tag)
 {
 	struct mlx5_core_dev *dev = priv->mdev;
 	struct mlx5_flow_destination dest = { 0 };
 	struct mlx5_fc *counter = NULL;
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;
 	bool table_created = false;

 	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
@@ -94,9 +95,9 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 	}

 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
-				  action, flow_tag,
-				  &dest);
+	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec,
+				   action, flow_tag,
+				   &dest, 1);
 	if (IS_ERR(rule))
 		goto err_add_rule;
@@ -114,7 +115,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 	return rule;
 }

-static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+static struct mlx5_flow_handle *
+mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		      struct mlx5_flow_spec *spec,
 		      struct mlx5_esw_flow_attr *attr)
 {
@@ -129,7 +131,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 }

 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
-			      struct mlx5_flow_rule *rule,
+			      struct mlx5_flow_handle *rule,
 			      struct mlx5_esw_flow_attr *attr)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -140,7 +142,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 	if (esw && esw->mode == SRIOV_OFFLOADS)
 		mlx5_eswitch_del_vlan_action(esw, attr);

-	mlx5_del_flow_rule(rule);
+	mlx5_del_flow_rules(rule);

 	mlx5_fc_destroy(priv->mdev, counter);
@@ -417,7 +419,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				return -EINVAL;
 			}

-			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+					MLX5_FLOW_CONTEXT_ACTION_COUNT;
 			out_priv = netdev_priv(out_dev);
 			attr->out_rep = out_priv->ppriv;
 			continue;
@@ -450,7 +453,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 	u32 flow_tag, action;
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_flow_spec *spec;
-	struct mlx5_flow_rule *old = NULL;
+	struct mlx5_flow_handle *old = NULL;
 	struct mlx5_esw_flow_attr *old_attr = NULL;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -511,7 +514,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 	goto out;

 err_del_rule:
-	mlx5_del_flow_rule(flow->rule);
+	mlx5_del_flow_rules(flow->rule);

 err_free:
 	if (!old)
......
@@ -469,7 +469,7 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
 int mlx5_start_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
 	int err;

 	if (MLX5_CAP_GEN(dev, pg))
......
@@ -97,16 +97,16 @@ struct vport_ingress {
 	struct mlx5_flow_group *allow_spoofchk_only_grp;
 	struct mlx5_flow_group *allow_untagged_only_grp;
 	struct mlx5_flow_group *drop_grp;
-	struct mlx5_flow_rule  *allow_rule;
-	struct mlx5_flow_rule  *drop_rule;
+	struct mlx5_flow_handle  *allow_rule;
+	struct mlx5_flow_handle  *drop_rule;
 };

 struct vport_egress {
 	struct mlx5_flow_table *acl;
 	struct mlx5_flow_group *allowed_vlans_grp;
 	struct mlx5_flow_group *drop_grp;
-	struct mlx5_flow_rule  *allowed_vlan;
-	struct mlx5_flow_rule  *drop_rule;
+	struct mlx5_flow_handle  *allowed_vlan;
+	struct mlx5_flow_handle  *drop_rule;
 };

 struct mlx5_vport_info {
@@ -115,6 +115,7 @@ struct mlx5_vport_info {
 	u8                      qos;
 	u64                     node_guid;
 	int                     link_state;
+	u32                     max_rate;
 	bool                    spoofchk;
 	bool                    trusted;
 };
@@ -124,8 +125,8 @@ struct mlx5_vport {
 	int                     vport;
 	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
 	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
-	struct mlx5_flow_rule   *promisc_rule;
-	struct mlx5_flow_rule   *allmulti_rule;
+	struct mlx5_flow_handle *promisc_rule;
+	struct mlx5_flow_handle *allmulti_rule;
 	struct work_struct      vport_change_handler;

 	struct vport_ingress    ingress;
@@ -133,6 +134,11 @@ struct mlx5_vport {
 	struct mlx5_vport_info  info;

+	struct {
+		bool            enabled;
+		u32             esw_tsar_ix;
+	} qos;
+
 	bool                    enabled;
 	u16                     enabled_events;
 };
@@ -156,7 +162,7 @@ struct mlx5_eswitch_fdb {
 	struct mlx5_flow_table *fdb;
 	struct mlx5_flow_group *send_to_vport_grp;
 	struct mlx5_flow_group *miss_grp;
-	struct mlx5_flow_rule  *miss_rule;
+	struct mlx5_flow_handle *miss_rule;
 	int vlan_push_pop_refcount;
 	} offloads;
 };
@@ -169,7 +175,7 @@ enum {
 };

 struct mlx5_esw_sq {
-	struct mlx5_flow_rule	*send_to_vport_rule;
+	struct mlx5_flow_handle	*send_to_vport_rule;
 	struct list_head	 list;
 };
@@ -182,7 +188,7 @@ struct mlx5_eswitch_rep {
 	u8		       hw_id[ETH_ALEN];
 	void		      *priv_data;

-	struct mlx5_flow_rule *vport_rx_rule;
+	struct mlx5_flow_handle *vport_rx_rule;
 	struct list_head       vport_sqs_list;
 	u16		       vlan;
 	u32		       vlan_refcount;
@@ -209,6 +215,12 @@ struct mlx5_eswitch {
 	 */
 	struct mutex            state_lock;
 	struct esw_mc_addr      *mc_promisc;

+	struct {
+		bool            enabled;
+		u32             root_tsar_id;
+	} qos;
+
 	struct mlx5_esw_offload offloads;
 	int                     mode;
 };
@@ -234,6 +246,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
 				    int vport, bool spoofchk);
 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
 				 int vport_num, bool setting);
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
+				int vport, u32 max_rate);
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 				  int vport, struct ifla_vf_info *ivi);
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -243,11 +257,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;

-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr);
-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);

 enum {
......
@@ -43,16 +43,17 @@ enum {
 	FDB_SLOW_PATH
 };

-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr)
 {
-	struct mlx5_flow_destination dest = { 0 };
+	struct mlx5_flow_destination dest[2] = {};
 	struct mlx5_fc *counter = NULL;
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;
 	void *misc;
 	int action;
+	int i = 0;

 	if (esw->mode != SRIOV_OFFLOADS)
 		return ERR_PTR(-EOPNOTSUPP);
@@ -60,15 +61,17 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	action = attr->action;

 	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-		dest.vport_num = attr->out_rep->vport;
-		action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+		dest[i].vport_num = attr->out_rep->vport;
+		i++;
+	}
+	if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
 		counter = mlx5_fc_create(esw->dev, true);
 		if (IS_ERR(counter))
 			return ERR_CAST(counter);
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-		dest.counter = counter;
+		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest[i].counter = counter;
+		i++;
 	}

 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
@@ -80,9 +83,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
 				      MLX5_MATCH_MISC_PARAMETERS;

-	rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb,
-				  spec, action, 0, &dest);
+	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
+				   spec, action, 0, dest, i);
 	if (IS_ERR(rule))
 		mlx5_fc_destroy(esw->dev, counter);
@@ -269,11 +271,11 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 	return err;
 }

-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
 {
 	struct mlx5_flow_destination dest;
-	struct mlx5_flow_rule *flow_rule;
+	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_flow_spec *spec;
 	void *misc;
@@ -296,9 +298,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport_num = vport;

-	flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
-				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				       0, &dest);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					0, &dest, 1);
 	if (IS_ERR(flow_rule))
 		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
 out:
@@ -315,7 +317,7 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
 		return;

 	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
-		mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
+		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
 		list_del(&esw_sq->list);
 		kfree(esw_sq);
 	}
@@ -325,7 +327,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
 				 struct mlx5_eswitch_rep *rep,
 				 u16 *sqns_array, int sqns_num)
 {
-	struct mlx5_flow_rule *flow_rule;
+	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_esw_sq *esw_sq;
 	int err;
 	int i;
@@ -362,7 +364,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 {
 	struct mlx5_flow_destination dest;
-	struct mlx5_flow_rule *flow_rule = NULL;
+	struct mlx5_flow_handle *flow_rule = NULL;
 	struct mlx5_flow_spec *spec;
 	int err = 0;
@@ -376,9 +378,9 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport_num = 0;

-	flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
-				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				       0, &dest);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					0, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		err = PTR_ERR(flow_rule);
 		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
@@ -501,7 +503,7 @@ static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
 		return;

 	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
-	mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
+	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
@@ -585,11 +587,11 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
 	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
 }

-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
 {
 	struct mlx5_flow_destination dest;
-	struct mlx5_flow_rule *flow_rule;
+	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_flow_spec *spec;
 	void *misc;
@@ -610,9 +612,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dest.tir_num = tirn;

-	flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
-				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				       0, &dest);
+	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
+					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					0, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
 		goto out;
......
@@ -94,6 +94,11 @@ struct mlx5_flow_rule {
 	u32					sw_action;
 };

+struct mlx5_flow_handle {
+	int num_rules;
+	struct mlx5_flow_rule *rule[];
+};
+
 /* Type of children is mlx5_flow_group */
 struct mlx5_flow_table {
 	struct fs_node			node;
......
@@ -1423,6 +1423,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
 	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
 	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5, PCIe 4.0 */
+	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5, PCIe 4.0 VF */
 	{ 0, }
 };
......
@@ -92,6 +92,13 @@ int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
 bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
 int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *context, u32 *element_id);
+int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *context, u32 element_id,
+				       u32 modify_bitmask);
+int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+					u32 element_id);
 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
 cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
 u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
......
@@ -36,6 +36,71 @@
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"

+/* Scheduling element fw management */
+int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *ctx, u32 *element_id)
+{
+	u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
+	void *schedc;
+	int err;
+
+	schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
+			      scheduling_context);
+	MLX5_SET(create_scheduling_element_in, in, opcode,
+		 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
+	MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
+		 hierarchy);
+	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	*element_id = MLX5_GET(create_scheduling_element_out, out,
+			       scheduling_element_id);
+	return 0;
+}
+
+int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *ctx, u32 element_id,
+				       u32 modify_bitmask)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
+	void *schedc;
+
+	schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
+			      scheduling_context);
+	MLX5_SET(modify_scheduling_element_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
+	MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
+		 element_id);
+	MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
+		 modify_bitmask);
+	MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
+		 hierarchy);
+	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+					u32 element_id)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
+
+	MLX5_SET(destroy_scheduling_element_in, in, opcode,
+		 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+	MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
+		 element_id);
+	MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
+		 hierarchy);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
 /* Finds an entry where we can register the given rate
  * If the rate already exists, return the entry where it is registered,
  * otherwise return the first available entry.
......
@@ -69,8 +69,8 @@ enum mlx5_flow_namespace_type {

 struct mlx5_flow_table;
 struct mlx5_flow_group;
-struct mlx5_flow_rule;
 struct mlx5_flow_namespace;
+struct mlx5_flow_handle;

 struct mlx5_flow_spec {
 	u8   match_criteria_enable;
@@ -127,18 +127,20 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
 /* Single destination per rule.
  * Group ID is implied by the match criteria.
  */
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
-		   struct mlx5_flow_spec *spec,
-		   u32 action,
-		   u32 flow_tag,
-		   struct mlx5_flow_destination *dest);
-void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+		    struct mlx5_flow_spec *spec,
+		    u32 action,
+		    u32 flow_tag,
+		    struct mlx5_flow_destination *dest,
+		    int dest_num);
+void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);

-int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
-				 struct mlx5_flow_destination *dest);
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
+				 struct mlx5_flow_destination *new_dest,
+				 struct mlx5_flow_destination *old_dest);

-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule);
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
 struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
......
@@ -145,6 +145,12 @@ enum {
 	MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
 	MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
 	MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
+	MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT     = 0x782,
+	MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT    = 0x783,
+	MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT      = 0x784,
+	MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT     = 0x785,
+	MLX5_CMD_OP_CREATE_QOS_PARA_VPORT         = 0x786,
+	MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT        = 0x787,
 	MLX5_CMD_OP_ALLOC_PD                      = 0x800,
 	MLX5_CMD_OP_DEALLOC_PD                    = 0x801,
 	MLX5_CMD_OP_ALLOC_UAR                     = 0x802,
@@ -537,13 +543,27 @@ struct mlx5_ifc_e_switch_cap_bits {
 struct mlx5_ifc_qos_cap_bits {
 	u8         packet_pacing[0x1];
-	u8         reserved_0[0x1f];
-	u8         reserved_1[0x20];
+	u8         esw_scheduling[0x1];
+	u8         reserved_at_2[0x1e];
+
+	u8         reserved_at_20[0x20];

 	u8         packet_pacing_max_rate[0x20];

 	u8         packet_pacing_min_rate[0x20];

-	u8         reserved_2[0x10];
+	u8         reserved_at_80[0x10];
 	u8         packet_pacing_rate_table_size[0x10];

-	u8         reserved_3[0x760];
+	u8         esw_element_type[0x10];
+	u8         esw_tsar_type[0x10];
+
+	u8         reserved_at_c0[0x10];
+	u8         max_qos_para_vport[0x10];
+
+	u8         max_tsar_bw_share[0x20];
+
+	u8         reserved_at_100[0x700];
 };

 struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -2333,6 +2353,30 @@ struct mlx5_ifc_sqc_bits {
 	struct mlx5_ifc_wq_bits wq;
 };

+enum {
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR           = 0x0,
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT          = 0x1,
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC       = 0x2,
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC  = 0x3,
+};
+
+struct mlx5_ifc_scheduling_context_bits {
+	u8         element_type[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         element_attributes[0x20];
+
+	u8         parent_element_id[0x20];
+
+	u8         reserved_at_60[0x40];
+
+	u8         bw_share[0x20];
+
+	u8         max_average_bw[0x20];
+
+	u8         reserved_at_e0[0x120];
+};
+
 struct mlx5_ifc_rqtc_bits {
 	u8         reserved_at_0[0xa0];
@@ -2844,7 +2888,7 @@ struct mlx5_ifc_xrqc_bits {
 	struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;

-	u8         reserved_at_180[0x200];
+	u8         reserved_at_180[0x880];

 	struct mlx5_ifc_wq_bits wq;
 };
@@ -2920,6 +2964,29 @@ struct mlx5_ifc_register_loopback_control_bits {
 	u8         reserved_at_20[0x60];
 };

+struct mlx5_ifc_vport_tc_element_bits {
+	u8         traffic_class[0x4];
+	u8         reserved_at_4[0xc];
+	u8         vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_element_bits {
+	u8         reserved_at_0[0x10];
+	u8         vport_number[0x10];
+};
+
+enum {
+	TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+	TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+	TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+};
+
+struct mlx5_ifc_tsar_element_bits {
+	u8         reserved_at_0[0x8];
+	u8         tsar_type[0x8];
+	u8         reserved_at_10[0x10];
+};
+
 struct mlx5_ifc_teardown_hca_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
@@ -3540,6 +3607,39 @@ struct mlx5_ifc_query_special_contexts_in_bits {
 	u8         reserved_at_40[0x40];
 };

+struct mlx5_ifc_query_scheduling_element_out_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+	u8         reserved_at_300[0x100];
+};
+
+enum {
+	SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+};
+
+struct mlx5_ifc_query_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_80[0x180];
+};
+
 struct mlx5_ifc_query_rqt_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
@@ -4725,6 +4825,43 @@ struct mlx5_ifc_modify_sq_in_bits {
 	struct mlx5_ifc_sqc_bits ctx;
 };

+struct mlx5_ifc_modify_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x1c0];
+};
+
+enum {
+	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1,
+	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2,
+};
+
+struct mlx5_ifc_modify_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_80[0x20];
+
+	u8         modify_bitmask[0x20];
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+	u8         reserved_at_300[0x100];
+};
+
 struct mlx5_ifc_modify_rqt_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
@@ -5390,6 +5527,30 @@ struct mlx5_ifc_destroy_sq_in_bits {
 	u8         reserved_at_60[0x20];
 };

+struct mlx5_ifc_destroy_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x1c0];
+};
+
+struct mlx5_ifc_destroy_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_80[0x180];
+};
+
 struct mlx5_ifc_destroy_rqt_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
@@ -6017,6 +6178,36 @@ struct mlx5_ifc_create_sq_in_bits {
 	struct mlx5_ifc_sqc_bits ctx;
 };

+struct mlx5_ifc_create_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_a0[0x160];
+};
+
+struct mlx5_ifc_create_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         reserved_at_60[0xa0];
+
+	struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+	u8         reserved_at_300[0x100];
+};
+
 struct mlx5_ifc_create_rqt_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
......
@@ -55,7 +55,7 @@ struct mlx5_srq_attr {
 	u32 lwm;
 	u32 user_index;
 	u64 db_record;
-	u64 *pas;
+	__be64 *pas;
 };

 struct mlx5_core_dev;
......