Commit 3e3cf2e8 authored by Jason Gunthorpe

Merge branch 'mlx5_packet_pacing' into rdma.git for-next

Yishai Hadas says:

====================
Expose raw packet pacing APIs to be used by DEVX-based applications. The
existing code was refactored to have a single flow with the new raw APIs.
====================
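
The userspace half of these two methods is exposed by rdma-core through the
mlx5dv_pp_alloc()/mlx5dv_pp_free() helpers. As a minimal sketch (not part of
this commit; the helper name pp_alloc_dedicated is illustrative, and the
device must be opened as a DEVX context, e.g. with MLX5DV_CONTEXT_FLAGS_DEVX),
a DEVX application could take a dedicated rate-limit index like this, relying
on the kernel handler below accepting a partial set_pp_rate_limit_context of
as little as one byte:

#include <endian.h>
#include <stdint.h>
#include <infiniband/mlx5dv.h>

/* Allocate a dedicated packet pacing index limited to rate_kbps.
 * The buffer follows the set_pp_rate_limit_context layout added by this
 * series: the first big-endian dword is rate_limit, and the kernel
 * accepts a partial context, so only that dword is passed here.
 */
static struct mlx5dv_pp *pp_alloc_dedicated(struct ibv_context *ctx,
                                            uint32_t rate_kbps)
{
        uint32_t rl_ctx = htobe32(rate_kbps);

        return mlx5dv_pp_alloc(ctx, sizeof(rl_ctx), &rl_ctx,
                               MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
}

/* On success, pp->index is the rate_limit_index to plug into a
 * DEVX-created SQ; it is returned to the kernel with mlx5dv_pp_free(pp).
 */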

Based on the mlx5-next branch at
 git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
due to dependencies.

* branch 'mlx5_packet_pacing':
  IB/mlx5: Introduce UAPIs to manage packet pacing
  net/mlx5: Expose raw packet pacing APIs
parents 0aeb3622 30f2fe40
@@ -8,3 +8,4 @@ mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
 mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
 mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
 mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += qos.o
@@ -6251,6 +6251,7 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
 static const struct uapi_definition mlx5_ib_defs[] = {
         UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
         UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
+        UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
         UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
                                 &mlx5_ib_flow_action),
......
@@ -203,6 +203,11 @@ struct mlx5_ib_flow_matcher {
         u8 match_criteria_enable;
 };
 
+struct mlx5_ib_pp {
+        u16 index;
+        struct mlx5_core_dev *mdev;
+};
+
 struct mlx5_ib_flow_db {
         struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
         struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
@@ -1383,6 +1388,7 @@ int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
 
 extern const struct uapi_definition mlx5_ib_devx_defs[];
 extern const struct uapi_definition mlx5_ib_flow_defs[];
+extern const struct uapi_definition mlx5_ib_qos_defs[];
 
 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static bool pp_is_supported(struct ib_device *device)
{
        struct mlx5_ib_dev *dev = to_mdev(device);

        return (MLX5_CAP_GEN(dev->mdev, qos) &&
                MLX5_CAP_QOS(dev->mdev, packet_pacing) &&
                MLX5_CAP_QOS(dev->mdev, packet_pacing_uid));
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
        struct uverbs_attr_bundle *attrs)
{
        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
        struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
                MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
        struct mlx5_ib_dev *dev;
        struct mlx5_ib_ucontext *c;
        struct mlx5_ib_pp *pp_entry;
        void *in_ctx;
        u16 uid;
        int inlen;
        u32 flags;
        int err;

        c = to_mucontext(ib_uverbs_get_ucontext(attrs));
        if (IS_ERR(c))
                return PTR_ERR(c);

        /* The allocated entry can be used only by a DEVX context */
        if (!c->devx_uid)
                return -EINVAL;

        dev = to_mdev(c->ibucontext.device);
        pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL);
        if (!pp_entry)
                return -ENOMEM;

        in_ctx = uverbs_attr_get_alloced_ptr(attrs,
                                             MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
        inlen = uverbs_attr_get_len(attrs,
                                    MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
        memcpy(rl_raw, in_ctx, inlen);
        err = uverbs_get_flags32(&flags, attrs,
                                 MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
                                 MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX);
        if (err)
                goto err;

        uid = (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX) ?
                c->devx_uid : MLX5_SHARED_RESOURCE_UID;

        err = mlx5_rl_add_rate_raw(dev->mdev, rl_raw, uid,
                        (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX),
                        &pp_entry->index);
        if (err)
                goto err;

        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
                             &pp_entry->index, sizeof(pp_entry->index));
        if (err)
                goto clean;

        pp_entry->mdev = dev->mdev;
        uobj->object = pp_entry;
        return 0;

clean:
        mlx5_rl_remove_rate_raw(dev->mdev, pp_entry->index);
err:
        kfree(pp_entry);
        return err;
}

static int pp_obj_cleanup(struct ib_uobject *uobject,
                          enum rdma_remove_reason why,
                          struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_pp *pp_entry = uobject->object;

        mlx5_rl_remove_rate_raw(pp_entry->mdev, pp_entry->index);
        kfree(pp_entry);
        return 0;
}

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_PP_OBJ_ALLOC,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE,
                        MLX5_IB_OBJECT_PP,
                        UVERBS_ACCESS_NEW,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(
                MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
                UVERBS_ATTR_SIZE(1,
                                 MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)),
                UA_MANDATORY,
                UA_ALLOC_AND_COPY),
        UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
                             enum mlx5_ib_uapi_pp_alloc_flags,
                             UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
                            UVERBS_ATTR_TYPE(u16),
                            UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
        MLX5_IB_METHOD_PP_OBJ_DESTROY,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE,
                        MLX5_IB_OBJECT_PP,
                        UVERBS_ACCESS_DESTROY,
                        UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_PP,
                            UVERBS_TYPE_ALLOC_IDR(pp_obj_cleanup),
                            &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_ALLOC),
                            &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_DESTROY));

const struct uapi_definition mlx5_ib_qos_defs[] = {
        UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
                MLX5_IB_OBJECT_PP,
                UAPI_DEF_IS_OBJ_SUPPORTED(pp_is_supported)),
        {},
};
@@ -204,7 +204,7 @@ struct mlx5e_tx_wqe {
 
 struct mlx5e_rx_wqe_ll {
         struct mlx5_wqe_srq_next_seg next;
-        struct mlx5_wqe_data_seg data[0];
+        struct mlx5_wqe_data_seg data[];
 };
 
 struct mlx5e_rx_wqe_cyc {
......
@@ -57,7 +57,7 @@ struct mlx5_fpga_ipsec_cmd_context {
         struct completion complete;
         struct mlx5_fpga_device *dev;
         struct list_head list; /* Item in pending_cmds */
-        u8 command[0];
+        u8 command[];
 };
 
 struct mlx5_fpga_esp_xfrm;
......
@@ -470,7 +470,7 @@ struct mlx5_fc_bulk {
         u32 base_id;
         int bulk_len;
         unsigned long *bitmask;
-        struct mlx5_fc fcs[0];
+        struct mlx5_fc fcs[];
 };
 
 static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
......
@@ -56,7 +56,7 @@ struct mlx5i_priv {
         u32 qkey;
         u16 pkey_index;
         struct mlx5i_pkey_qpn_ht *qpn_htbl;
-        char *mlx5e_priv[0];
+        char *mlx5e_priv[];
 };
 
 int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
@@ -107,7 +107,7 @@ struct mlx5i_tx_wqe {
         struct mlx5_wqe_datagram_seg datagram;
         struct mlx5_wqe_eth_pad pad;
         struct mlx5_wqe_eth_seg eth;
-        struct mlx5_wqe_data_seg data[0];
+        struct mlx5_wqe_data_seg data[];
 };
 
 static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
......
@@ -101,22 +101,39 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
         return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
+static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
+                                  u16 uid)
+{
+        return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
+                entry->uid == uid);
+}
+
 /* Finds an entry where we can register the given rate
  * If the rate already exists, return the entry where it is registered,
  * otherwise return the first available entry.
  * If the table is full, return NULL
  */
 static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
-                                           struct mlx5_rate_limit *rl)
+                                           void *rl_in, u16 uid, bool dedicated)
 {
         struct mlx5_rl_entry *ret_entry = NULL;
         bool empty_found = false;
         int i;
 
         for (i = 0; i < table->max_size; i++) {
-                if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
-                        return &table->rl_entry[i];
-                if (!empty_found && !table->rl_entry[i].rl.rate) {
+                if (dedicated) {
+                        if (!table->rl_entry[i].refcount)
+                                return &table->rl_entry[i];
+                        continue;
+                }
+                if (table->rl_entry[i].refcount) {
+                        if (table->rl_entry[i].dedicated)
+                                continue;
+                        if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
+                                                  uid))
+                                return &table->rl_entry[i];
+                } else if (!empty_found) {
                         empty_found = true;
                         ret_entry = &table->rl_entry[i];
                 }
@@ -126,18 +143,19 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
 }
 
 static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
-                                      u16 index,
-                                      struct mlx5_rate_limit *rl)
+                                      struct mlx5_rl_entry *entry, bool set)
 {
-        u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
-        u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
+        u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
+        u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+        void *pp_context;
 
+        pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
         MLX5_SET(set_pp_rate_limit_in, in, opcode,
                  MLX5_CMD_OP_SET_PP_RATE_LIMIT);
-        MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
-        MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
-        MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
-        MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
+        MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
+        MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
+        if (set)
+                memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
         return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
@@ -158,23 +176,25 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
 }
 EXPORT_SYMBOL(mlx5_rl_are_equal);
 
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
-                     struct mlx5_rate_limit *rl)
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+                         bool dedicated_entry, u16 *index)
 {
         struct mlx5_rl_table *table = &dev->priv.rl_table;
         struct mlx5_rl_entry *entry;
         int err = 0;
+        u32 rate;
 
+        rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
         mutex_lock(&table->rl_lock);
-        if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
+        if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
                 mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
-                              rl->rate, table->min_rate, table->max_rate);
+                              rate, table->min_rate, table->max_rate);
                 err = -EINVAL;
                 goto out;
         }
 
-        entry = find_rl_entry(table, rl);
+        entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
         if (!entry) {
                 mlx5_core_err(dev, "Max number of %u rates reached\n",
                               table->max_size);
@@ -185,16 +205,24 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
                 /* rate already configured */
                 entry->refcount++;
         } else {
+                memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
+                entry->uid = uid;
                 /* new rate limit */
-                err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
+                err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
                 if (err) {
-                        mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
-                                      err, rl->rate, rl->max_burst_sz,
-                                      rl->typical_pkt_sz);
+                        mlx5_core_err(
+                                dev,
+                                "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+                                err, rate,
+                                MLX5_GET(set_pp_rate_limit_context, rl_in,
+                                         burst_upper_bound),
+                                MLX5_GET(set_pp_rate_limit_context, rl_in,
+                                         typical_packet_size));
                         goto out;
                 }
-                entry->rl = *rl;
+
                 entry->refcount = 1;
+                entry->dedicated = dedicated_entry;
         }
         *index = entry->index;
@@ -202,20 +230,61 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
         mutex_unlock(&table->rl_lock);
         return err;
 }
+EXPORT_SYMBOL(mlx5_rl_add_rate_raw);
+
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
+{
+        struct mlx5_rl_table *table = &dev->priv.rl_table;
+        struct mlx5_rl_entry *entry;
+
+        mutex_lock(&table->rl_lock);
+        entry = &table->rl_entry[index - 1];
+        entry->refcount--;
+        if (!entry->refcount)
+                /* need to remove rate */
+                mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+        mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+                     struct mlx5_rate_limit *rl)
+{
+        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+                 rl->max_burst_sz);
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+                 rl->typical_pkt_sz);
+
+        return mlx5_rl_add_rate_raw(dev, rl_raw,
+                                    MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+                                        MLX5_SHARED_RESOURCE_UID : 0,
+                                    false, index);
+}
 EXPORT_SYMBOL(mlx5_rl_add_rate);
 
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
 {
+        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
         struct mlx5_rl_table *table = &dev->priv.rl_table;
         struct mlx5_rl_entry *entry = NULL;
-        struct mlx5_rate_limit reset_rl = {0};
 
         /* 0 is a reserved value for unlimited rate */
         if (rl->rate == 0)
                 return;
 
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+                 rl->max_burst_sz);
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+                 rl->typical_pkt_sz);
+
         mutex_lock(&table->rl_lock);
-        entry = find_rl_entry(table, rl);
+        entry = find_rl_entry(table, rl_raw,
+                              MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+                                  MLX5_SHARED_RESOURCE_UID : 0, false);
         if (!entry || !entry->refcount) {
                 mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
                                rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
@@ -223,11 +292,9 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
         }
 
         entry->refcount--;
-        if (!entry->refcount) {
+        if (!entry->refcount)
                 /* need to remove rate */
-                mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl);
-                entry->rl = reset_rl;
-        }
+                mlx5_set_pp_rate_limit_cmd(dev, entry, false);
 
 out:
         mutex_unlock(&table->rl_lock);
@@ -273,14 +340,13 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
 {
         struct mlx5_rl_table *table = &dev->priv.rl_table;
-        struct mlx5_rate_limit rl = {0};
         int i;
 
         /* Clear all configured rates */
         for (i = 0; i < table->max_size; i++)
-                if (table->rl_entry[i].rl.rate)
-                        mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index,
-                                                   &rl);
+                if (table->rl_entry[i].refcount)
+                        mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
+                                                   false);
 
         kfree(dev->priv.rl_table.rl_entry);
 }
@@ -518,9 +518,11 @@ struct mlx5_rate_limit {
 };
 
 struct mlx5_rl_entry {
-        struct mlx5_rate_limit rl;
+        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
         u16 index;
-        u16 refcount;
+        u64 refcount;
+        u16 uid;
+        u8 dedicated : 1;
 };
 
 struct mlx5_rl_table {
@@ -1007,6 +1009,9 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
                      struct mlx5_rate_limit *rl);
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+                         bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
 bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
                        struct mlx5_rate_limit *rl_1);
 int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
......
@@ -813,7 +813,9 @@ struct mlx5_ifc_qos_cap_bits {
         u8         reserved_at_4[0x1];
         u8         packet_pacing_burst_bound[0x1];
         u8         packet_pacing_typical_size[0x1];
-        u8         reserved_at_7[0x19];
+        u8         reserved_at_7[0x4];
+        u8         packet_pacing_uid[0x1];
+        u8         reserved_at_c[0x14];
 
         u8         reserved_at_20[0x20];
@@ -8265,9 +8267,20 @@ struct mlx5_ifc_set_pp_rate_limit_out_bits {
         u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_set_pp_rate_limit_context_bits {
+        u8         rate_limit[0x20];
+
+        u8         burst_upper_bound[0x20];
+
+        u8         reserved_at_40[0x10];
+        u8         typical_packet_size[0x10];
+
+        u8         reserved_at_60[0x120];
+};
+
 struct mlx5_ifc_set_pp_rate_limit_in_bits {
         u8         opcode[0x10];
-        u8         reserved_at_10[0x10];
+        u8         uid[0x10];
 
         u8         reserved_at_20[0x10];
         u8         op_mod[0x10];
@@ -8277,14 +8290,7 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
 
         u8         reserved_at_60[0x20];
 
-        u8         rate_limit[0x20];
-
-        u8         burst_upper_bound[0x20];
-
-        u8         reserved_at_c0[0x10];
-        u8         typical_packet_size[0x10];
-
-        u8         reserved_at_e0[0x120];
+        struct mlx5_ifc_set_pp_rate_limit_context_bits ctx;
 };
 
 struct mlx5_ifc_access_register_out_bits {
......
@@ -608,7 +608,7 @@ struct mlx5_ifc_tls_cmd_bits {
 struct mlx5_ifc_tls_resp_bits {
         u8         syndrome[0x20];
         u8         stream_id[0x20];
-        u8         reserverd[0x40];
+        u8         reserved[0x40];
 };
 
 #define MLX5_TLS_COMMAND_SIZE (0x100)
......
@@ -143,6 +143,22 @@ enum mlx5_ib_devx_umem_dereg_attrs {
         MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
 };
 
+enum mlx5_ib_pp_obj_methods {
+        MLX5_IB_METHOD_PP_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+        MLX5_IB_METHOD_PP_OBJ_DESTROY,
+};
+
+enum mlx5_ib_pp_alloc_attrs {
+        MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+        MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
+        MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+        MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+};
+
+enum mlx5_ib_pp_obj_destroy_attrs {
+        MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
 enum mlx5_ib_devx_umem_methods {
         MLX5_IB_METHOD_DEVX_UMEM_REG = (1U << UVERBS_ID_NS_SHIFT),
         MLX5_IB_METHOD_DEVX_UMEM_DEREG,
@@ -173,6 +189,7 @@ enum mlx5_ib_objects {
         MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
         MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
         MLX5_IB_OBJECT_VAR,
+        MLX5_IB_OBJECT_PP,
 };
 
 enum mlx5_ib_flow_matcher_create_attrs {
......
@@ -73,5 +73,9 @@ struct mlx5_ib_uapi_devx_async_event_hdr {
         __u8            out_data[];
 };
 
+enum mlx5_ib_uapi_pp_alloc_flags {
+        MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX = 1 << 0,
+};
+
 #endif