Commit 1326034b authored by Yishai Hadas, committed by Leon Romanovsky

net/mlx5: Expose raw packet pacing APIs

Expose raw packet pacing APIs to be used by DEVX-based applications.
The existing code was refactored so that it shares a single flow with
the new raw APIs.

The new raw APIs take the 'pp_rate_limit_context', the uid and a
'dedicated' flag as input, and consider all of them when looking for an
existing entry.

This raw mode allows future device specification data to be carried in
the raw context without changing the existing logic and code.

The ability to ask for a dedicated entry gives an application control
to allocate entries according to its needs.

A dedicated entry cannot be used by any other process, and it also lets
a process spread its resources over separate entries so that different
hardware resources are used to enforce the rate.

The per-entry reference counter was changed to u64 to prevent any
possibility of overflow.
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
parent 339ffae5
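
For illustration only (not part of this patch): a kernel-side caller could drive the new raw API roughly as sketched below. The function names example_alloc_dedicated_pp/example_free_dedicated_pp and the numeric rate value are hypothetical; the macros and the mlx5_rl_add_rate_raw()/mlx5_rl_remove_rate_raw() calls are the ones introduced by the diff that follows.

/* Illustrative sketch, not from the patch: allocate a dedicated packet
 * pacing entry from a raw context and later release it by index.
 */
static int example_alloc_dedicated_pp(struct mlx5_core_dev *dev, u16 *index)
{
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
	u16 uid = MLX5_CAP_QOS(dev, packet_pacing_uid) ?
		  MLX5_SHARED_RESOURCE_UID : 0;

	/* Fill the raw context; 100000 is an arbitrary example rate. */
	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, 100000);

	/* dedicated_entry = true: do not share this entry with other users. */
	return mlx5_rl_add_rate_raw(dev, rl_raw, uid, true, index);
}

static void example_free_dedicated_pp(struct mlx5_core_dev *dev, u16 index)
{
	/* The firmware rate limit is cleared once the refcount drops to 0. */
	mlx5_rl_remove_rate_raw(dev, index);
}
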
@@ -101,22 +101,39 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
+static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
+				  u16 uid)
+{
+	return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
+		entry->uid == uid);
+}
+
 /* Finds an entry where we can register the given rate
  * If the rate already exists, return the entry where it is registered,
  * otherwise return the first available entry.
  * If the table is full, return NULL
  */
 static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
-					   struct mlx5_rate_limit *rl)
+					   void *rl_in, u16 uid, bool dedicated)
 {
 	struct mlx5_rl_entry *ret_entry = NULL;
 	bool empty_found = false;
 	int i;
 
 	for (i = 0; i < table->max_size; i++) {
-		if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
-			return &table->rl_entry[i];
-		if (!empty_found && !table->rl_entry[i].rl.rate) {
+		if (dedicated) {
+			if (!table->rl_entry[i].refcount)
+				return &table->rl_entry[i];
+			continue;
+		}
+
+		if (table->rl_entry[i].refcount) {
+			if (table->rl_entry[i].dedicated)
+				continue;
+			if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
+						  uid))
+				return &table->rl_entry[i];
+		} else if (!empty_found) {
 			empty_found = true;
 			ret_entry = &table->rl_entry[i];
 		}
@@ -126,18 +143,19 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
 }
 
 static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
-				      u16 index,
-				      struct mlx5_rate_limit *rl)
+				      struct mlx5_rl_entry *entry, bool set)
 {
-	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
-	u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
+	u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+	void *pp_context;
 
+	pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
 	MLX5_SET(set_pp_rate_limit_in, in, opcode,
 		 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
-	MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
-	MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
-	MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
-	MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
+	MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
+	MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
+	if (set)
+		memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
@@ -158,23 +176,25 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
 }
 EXPORT_SYMBOL(mlx5_rl_are_equal);
 
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
-		     struct mlx5_rate_limit *rl)
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+			 bool dedicated_entry, u16 *index)
 {
 	struct mlx5_rl_table *table = &dev->priv.rl_table;
 	struct mlx5_rl_entry *entry;
 	int err = 0;
+	u32 rate;
 
+	rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
 	mutex_lock(&table->rl_lock);
 
-	if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
+	if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
 		mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
-			      rl->rate, table->min_rate, table->max_rate);
+			      rate, table->min_rate, table->max_rate);
 		err = -EINVAL;
 		goto out;
 	}
 
-	entry = find_rl_entry(table, rl);
+	entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
 	if (!entry) {
 		mlx5_core_err(dev, "Max number of %u rates reached\n",
 			      table->max_size);
@@ -185,16 +205,24 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
 		/* rate already configured */
 		entry->refcount++;
 	} else {
+		memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
+		entry->uid = uid;
 		/* new rate limit */
-		err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
+		err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
 		if (err) {
-			mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
-				      err, rl->rate, rl->max_burst_sz,
-				      rl->typical_pkt_sz);
+			mlx5_core_err(
+				dev,
+				"Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+				err, rate,
+				MLX5_GET(set_pp_rate_limit_context, rl_in,
+					 burst_upper_bound),
+				MLX5_GET(set_pp_rate_limit_context, rl_in,
+					 typical_packet_size));
+
 			goto out;
 		}
 
-		entry->rl = *rl;
 		entry->refcount = 1;
+		entry->dedicated = dedicated_entry;
 	}
 	*index = entry->index;
 
@@ -202,20 +230,61 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
 	mutex_unlock(&table->rl_lock);
 	return err;
 }
+EXPORT_SYMBOL(mlx5_rl_add_rate_raw);
+
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
+{
+	struct mlx5_rl_table *table = &dev->priv.rl_table;
+	struct mlx5_rl_entry *entry;
+
+	mutex_lock(&table->rl_lock);
+	entry = &table->rl_entry[index - 1];
+	entry->refcount--;
+	if (!entry->refcount)
+		/* need to remove rate */
+		mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+	mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+		     struct mlx5_rate_limit *rl)
+{
+	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+
+	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+	MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+		 rl->max_burst_sz);
+	MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+		 rl->typical_pkt_sz);
+
+	return mlx5_rl_add_rate_raw(dev, rl_raw,
+				    MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+					MLX5_SHARED_RESOURCE_UID : 0,
+				    false, index);
+}
 EXPORT_SYMBOL(mlx5_rl_add_rate);
 
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
 {
+	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
 	struct mlx5_rl_table *table = &dev->priv.rl_table;
 	struct mlx5_rl_entry *entry = NULL;
-	struct mlx5_rate_limit reset_rl = {0};
 
 	/* 0 is a reserved value for unlimited rate */
 	if (rl->rate == 0)
 		return;
 
+	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+	MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+		 rl->max_burst_sz);
+	MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+		 rl->typical_pkt_sz);
+
 	mutex_lock(&table->rl_lock);
-	entry = find_rl_entry(table, rl);
+	entry = find_rl_entry(table, rl_raw,
+			      MLX5_CAP_QOS(dev, packet_pacing_uid) ?
				MLX5_SHARED_RESOURCE_UID : 0, false);
 	if (!entry || !entry->refcount) {
 		mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
 			       rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
@@ -223,11 +292,9 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
 	}
 
 	entry->refcount--;
-	if (!entry->refcount) {
+	if (!entry->refcount)
 		/* need to remove rate */
-		mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl);
-		entry->rl = reset_rl;
-	}
+		mlx5_set_pp_rate_limit_cmd(dev, entry, false);
 
 out:
 	mutex_unlock(&table->rl_lock);
@@ -273,14 +340,13 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
 {
 	struct mlx5_rl_table *table = &dev->priv.rl_table;
-	struct mlx5_rate_limit rl = {0};
 	int i;
 
 	/* Clear all configured rates */
 	for (i = 0; i < table->max_size; i++)
-		if (table->rl_entry[i].rl.rate)
-			mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index,
-						   &rl);
+		if (table->rl_entry[i].refcount)
+			mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
+						   false);
 
 	kfree(dev->priv.rl_table.rl_entry);
 }
@@ -518,9 +518,11 @@ struct mlx5_rate_limit {
 };
 
 struct mlx5_rl_entry {
-	struct mlx5_rate_limit	rl;
+	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
 	u16 index;
-	u16 refcount;
+	u64 refcount;
+	u16 uid;
+	u8 dedicated : 1;
 };
 
 struct mlx5_rl_table {
@@ -1007,6 +1009,9 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
 		     struct mlx5_rate_limit *rl);
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+			 bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
 bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
 		       struct mlx5_rate_limit *rl_1);
 int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
......
@@ -810,7 +810,9 @@ struct mlx5_ifc_qos_cap_bits {
 	u8         reserved_at_4[0x1];
 	u8         packet_pacing_burst_bound[0x1];
 	u8         packet_pacing_typical_size[0x1];
-	u8         reserved_at_7[0x19];
+	u8         reserved_at_7[0x4];
+	u8         packet_pacing_uid[0x1];
+	u8         reserved_at_c[0x14];
 
 	u8         reserved_at_20[0x20];
 
@@ -8262,9 +8264,20 @@ struct mlx5_ifc_set_pp_rate_limit_out_bits {
 	u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_set_pp_rate_limit_context_bits {
+	u8         rate_limit[0x20];
+
+	u8         burst_upper_bound[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         typical_packet_size[0x10];
+
+	u8         reserved_at_60[0x120];
+};
+
 struct mlx5_ifc_set_pp_rate_limit_in_bits {
 	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
+	u8         uid[0x10];
 
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
@@ -8274,14 +8287,7 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
 
 	u8         reserved_at_60[0x20];
 
-	u8         rate_limit[0x20];
-
-	u8         burst_upper_bound[0x20];
-
-	u8         reserved_at_c0[0x10];
-	u8         typical_packet_size[0x10];
-
-	u8         reserved_at_e0[0x120];
+	struct mlx5_ifc_set_pp_rate_limit_context_bits ctx;
 };
 
 struct mlx5_ifc_access_register_out_bits {
......