Commit 1497d2fd authored by David S. Miller

Merge branch 'mlxsw-More-Spectrum-2-preparations'

Ido Schimmel says:

====================
mlxsw: More Spectrum-2 preparations

This is the second and last set of preparations towards initial
Spectrum-2 support in mlxsw. It mainly re-arranges parts of the code
that need to work with both ASICs, but somewhat differ.

The first three patches allow different ASICs to register different set
of operations for KVD linear (KVDL) management. In Spectrum-2 there is
no linear memory and instead entries that reside there in Spectrum
(e.g., nexthops) are hashed and inserted to the hash-based KVD memory.

The fourth patch does a similar restructuring in the low-level multicast
router code. This is necessary because multicast routing is implemented
using regular circuit TCAM (C-TCAM) in Spectrum, whereas Spectrum-2 uses
an algorithmic TCAM (A-TCAM).

Next six patches prepare the ACL code for the introduction of A-TCAM in
follow-up patch sets.

Last two patches allow different ASICs to require different firmware
versions and add two resources that need to be queried from firmware by
Spectrum-2 specific code.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents eec4edc9 a8b9f232
......@@ -15,11 +15,16 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
spectrum_acl.o spectrum_flower.o \
spectrum_cnt.o spectrum_fid.o \
spectrum_ipip.o spectrum_acl_flex_actions.o \
spectrum_mr.o spectrum_mr_tcam.o \
spectrum1_kvdl.o spectrum_kvdl.o \
spectrum_acl_tcam.o spectrum_acl_ctcam.o \
spectrum1_acl_tcam.o \
spectrum_acl.o \
spectrum_flower.o spectrum_cnt.o \
spectrum_fid.o spectrum_ipip.o \
spectrum_acl_flex_actions.o \
spectrum_acl_flex_keys.o \
spectrum1_mr_tcam.o \
spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
......
......@@ -43,6 +43,7 @@
struct mlxsw_afk {
struct list_head key_info_list;
unsigned int max_blocks;
const struct mlxsw_afk_ops *ops;
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
};
......@@ -69,8 +70,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
}
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
const struct mlxsw_afk_block *blocks,
unsigned int blocks_count)
const struct mlxsw_afk_ops *ops)
{
struct mlxsw_afk *mlxsw_afk;
......@@ -79,8 +79,9 @@ struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
return NULL;
INIT_LIST_HEAD(&mlxsw_afk->key_info_list);
mlxsw_afk->max_blocks = max_blocks;
mlxsw_afk->blocks = blocks;
mlxsw_afk->blocks_count = blocks_count;
mlxsw_afk->ops = ops;
mlxsw_afk->blocks = ops->blocks;
mlxsw_afk->blocks_count = ops->blocks_count;
WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk));
return mlxsw_afk;
}
......@@ -415,45 +416,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
}
EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item,
const struct mlxsw_item *output_item,
char *storage, char *output_indexed)
{
u32 value;
value = __mlxsw_item_get32(storage, storage_item, 0);
__mlxsw_item_set32(output_indexed, output_item, 0, value);
}
static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item,
const struct mlxsw_item *output_item,
char *storage, char *output_indexed)
{
char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
size_t len = output_item->size.bytes;
memcpy(output_data, storage_data, len);
}
#define MLXSW_AFK_KEY_BLOCK_SIZE 16
static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
int block_index, char *storage, char *output)
{
char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE;
const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
mlxsw_afk_encode_u32(storage_item, output_item,
storage, output_indexed);
else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
mlxsw_afk_encode_buf(storage_item, output_item,
storage, output_indexed);
}
void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
char *key, char *mask)
{
......@@ -466,10 +430,10 @@ void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
&block_index);
if (!elinst)
continue;
mlxsw_afk_encode_one(elinst, block_index,
values->storage.key, key);
mlxsw_afk_encode_one(elinst, block_index,
values->storage.mask, mask);
mlxsw_afk->ops->encode_one(elinst, block_index,
values->storage.key, key);
mlxsw_afk->ops->encode_one(elinst, block_index,
values->storage.mask, mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
......@@ -216,9 +216,15 @@ mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small,
struct mlxsw_afk;
struct mlxsw_afk_ops {
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
void (*encode_one)(const struct mlxsw_afk_element_inst *elinst,
int block_index, char *storage, char *output);
};
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
const struct mlxsw_afk_block *blocks,
unsigned int blocks_count);
const struct mlxsw_afk_ops *ops);
void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk);
struct mlxsw_afk_key_info;
......@@ -251,7 +257,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
enum mlxsw_afk_element element,
const char *key_value, const char *mask_value,
unsigned int len);
void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
char *key, char *mask);
......
......@@ -2402,6 +2402,15 @@ MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
*/
MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
/* reg_ptce2_priority
* Priority of the rule, higher values win. The range is 1..cap_kvd_size-1.
* Note: priority does not have to be unique per rule.
* Within a region, higher priority should have lower offset (no limitation
* between regions in a multi-region).
* Access: RW
*/
MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24);
/* reg_ptce2_tcam_region_info
* Opaque object that represents the TCAM region.
* Access: Index
......@@ -2437,12 +2446,13 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
enum mlxsw_reg_ptce2_op op,
const char *tcam_region_info,
u16 offset)
u16 offset, u32 priority)
{
MLXSW_REG_ZERO(ptce2, payload);
mlxsw_reg_ptce2_v_set(payload, valid);
mlxsw_reg_ptce2_op_set(payload, op);
mlxsw_reg_ptce2_offset_set(payload, offset);
mlxsw_reg_ptce2_priority_set(payload, priority);
mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
}
......
......@@ -42,6 +42,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
MLXSW_RES_ID_CQE_V0,
MLXSW_RES_ID_CQE_V1,
......@@ -83,6 +85,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
[MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
[MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
[MLXSW_RES_ID_CQE_V0] = 0x2210,
[MLXSW_RES_ID_CQE_V1] = 0x2211,
......
......@@ -74,15 +74,22 @@
#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1620
#define MLXSW_FWREV_SUBMINOR 192
#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
"." __stringify(MLXSW_FWREV_MINOR) \
"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 1620
#define MLXSW_SP1_FWREV_SUBMINOR 192
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
.major = MLXSW_SP1_FWREV_MAJOR,
.minor = MLXSW_SP1_FWREV_MINOR,
.subminor = MLXSW_SP1_FWREV_SUBMINOR,
};
#define MLXSW_SP1_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
"." __stringify(MLXSW_SP1_FWREV_MINOR) \
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
......@@ -338,29 +345,35 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
const char *fw_filename = mlxsw_sp->fw_filename;
const struct firmware *firmware;
int err;
/* Don't check if driver does not require it */
if (!req_rev || !fw_filename)
return 0;
/* Validate driver & FW are compatible */
if (rev->major != MLXSW_FWREV_MAJOR) {
if (rev->major != req_rev->major) {
WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
rev->major, MLXSW_FWREV_MAJOR);
rev->major, req_rev->major);
return -EINVAL;
}
if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor))
return 0;
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
MLXSW_SP_FW_FILENAME);
fw_filename);
err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
err = request_firmware_direct(&firmware, fw_filename,
mlxsw_sp->bus_info->dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
MLXSW_SP_FW_FILENAME);
fw_filename);
return err;
}
......@@ -3621,7 +3634,13 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
int err;
mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
......@@ -3880,7 +3899,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
return err;
err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
if (err)
return err;
......@@ -4741,4 +4760,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
......@@ -145,6 +145,9 @@ struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
struct mlxsw_sp_kvdl;
struct mlxsw_sp_kvdl_ops;
struct mlxsw_sp_mr_tcam_ops;
struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
......@@ -168,7 +171,13 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
const struct mlxsw_fw_rev *req_rev;
const char *fw_filename;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
const struct mlxsw_afa_ops *afa_ops;
const struct mlxsw_afk_ops *afk_ops;
const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
};
static inline struct mlxsw_sp_upper *
......@@ -436,15 +445,59 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type {
MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
};
static inline unsigned int
mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
{
switch (type) {
case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
default:
return 1;
}
}
struct mlxsw_sp_kvdl_ops {
size_t priv_size;
int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count, u32 *p_entry_index);
void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count, int entry_index);
int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count,
unsigned int *p_alloc_count);
int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv);
};
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count, u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count, int entry_index);
int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count,
unsigned int *p_alloc_count);
/* spectrum1_kvdl.c */
extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
......@@ -453,44 +506,14 @@ struct mlxsw_sp_acl_rule_info {
unsigned int counter_index;
};
enum mlxsw_sp_acl_profile {
MLXSW_SP_ACL_PROFILE_FLOWER,
};
struct mlxsw_sp_acl_profile_ops {
size_t ruleset_priv_size;
int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
void *priv, void *ruleset_priv);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
u16 (*ruleset_group_id)(void *ruleset_priv);
size_t rule_priv_size;
int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
bool *activity);
};
struct mlxsw_sp_acl_ops {
size_t priv_size;
int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
const struct mlxsw_sp_acl_profile_ops *
(*profile_ops)(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_acl_profile profile);
};
struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
enum mlxsw_sp_acl_profile {
MLXSW_SP_ACL_PROFILE_FLOWER,
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
......@@ -583,11 +606,45 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
/* spectrum_acl_tcam.c */
extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_acl_tcam;
struct mlxsw_sp_acl_tcam_region;
struct mlxsw_sp_acl_tcam_ops {
enum mlxsw_reg_ptar_key_type key_type;
size_t priv_size;
int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv,
struct mlxsw_sp_acl_tcam *tcam);
void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
size_t region_priv_size;
int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
struct mlxsw_sp_acl_tcam_region *region);
void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
size_t chunk_priv_size;
void (*chunk_init)(void *region_priv, void *chunk_priv,
unsigned int priority);
void (*chunk_fini)(void *chunk_priv);
size_t entry_priv_size;
int (*entry_add)(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *chunk_priv,
void *entry_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *chunk_priv,
void *entry_priv);
int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *entry_priv,
bool *activity);
};
/* spectrum1_acl_tcam.c */
extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops;
/* spectrum_acl_flex_actions.c */
extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops;
/* spectrum_acl_flex_keys.c */
extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
......@@ -635,4 +692,37 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
/* spectrum_mr.c */
enum mlxsw_sp_mr_route_prio {
MLXSW_SP_MR_ROUTE_PRIO_SG,
MLXSW_SP_MR_ROUTE_PRIO_STARG,
MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
__MLXSW_SP_MR_ROUTE_PRIO_MAX
};
#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
struct mlxsw_sp_mr_route_key;
struct mlxsw_sp_mr_tcam_ops {
size_t priv_size;
int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
void (*fini)(void *priv);
size_t route_priv_size;
int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_key *key,
struct mlxsw_afa_block *afa_block,
enum mlxsw_sp_mr_route_prio prio);
void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_key *key);
int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
struct mlxsw_sp_mr_route_key *key,
struct mlxsw_afa_block *afa_block);
};
/* spectrum1_mr_tcam.c */
extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
#endif
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include "reg.h"
#include "core.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
/* Spectrum-1 private state attached to a generic ACL TCAM region.
 * Embeds the common C-TCAM region and a pre-installed catch-all
 * entry (chunk + entry + its rule info) that matches packets which
 * hit no other rule in the region.
 */
struct mlxsw_sp1_acl_tcam_region {
	struct mlxsw_sp_acl_ctcam_region cregion;
	struct mlxsw_sp_acl_tcam_region *region; /* back-pointer to generic region */
	struct {
		struct mlxsw_sp_acl_ctcam_chunk cchunk;
		struct mlxsw_sp_acl_ctcam_entry centry;
		struct mlxsw_sp_acl_rule_info *rulei;
	} catchall;
};
/* Spectrum-1 per-chunk private state; thin wrapper over the common
 * C-TCAM chunk.
 */
struct mlxsw_sp1_acl_tcam_chunk {
	struct mlxsw_sp_acl_ctcam_chunk cchunk;
};
/* Spectrum-1 per-entry private state; thin wrapper over the common
 * C-TCAM entry.
 */
struct mlxsw_sp1_acl_tcam_entry {
	struct mlxsw_sp_acl_ctcam_entry centry;
};
/* Spectrum-1 needs no ASIC-specific TCAM initialization (priv_size
 * is 0 in the ops below); provided only to satisfy the ops contract.
 */
static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
				   struct mlxsw_sp_acl_tcam *tcam)
{
	return 0;
}
/* Counterpart of mlxsw_sp1_acl_tcam_init(); nothing to tear down. */
static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
}
/* Install the region's catch-all rule: a lowest-priority entry whose
 * only action is "continue", so lookups that miss every real rule
 * still resolve deterministically. Called from region init; the rule
 * info is kept in region->catchall so it can be freed on teardown.
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp1_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	mlxsw_sp_acl_ctcam_chunk_init(&region->cregion,
				      &region->catchall.cchunk,
				      MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
	rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rulei)) {
		err = PTR_ERR(rulei);
		goto err_rulei_create;
	}
	/* WARN: appending a bare "continue" action is not expected to fail */
	err = mlxsw_sp_acl_rulei_act_continue(rulei);
	if (WARN_ON(err))
		goto err_rulei_act_continue;
	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;
	err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
					   &region->catchall.cchunk,
					   &region->catchall.centry,
					   rulei, false);
	if (err)
		goto err_entry_add;
	region->catchall.rulei = rulei;
	return 0;

	/* Unwind in reverse order of acquisition. */
err_entry_add:
err_rulei_commit:
err_rulei_act_continue:
	mlxsw_sp_acl_rulei_destroy(rulei);
err_rulei_create:
	mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
	return err;
}
/* Remove the region's catch-all entry and release everything that
 * mlxsw_sp1_acl_ctcam_region_catchall_add() set up, in reverse order.
 */
static void
mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp1_acl_tcam_region *region)
{
	mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
				     &region->catchall.cchunk,
				     &region->catchall.centry);
	mlxsw_sp_acl_rulei_destroy(region->catchall.rulei);
	mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
}
/* Initialize Spectrum-1 private state for a newly created generic
 * region: set up the underlying C-TCAM region, then install the
 * catch-all rule. On failure the C-TCAM region is torn down again.
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
			       struct mlxsw_sp_acl_tcam_region *_region)
{
	struct mlxsw_sp1_acl_tcam_region *region = region_priv;
	int err;

	err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
					     _region);
	if (err)
		return err;
	err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region);
	if (err)
		goto err_catchall_add;
	/* remember the generic region for activity queries later on */
	region->region = _region;
	return 0;

err_catchall_add:
	mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
	return err;
}
/* Tear down a region: remove the catch-all first (it lives inside the
 * C-TCAM region), then finish the C-TCAM region itself.
 */
static void
mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
{
	struct mlxsw_sp1_acl_tcam_region *region = region_priv;

	mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region);
	mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
}
/* Initialize a Spectrum-1 chunk by delegating to the common C-TCAM
 * chunk initializer at the given priority.
 */
static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
					  unsigned int priority)
{
	struct mlxsw_sp1_acl_tcam_chunk *sp1_chunk = chunk_priv;
	struct mlxsw_sp1_acl_tcam_region *sp1_region = region_priv;

	mlxsw_sp_acl_ctcam_chunk_init(&sp1_region->cregion,
				      &sp1_chunk->cchunk, priority);
}
/* Finish a Spectrum-1 chunk; delegates to the common C-TCAM code. */
static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv)
{
	struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;

	mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
}
/* Add a rule entry to a Spectrum-1 region via the common C-TCAM code.
 * "false" means this is a regular rule, unlike the region catch-all.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
					void *region_priv, void *chunk_priv,
					void *entry_priv,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp1_acl_tcam_entry *sp1_entry = entry_priv;
	struct mlxsw_sp1_acl_tcam_chunk *sp1_chunk = chunk_priv;
	struct mlxsw_sp1_acl_tcam_region *sp1_region = region_priv;

	return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &sp1_region->cregion,
					    &sp1_chunk->cchunk,
					    &sp1_entry->centry, rulei, false);
}
/* Remove a rule entry previously installed with
 * mlxsw_sp1_acl_tcam_entry_add().
 */
static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
					 void *region_priv, void *chunk_priv,
					 void *entry_priv)
{
	struct mlxsw_sp1_acl_tcam_entry *sp1_entry = entry_priv;
	struct mlxsw_sp1_acl_tcam_chunk *sp1_chunk = chunk_priv;
	struct mlxsw_sp1_acl_tcam_region *sp1_region = region_priv;

	mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &sp1_region->cregion,
				     &sp1_chunk->cchunk, &sp1_entry->centry);
}
/* Query whether the TCAM entry at @offset in @_region was hit since the
 * last query. Uses PTCE-2 in query-and-clear mode, so reading the
 * activity bit also resets it in hardware. Returns 0 on success or a
 * negative errno; *activity is valid only on success.
 */
static int
mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_tcam_region *_region,
					     unsigned int offset,
					     bool *activity)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	int err;

	/* priority is irrelevant for a query, hence 0 */
	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
			     _region->tcam_region_info, offset, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
	if (err)
		return err;
	*activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
	return 0;
}
/* Resolve the entry's C-TCAM offset and forward the activity query to
 * the region-level helper. Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      void *region_priv, void *entry_priv,
				      bool *activity)
{
	struct mlxsw_sp1_acl_tcam_entry *sp1_entry = entry_priv;
	struct mlxsw_sp1_acl_tcam_region *sp1_region = region_priv;
	unsigned int entry_offset =
		mlxsw_sp_acl_ctcam_entry_offset(&sp1_entry->centry);

	return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp,
							    sp1_region->region,
							    entry_offset,
							    activity);
}
/* Spectrum-1 implementation of the per-ASIC ACL TCAM operations,
 * registered from mlxsw_sp_init(). Built entirely on the common
 * C-TCAM layer (Spectrum-1 has no A-TCAM).
 */
const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
	.key_type		= MLXSW_REG_PTAR_KEY_TYPE_FLEX,
	.priv_size		= 0, /* no per-TCAM private state needed */
	.init			= mlxsw_sp1_acl_tcam_init,
	.fini			= mlxsw_sp1_acl_tcam_fini,
	.region_priv_size	= sizeof(struct mlxsw_sp1_acl_tcam_region),
	.region_init		= mlxsw_sp1_acl_tcam_region_init,
	.region_fini		= mlxsw_sp1_acl_tcam_region_fini,
	.chunk_priv_size	= sizeof(struct mlxsw_sp1_acl_tcam_chunk),
	.chunk_init		= mlxsw_sp1_acl_tcam_chunk_init,
	.chunk_fini		= mlxsw_sp1_acl_tcam_chunk_fini,
	.entry_priv_size	= sizeof(struct mlxsw_sp1_acl_tcam_entry),
	.entry_add		= mlxsw_sp1_acl_tcam_entry_add,
	.entry_del		= mlxsw_sp1_acl_tcam_entry_del,
	.entry_activity_get	= mlxsw_sp1_acl_tcam_entry_activity_get,
};
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include "spectrum.h"
#define MLXSW_SP1_KVDL_SINGLE_BASE 0
#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384
#define MLXSW_SP1_KVDL_SINGLE_END \
(MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1)
#define MLXSW_SP1_KVDL_CHUNKS_BASE \
(MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE)
#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152
#define MLXSW_SP1_KVDL_CHUNKS_END \
(MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1)
#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \
(MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE)
#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \
(MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE)
#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \
(MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1)
#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1
#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32
#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
/* Describes one KVD linear partition: the index range it covers and
 * the fixed allocation granule used within it. start/end indexes may
 * be rewritten at init time from devlink resource sizes.
 */
struct mlxsw_sp1_kvdl_part_info {
	unsigned int part_index;
	unsigned int start_index;	/* first KVDL index of the partition */
	unsigned int end_index;		/* last KVDL index, inclusive */
	unsigned int alloc_size;	/* entries per allocation granule */
	enum mlxsw_sp_resource_id resource_id;	/* matching devlink resource */
};
/* Partition identifiers; also index mlxsw_sp1_kvdl_parts_info[] and
 * mlxsw_sp1_kvdl->parts[].
 */
enum mlxsw_sp1_kvdl_part_id {
	MLXSW_SP1_KVDL_PART_ID_SINGLE,
	MLXSW_SP1_KVDL_PART_ID_CHUNKS,
	MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS,
};
/* Build one partition descriptor from the MLXSW_SP1_KVDL_<id>_* macros
 * above by token pasting; keeps the table below free of repetition.
 */
#define MLXSW_SP1_KVDL_PART_INFO(id)				\
[MLXSW_SP1_KVDL_PART_ID_##id] = {				\
	.start_index = MLXSW_SP1_KVDL_##id##_BASE,		\
	.end_index = MLXSW_SP1_KVDL_##id##_END,			\
	.alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE,		\
	.resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id,	\
}

/* Default layout of the three KVDL partitions, ordered by part ID. */
static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
	MLXSW_SP1_KVDL_PART_INFO(SINGLE),
	MLXSW_SP1_KVDL_PART_INFO(CHUNKS),
	MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS),
};

#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info)
/* Runtime state of one partition: its (possibly resized) descriptor
 * plus a trailing bitmap with one bit per allocation granule.
 */
struct mlxsw_sp1_kvdl_part {
	struct mlxsw_sp1_kvdl_part_info info;
	unsigned long usage[0]; /* Entries */
};

/* Spectrum-1 KVDL private state (kvdl_ops->priv). */
struct mlxsw_sp1_kvdl {
	struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN];
};
/* Pick the partition with the smallest allocation granule that can
 * still satisfy a request for @alloc_size entries. Returns the part
 * or ERR_PTR(-ENOBUFS) when no partition fits.
 */
static struct mlxsw_sp1_kvdl_part *
mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl,
			       unsigned int alloc_size)
{
	struct mlxsw_sp1_kvdl_part *best = NULL;
	int i;

	for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
		struct mlxsw_sp1_kvdl_part *part = kvdl->parts[i];

		if (alloc_size > part->info.alloc_size)
			continue;
		if (!best || part->info.alloc_size <= best->info.alloc_size)
			best = part;
	}

	return best ?: ERR_PTR(-ENOBUFS);
}
/* Map a KVDL index back to the partition whose range contains it.
 * Returns ERR_PTR(-EINVAL) when the index is outside every partition.
 */
static struct mlxsw_sp1_kvdl_part *
mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index)
{
	int i;

	for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
		struct mlxsw_sp1_kvdl_part *part = kvdl->parts[i];
		const struct mlxsw_sp1_kvdl_part_info *info = &part->info;

		if (kvdl_index >= info->start_index &&
		    kvdl_index <= info->end_index)
			return part;
	}

	return ERR_PTR(-EINVAL);
}
/* Translate a granule index within a partition to the absolute KVDL
 * index of that granule's first entry.
 */
static u32
mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info,
			     unsigned int entry_index)
{
	u32 offset = entry_index * info->alloc_size;

	return info->start_index + offset;
}
/* Inverse of mlxsw_sp1_kvdl_to_kvdl_index(): absolute KVDL index to
 * granule index within the partition.
 */
static unsigned int
mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info,
			      u32 kvdl_index)
{
	u32 offset = kvdl_index - info->start_index;

	return offset / info->alloc_size;
}
/* Claim one free allocation granule in @part. Finds the first clear
 * bit in the usage bitmap, marks it used, and reports the granule's
 * absolute KVDL index via *p_kvdl_index. Returns -ENOBUFS when the
 * partition is full.
 */
static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part,
				     u32 *p_kvdl_index)
{
	const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
	unsigned int entry_index, nr_entries;

	/* number of granules (not entries) covered by the bitmap */
	nr_entries = (info->end_index - info->start_index + 1) /
		     info->alloc_size;
	entry_index = find_first_zero_bit(part->usage, nr_entries);
	if (entry_index == nr_entries)
		return -ENOBUFS;
	__set_bit(entry_index, part->usage);

	*p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index);

	return 0;
}
/* Release the granule containing @kvdl_index back to @part's bitmap. */
static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part,
				     u32 kvdl_index)
{
	unsigned int granule;

	granule = mlxsw_sp1_kvdl_to_entry_index(&part->info, kvdl_index);
	__clear_bit(granule, part->usage);
}
/* KVDL ops .alloc callback. On Spectrum-1 the entry 'type' is ignored;
 * partitions are selected purely by the requested entry count.
 */
static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
				enum mlxsw_sp_kvdl_entry_type type,
				unsigned int entry_count,
				u32 *p_entry_index)
{
	struct mlxsw_sp1_kvdl *kvdl = priv;
	struct mlxsw_sp1_kvdl_part *part;

	/* Find partition with smallest allocation size satisfying the
	 * requested size.
	 */
	part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
	if (IS_ERR(part))
		return PTR_ERR(part);

	return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index);
}
/* KVDL ops .free callback. An index that maps to no partition is
 * silently ignored — there is nothing to release and no way to report
 * an error from this void callback.
 */
static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
				enum mlxsw_sp_kvdl_entry_type type,
				unsigned int entry_count, int entry_index)
{
	struct mlxsw_sp1_kvdl *kvdl = priv;
	struct mlxsw_sp1_kvdl_part *part;

	part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index);
	if (IS_ERR(part))
		return;
	mlxsw_sp1_kvdl_part_free(part, entry_index);
}
/* KVDL ops .alloc_size_query callback: report the actual allocation
 * size (possibly larger than requested) that an allocation of
 * 'entry_count' entries would consume.
 */
static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
					   void *priv,
					   enum mlxsw_sp_kvdl_entry_type type,
					   unsigned int entry_count,
					   unsigned int *p_alloc_size)
{
	struct mlxsw_sp1_kvdl *kvdl = priv;
	struct mlxsw_sp1_kvdl_part *part;

	part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
	if (IS_ERR(part))
		return PTR_ERR(part);

	*p_alloc_size = part->info.alloc_size;

	return 0;
}
/* Recompute a partition's index boundaries after its size was taken
 * from devlink. The first partition keeps its default start index and
 * only gets a new end; subsequent partitions are packed right after
 * their predecessor.
 */
static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part,
				       struct mlxsw_sp1_kvdl_part *part_prev,
				       unsigned int size)
{
	if (!part_prev) {
		part->info.end_index = size - 1;
	} else {
		part->info.start_index = part_prev->info.end_index + 1;
		part->info.end_index = part->info.start_index + size - 1;
	}
}
/* Create one KVDL partition. The partition size is taken from devlink
 * when the user resized the resource; otherwise the compile-time
 * default from 'info' is used and no boundary update is needed.
 */
static struct mlxsw_sp1_kvdl_part *
mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp1_kvdl_part_info *info,
			 struct mlxsw_sp1_kvdl_part *part_prev)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp1_kvdl_part *part;
	bool need_update = true;
	unsigned int nr_entries;
	size_t usage_size;
	u64 resource_size;
	int err;

	err = devlink_resource_size_get(devlink, info->resource_id,
					&resource_size);
	if (err) {
		/* No user-configured size; fall back to defaults. */
		need_update = false;
		resource_size = info->end_index - info->start_index + 1;
	}

	/* Rounds down: trailing slots smaller than alloc_size are unused. */
	nr_entries = div_u64(resource_size, info->alloc_size);
	usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
	/* Usage bitmap is allocated inline after the part struct. */
	part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
	if (!part)
		return ERR_PTR(-ENOMEM);

	memcpy(&part->info, info, sizeof(part->info));

	if (need_update)
		mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size);
	return part;
}
/* Destroy a partition; the usage bitmap is freed with the struct. */
static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part)
{
	kfree(part);
}
/* Create all partitions in order, chaining each one after the previous
 * so boundaries stay contiguous. On failure, partitions created so far
 * are torn down in reverse.
 */
static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp1_kvdl *kvdl)
{
	const struct mlxsw_sp1_kvdl_part_info *info;
	struct mlxsw_sp1_kvdl_part *part_prev = NULL;
	int err, i;

	for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
		info = &mlxsw_sp1_kvdl_parts_info[i];
		kvdl->parts[i] = mlxsw_sp1_kvdl_part_init(mlxsw_sp, info,
							  part_prev);
		if (IS_ERR(kvdl->parts[i])) {
			err = PTR_ERR(kvdl->parts[i]);
			goto err_kvdl_part_init;
		}
		part_prev = kvdl->parts[i];
	}
	return 0;

err_kvdl_part_init:
	for (i--; i >= 0; i--)
		mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
	return err;
}
/* Destroy all partitions. */
static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl)
{
	int i;

	for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
		mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
}
static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part)
{
const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
unsigned int nr_entries;
int bit = -1;
u64 occ = 0;
nr_entries = (info->end_index -
info->start_index + 1) /
info->alloc_size;
while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
< nr_entries)
occ += info->alloc_size;
return occ;
}
/* devlink occupancy callback for the whole KVD linear resource:
 * sum over all partitions.
 */
static u64 mlxsw_sp1_kvdl_occ_get(void *priv)
{
	const struct mlxsw_sp1_kvdl *kvdl = priv;
	u64 occ = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
		occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]);
	return occ;
}
/* devlink occupancy callback for the "singles" sub-resource. */
static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv)
{
	const struct mlxsw_sp1_kvdl *kvdl = priv;
	struct mlxsw_sp1_kvdl_part *part;

	part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE];
	return mlxsw_sp1_kvdl_part_occ(part);
}

/* devlink occupancy callback for the "chunks" sub-resource. */
static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv)
{
	const struct mlxsw_sp1_kvdl *kvdl = priv;
	struct mlxsw_sp1_kvdl_part *part;

	part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS];
	return mlxsw_sp1_kvdl_part_occ(part);
}

/* devlink occupancy callback for the "large chunks" sub-resource. */
static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv)
{
	const struct mlxsw_sp1_kvdl *kvdl = priv;
	struct mlxsw_sp1_kvdl_part *part;

	part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS];
	return mlxsw_sp1_kvdl_part_occ(part);
}
/* KVDL ops .init callback: create the partitions and register the
 * devlink occupancy getters for the linear resource and each
 * sub-resource.
 */
static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp1_kvdl *kvdl = priv;
	int err;

	err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
	if (err)
		return err;
	devlink_resource_occ_get_register(devlink,
					  MLXSW_SP_RESOURCE_KVD_LINEAR,
					  mlxsw_sp1_kvdl_occ_get,
					  kvdl);
	devlink_resource_occ_get_register(devlink,
					  MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
					  mlxsw_sp1_kvdl_single_occ_get,
					  kvdl);
	devlink_resource_occ_get_register(devlink,
					  MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
					  mlxsw_sp1_kvdl_chunks_occ_get,
					  kvdl);
	devlink_resource_occ_get_register(devlink,
					  MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
					  mlxsw_sp1_kvdl_large_chunks_occ_get,
					  kvdl);
	return 0;
}
/* KVDL ops .fini callback: unregister the occupancy getters in reverse
 * registration order, then destroy the partitions.
 */
static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp1_kvdl *kvdl = priv;

	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_KVD_LINEAR);
	mlxsw_sp1_kvdl_parts_fini(kvdl);
}
/* Spectrum-1 implementation of the ASIC-agnostic KVDL operations. */
const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = {
	.priv_size = sizeof(struct mlxsw_sp1_kvdl),
	.init = mlxsw_sp1_kvdl_init,
	.fini = mlxsw_sp1_kvdl_fini,
	.alloc = mlxsw_sp1_kvdl_alloc,
	.free = mlxsw_sp1_kvdl_free,
	.alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query,
};
/* Register the three KVD linear sub-resources with devlink so their
 * sizes can be observed and resized by the user.
 */
int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	/* static to keep the struct off the stack; it is fully
	 * re-initialized before each registration below.
	 */
	static struct devlink_resource_size_params size_params;
	u32 kvdl_max_size;
	int err;

	/* Linear area is what remains of KVD after the minimal hash
	 * (single and double) areas are carved out.
	 */
	kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
			MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);

	devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
					  MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
					MLXSW_SP1_KVDL_SINGLE_SIZE,
					MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					&size_params);
	if (err)
		return err;

	devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
					  MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
					MLXSW_SP1_KVDL_CHUNKS_SIZE,
					MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					&size_params);
	if (err)
		return err;

	devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
					  MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
					MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
					MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					&size_params);
	return err;
}
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
* Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/parman.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"
/* One multicast-router TCAM region (per L3 protocol), managed by the
 * parman priority-array library.
 */
struct mlxsw_sp1_mr_tcam_region {
	struct mlxsw_sp *mlxsw_sp;
	enum mlxsw_reg_rtar_key_type rtar_key_type;
	struct parman *parman;
	/* Array of MLXSW_SP_MR_ROUTE_PRIO_MAX + 1 priorities. */
	struct parman_prio *parman_prios;
};

/* Spectrum-1 MR TCAM: one region per supported L3 protocol. */
struct mlxsw_sp1_mr_tcam {
	struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
};

/* Per-route private state: its slot in the region's parman array. */
struct mlxsw_sp1_mr_tcam_route {
	struct parman_item parman_item;
	struct parman_prio *parman_prio;
};
/* Write (or overwrite) a multicast route rule at the given TCAM offset
 * via the RMFT2 register. Both values of key->proto are covered by the
 * switch, so rmft2_pl is always packed before the write.
 */
static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
					   struct parman_item *parman_item,
					   struct mlxsw_sp_mr_route_key *key,
					   struct mlxsw_afa_block *afa_block)
{
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	switch (key->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
					  key->vrid,
					  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
					  ntohl(key->group.addr4),
					  ntohl(key->group_mask.addr4),
					  ntohl(key->source.addr4),
					  ntohl(key->source_mask.addr4),
					  mlxsw_afa_block_first_set(afa_block));
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
					  key->vrid,
					  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
					  key->group.addr6,
					  key->group_mask.addr6,
					  key->source.addr6,
					  key->source_mask.addr6,
					  mlxsw_afa_block_first_set(afa_block));
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
/* Invalidate the rule at the given TCAM offset; the addresses and
 * action are don't-care when the valid bit is cleared.
 */
static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp,
					  struct parman_item *parman_item,
					  struct mlxsw_sp_mr_route_key *key)
{
	struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	switch (key->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
					  key->vrid, 0, 0, 0, 0, 0, 0, NULL);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
					  key->vrid, 0, 0, zero_addr, zero_addr,
					  zero_addr, zero_addr, NULL);
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
/* Select the TCAM region for the given L3 protocol. */
static struct mlxsw_sp1_mr_tcam_region *
mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam,
				  enum mlxsw_sp_l3proto proto)
{
	return &mr_tcam->tcam_regions[proto];
}
/* Reserve a TCAM slot for the route at the requested priority and
 * remember which priority bucket it lives in for later removal.
 */
static int
mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam,
					struct mlxsw_sp1_mr_tcam_route *route,
					struct mlxsw_sp_mr_route_key *key,
					enum mlxsw_sp_mr_route_prio prio)
{
	struct mlxsw_sp1_mr_tcam_region *tcam_region;
	int err;

	tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
	err = parman_item_add(tcam_region->parman,
			      &tcam_region->parman_prios[prio],
			      &route->parman_item);
	if (err)
		return err;

	route->parman_prio = &tcam_region->parman_prios[prio];
	return 0;
}
/* Release the route's TCAM slot back to the region's parman array. */
static void
mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam,
					   struct mlxsw_sp1_mr_tcam_route *route,
					   struct mlxsw_sp_mr_route_key *key)
{
	struct mlxsw_sp1_mr_tcam_region *tcam_region;

	tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
	parman_item_remove(tcam_region->parman,
			   route->parman_prio, &route->parman_item);
}
/* MR TCAM ops .route_create callback: first reserve a slot in the
 * region, then write the rule to hardware; unwind the slot on failure.
 */
static int
mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
			       void *route_priv,
			       struct mlxsw_sp_mr_route_key *key,
			       struct mlxsw_afa_block *afa_block,
			       enum mlxsw_sp_mr_route_prio prio)
{
	struct mlxsw_sp1_mr_tcam_route *route = route_priv;
	struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
	int err;

	err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route,
						      key, prio);
	if (err)
		return err;

	err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					      key, afa_block);
	if (err)
		goto err_route_replace;
	return 0;

err_route_replace:
	mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
	return err;
}
/* MR TCAM ops .route_destroy callback: invalidate the hardware rule,
 * then free its slot (reverse of route_create).
 */
static void
mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
				void *route_priv,
				struct mlxsw_sp_mr_route_key *key)
{
	struct mlxsw_sp1_mr_tcam_route *route = route_priv;
	struct mlxsw_sp1_mr_tcam *mr_tcam = priv;

	mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key);
	mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
}
/* MR TCAM ops .route_update callback: rewrite the rule in place with a
 * new action block; the TCAM slot is unchanged.
 */
static int
mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
			       void *route_priv,
			       struct mlxsw_sp_mr_route_key *key,
			       struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp1_mr_tcam_route *route = route_priv;

	return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					       key, afa_block);
}
/* Initial region size and growth step, in rules. */
#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16

/* Allocate the hardware TCAM region via the RTAR register. */
static int
mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
{
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
			    mr_tcam_region->rtar_key_type,
			    MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}
/* Deallocate the hardware TCAM region. The write status is not
 * checked — this runs on teardown paths with no way to recover.
 */
static void
mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
{
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
			    mr_tcam_region->rtar_key_type, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}
/* parman .resize callback: grow/shrink the hardware region, bounded by
 * the ASIC's maximum TCAM rule count.
 */
static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv,
						  unsigned long new_count)
{
	struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
			    mr_tcam_region->rtar_key_type, new_count);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}
/* parman .move callback: relocate a run of rules inside the region via
 * the RRCR register so parman can keep priorities ordered.
 */
static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv,
						 unsigned long from_index,
						 unsigned long to_index,
						 unsigned long count)
{
	struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rrcr_pl[MLXSW_REG_RRCR_LEN];

	mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
			    from_index, count,
			    mr_tcam_region->rtar_key_type, to_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
}
/* parman backend for MR TCAM regions; LSORT keeps entries sorted by
 * priority with linear moves.
 */
static const struct parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = {
	.base_count	= MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT,
	.resize_step	= MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP,
	.resize		= mlxsw_sp1_mr_tcam_region_parman_resize,
	.move		= mlxsw_sp1_mr_tcam_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};
/* Set up one protocol's TCAM region: allocate the hardware region,
 * create the parman instance and initialize one parman priority per
 * route priority level. Unwinds in reverse on any failure.
 */
static int
mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp1_mr_tcam_region *mr_tcam_region,
			      enum mlxsw_reg_rtar_key_type rtar_key_type)
{
	struct parman_prio *parman_prios;
	struct parman *parman;
	int err;
	int i;

	mr_tcam_region->rtar_key_type = rtar_key_type;
	mr_tcam_region->mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region);
	if (err)
		return err;

	parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops,
			       mr_tcam_region);
	if (!parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}
	mr_tcam_region->parman = parman;

	parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
				     sizeof(*parman_prios), GFP_KERNEL);
	if (!parman_prios) {
		err = -ENOMEM;
		goto err_parman_prios_alloc;
	}
	mr_tcam_region->parman_prios = parman_prios;

	for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
		parman_prio_init(mr_tcam_region->parman,
				 &mr_tcam_region->parman_prios[i], i);
	return 0;

err_parman_prios_alloc:
	parman_destroy(parman);
err_parman_create:
	mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
	return err;
}
/* Tear down one protocol's TCAM region, reverse order of init. */
static void
mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
{
	int i;

	for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
		parman_prio_fini(&mr_tcam_region->parman_prios[i]);
	kfree(mr_tcam_region->parman_prios);
	parman_destroy(mr_tcam_region->parman);
	mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
}
/* MR TCAM ops .init callback: create the IPv4 and IPv6 regions.
 * Requires the ACL_MAX_TCAM_RULES resource (used by the resize
 * callback) to be reported by the firmware.
 */
static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
	/* Base pointer; indexed by mlxsw_sp_l3proto below. */
	struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
	u32 rtar_key;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
		return -EIO;

	rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
	err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
					    &region[MLXSW_SP_L3_PROTO_IPV4],
					    rtar_key);
	if (err)
		return err;

	rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
	err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
					    &region[MLXSW_SP_L3_PROTO_IPV6],
					    rtar_key);
	if (err)
		goto err_ipv6_region_init;

	return 0;

err_ipv6_region_init:
	mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
	return err;
}
/* MR TCAM ops .fini callback: destroy the regions, reverse of init. */
static void mlxsw_sp1_mr_tcam_fini(void *priv)
{
	struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
	struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];

	mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
	mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
}
/* Spectrum-1 (C-TCAM/parman based) multicast router TCAM operations. */
const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp1_mr_tcam),
	.init = mlxsw_sp1_mr_tcam_init,
	.fini = mlxsw_sp1_mr_tcam_fini,
	.route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route),
	.route_create = mlxsw_sp1_mr_tcam_route_create,
	.route_destroy = mlxsw_sp1_mr_tcam_route_destroy,
	.route_update = mlxsw_sp1_mr_tcam_route_update,
};
......@@ -48,13 +48,12 @@
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"
#include "spectrum_acl_tcam.h"
struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_sp_fid *dummy_fid;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
struct list_head rules;
struct {
......@@ -62,8 +61,7 @@ struct mlxsw_sp_acl {
unsigned long interval; /* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
} rule_activity_update;
unsigned long priv[0];
/* priv has to be always the last item */
struct mlxsw_sp_acl_tcam tcam;
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
......@@ -339,7 +337,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_init;
err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv);
if (err)
goto err_ops_ruleset_add;
......@@ -409,7 +407,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
ops = acl->ops->profile_ops(mlxsw_sp, profile);
ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
......@@ -427,7 +425,7 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
ops = acl->ops->profile_ops(mlxsw_sp, profile);
ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
......@@ -634,7 +632,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp),
GFP_KERNEL);
if (!rule) {
err = -ENOMEM;
goto err_alloc;
......@@ -825,20 +824,20 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_fid *fid;
struct mlxsw_sp_acl *acl;
size_t alloc_size;
int err;
acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
acl = kzalloc(alloc_size, GFP_KERNEL);
if (!acl)
return -ENOMEM;
mlxsw_sp->acl = acl;
acl->mlxsw_sp = mlxsw_sp;
acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_FLEX_KEYS),
mlxsw_sp1_afk_blocks,
MLXSW_SP1_AFK_BLOCKS_COUNT);
mlxsw_sp->afk_ops);
if (!acl->afk) {
err = -ENOMEM;
goto err_afk_create;
......@@ -857,12 +856,10 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
acl->dummy_fid = fid;
INIT_LIST_HEAD(&acl->rules);
err = acl_ops->init(mlxsw_sp, acl->priv);
err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;
acl->ops = acl_ops;
/* Create the delayed work for the rule activity_update */
INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
mlxsw_sp_acl_rul_activity_update_work);
......@@ -884,10 +881,9 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
acl_ops->fini(mlxsw_sp, acl->priv);
mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
......
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/parman.h>
#include "reg.h"
#include "core.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
/* Resize an existing C-TCAM region to 'new_size' rules via PTAR. */
static int
mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region,
				 u16 new_size)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
			    region->key_type, new_size, region->id,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
/* Move 'size' consecutive rules within the same region via PRCR.
 * Write status is not checked; this is a void parman move path.
 */
static void
mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region,
			       u16 src_offset, u16 dst_offset, u16 size)
{
	char prcr_pl[MLXSW_REG_PRCR_LEN];

	mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
			    region->tcam_region_info, src_offset,
			    region->tcam_region_info, dst_offset, size);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
}
/* Program one ACL rule at the given region offset via PTCE2:
 * encode the flex key/mask from rulei and attach the first action set.
 */
static int
mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       unsigned int offset,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       bool fillup_priority)
{
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	char *act_set;
	u32 priority;
	char *mask;
	char *key;
	int err;

	err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority,
					     fillup_priority);
	if (err)
		return err;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset, priority);
	key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
	mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
	mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);

	/* Only the first action set belongs here, the rest is in KVD */
	act_set = mlxsw_afa_block_first_set(rulei->act_block);
	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
/* Invalidate the rule at the given region offset (valid bit false). */
static void
mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       unsigned int offset)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];

	mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
/* parman .resize callback: bounded by the ASIC's TCAM rule maximum. */
static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
						   unsigned long new_count)
{
	struct mlxsw_sp_acl_ctcam_region *cregion = priv;
	struct mlxsw_sp_acl_tcam_region *region = cregion->region;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count);
}
/* parman .move callback: forwards to the PRCR-based region move. */
static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv,
						  unsigned long from_index,
						  unsigned long to_index,
						  unsigned long count)
{
	struct mlxsw_sp_acl_ctcam_region *cregion = priv;
	struct mlxsw_sp_acl_tcam_region *region = cregion->region;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;

	mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region,
				       from_index, to_index, count);
}
/* parman backend for C-TCAM ACL regions (linear-sort algorithm). */
static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = {
	.base_count	= MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
	.resize_step	= MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
	.resize		= mlxsw_sp_acl_ctcam_region_parman_resize,
	.move		= mlxsw_sp_acl_ctcam_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};
/* Attach a C-TCAM manager (parman instance) to a generic TCAM region. */
int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_ctcam_region *cregion,
				   struct mlxsw_sp_acl_tcam_region *region)
{
	cregion->region = region;
	cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops,
					cregion);
	if (!cregion->parman)
		return -ENOMEM;
	return 0;
}

/* Detach and destroy the region's parman instance. */
void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion)
{
	parman_destroy(cregion->parman);
}
/* A C-TCAM chunk maps to one parman priority within its region. */
void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
				   struct mlxsw_sp_acl_ctcam_chunk *cchunk,
				   unsigned int priority)
{
	parman_prio_init(cregion->parman, &cchunk->parman_prio, priority);
}

/* Release the chunk's parman priority. */
void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk)
{
	parman_prio_fini(&cchunk->parman_prio);
}
/* Add one ACL entry: reserve a slot via parman at the chunk's priority,
 * then program the rule into hardware; unwind the slot on failure.
 */
int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_ctcam_region *cregion,
				 struct mlxsw_sp_acl_ctcam_chunk *cchunk,
				 struct mlxsw_sp_acl_ctcam_entry *centry,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 bool fillup_priority)
{
	int err;

	err = parman_item_add(cregion->parman, &cchunk->parman_prio,
			      &centry->parman_item);
	if (err)
		return err;

	err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion->region,
						     centry->parman_item.index,
						     rulei, fillup_priority);
	if (err)
		goto err_rule_insert;
	return 0;

err_rule_insert:
	parman_item_remove(cregion->parman, &cchunk->parman_prio,
			   &centry->parman_item);
	return err;
}
/* Delete one ACL entry: invalidate the hardware rule first, then free
 * its parman slot (reverse of entry_add).
 */
void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ctcam_region *cregion,
				  struct mlxsw_sp_acl_ctcam_chunk *cchunk,
				  struct mlxsw_sp_acl_ctcam_entry *centry)
{
	mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion->region,
					       centry->parman_item.index);
	parman_item_remove(cregion->parman, &cchunk->parman_prio,
			   &centry->parman_item);
}
......@@ -37,8 +37,6 @@
#include "core_acl_flex_actions.h"
#include "spectrum_span.h"
#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
char *enc_actions, bool is_first)
{
......@@ -53,8 +51,8 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
if (is_first)
return 0;
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
&kvdl_index);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
1, &kvdl_index);
if (err)
return err;
mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
......@@ -65,7 +63,8 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
return 0;
err_pefa_write:
mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
1, kvdl_index);
return err;
}
......@@ -76,7 +75,8 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
if (is_first)
return;
mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
1, kvdl_index);
}
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
......@@ -87,7 +87,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
u32 kvdl_index;
int err;
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
1, &kvdl_index);
if (err)
return err;
mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
......@@ -98,7 +99,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
return 0;
err_ppbs_write:
mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
1, kvdl_index);
return err;
}
......@@ -106,7 +108,8 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
struct mlxsw_sp *mlxsw_sp = priv;
mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
1, kvdl_index);
}
static int
......
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -32,9 +32,10 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
#include <linux/kernel.h>
#include <linux/module.h>
#include "spectrum.h"
#include "item.h"
#include "core_acl_flex_keys.h"
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
......@@ -126,6 +127,48 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
};
#define MLXSW_SP1_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp1_afk_blocks)
#endif
static void mlxsw_sp1_afk_encode_u32(const struct mlxsw_item *storage_item,
const struct mlxsw_item *output_item,
char *storage, char *output_indexed)
{
u32 value;
value = __mlxsw_item_get32(storage, storage_item, 0);
__mlxsw_item_set32(output_indexed, output_item, 0, value);
}
static void mlxsw_sp1_afk_encode_buf(const struct mlxsw_item *storage_item,
const struct mlxsw_item *output_item,
char *storage, char *output_indexed)
{
char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
size_t len = output_item->size.bytes;
memcpy(output_data, storage_data, len);
}
#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
static void
mlxsw_sp1_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
int block_index, char *storage, char *output)
{
unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
char *output_indexed = output + offset;
const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
mlxsw_sp1_afk_encode_u32(storage_item, output_item,
storage, output_indexed);
else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
mlxsw_sp1_afk_encode_buf(storage_item, output_item,
storage, output_indexed);
}
const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.blocks = mlxsw_sp1_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
.encode_one = mlxsw_sp1_afk_encode_one,
};
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -39,25 +39,25 @@
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/parman.h>
#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
struct mlxsw_sp_acl_tcam {
unsigned long *used_regions; /* bit array */
unsigned int max_regions;
unsigned long *used_groups; /* bit array */
unsigned int max_groups;
unsigned int max_group_size;
};
size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
return ops->priv_size;
}
static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam)
{
struct mlxsw_sp_acl_tcam *tcam = priv;
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
u64 max_tcam_regions;
u64 max_regions;
u64 max_groups;
......@@ -88,21 +88,53 @@ static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
err = ops->init(mlxsw_sp, tcam->priv, tcam);
if (err)
goto err_tcam_init;
return 0;
err_tcam_init:
kfree(tcam->used_groups);
err_alloc_used_groups:
kfree(tcam->used_regions);
return err;
}
static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam)
{
struct mlxsw_sp_acl_tcam *tcam = priv;
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->fini(mlxsw_sp, tcam->priv);
kfree(tcam->used_groups);
kfree(tcam->used_regions);
}
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 *priority, bool fillup_priority)
{
u64 max_priority;
if (!fillup_priority) {
*priority = 0;
return 0;
}
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
return -EIO;
max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
if (rulei->priority > max_priority)
return -EINVAL;
/* Unlike in TC, in HW, higher number means higher priority. */
*priority = max_priority - rulei->priority;
return 0;
}
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
......@@ -159,36 +191,21 @@ struct mlxsw_sp_acl_tcam_group {
unsigned int patterns_count;
};
struct mlxsw_sp_acl_tcam_region {
struct list_head list; /* Member of a TCAM group */
struct list_head chunk_list; /* List of chunks under this region */
struct parman *parman;
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_acl_tcam_group *group;
enum mlxsw_reg_ptar_key_type key_type;
u16 id; /* ACL ID and region ID - they are same */
char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
struct mlxsw_afk_key_info *key_info;
struct {
struct parman_prio parman_prio;
struct parman_item parman_item;
struct mlxsw_sp_acl_rule_info *rulei;
} catchall;
};
struct mlxsw_sp_acl_tcam_chunk {
struct list_head list; /* Member of a TCAM region */
struct rhash_head ht_node; /* Member of a chunk HT */
unsigned int priority; /* Priority within the region and group */
struct parman_prio parman_prio;
struct mlxsw_sp_acl_tcam_group *group;
struct mlxsw_sp_acl_tcam_region *region;
unsigned int ref_count;
unsigned long priv[0];
/* priv has to be always the last item */
};
struct mlxsw_sp_acl_tcam_entry {
struct parman_item parman_item;
struct mlxsw_sp_acl_tcam_chunk *chunk;
unsigned long priv[0];
/* priv has to be always the last item */
};
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
......@@ -442,9 +459,6 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
memcpy(out, elusage, sizeof(*out));
}
#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
......@@ -485,19 +499,6 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
static int
mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region,
u16 new_size)
{
char ptar_pl[MLXSW_REG_PTAR_LEN];
mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
region->key_type, new_size, region->id,
region->tcam_region_info);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
......@@ -520,193 +521,22 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
static int
mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region,
unsigned int offset,
struct mlxsw_sp_acl_rule_info *rulei)
{
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
char *act_set;
char *mask;
char *key;
mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
region->tcam_region_info, offset);
key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);
/* Only the first action set belongs here, the rest is in KVD */
act_set = mlxsw_afa_block_first_set(rulei->act_block);
mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
static void
mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region,
unsigned int offset)
{
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
region->tcam_region_info, offset);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
static int
mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region,
unsigned int offset,
bool *activity)
{
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
int err;
mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
region->tcam_region_info, offset);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
if (err)
return err;
*activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
return 0;
}
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
static int
mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
struct parman_prio *parman_prio = &region->catchall.parman_prio;
struct parman_item *parman_item = &region->catchall.parman_item;
struct mlxsw_sp_acl_rule_info *rulei;
int err;
parman_prio_init(region->parman, parman_prio,
MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
err = parman_item_add(region->parman, parman_prio, parman_item);
if (err)
goto err_parman_item_add;
rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
if (IS_ERR(rulei)) {
err = PTR_ERR(rulei);
goto err_rulei_create;
}
err = mlxsw_sp_acl_rulei_act_continue(rulei);
if (WARN_ON(err))
goto err_rulei_act_continue;
err = mlxsw_sp_acl_rulei_commit(rulei);
if (err)
goto err_rulei_commit;
err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
parman_item->index, rulei);
region->catchall.rulei = rulei;
if (err)
goto err_rule_insert;
return 0;
err_rule_insert:
err_rulei_commit:
err_rulei_act_continue:
mlxsw_sp_acl_rulei_destroy(rulei);
err_rulei_create:
parman_item_remove(region->parman, parman_prio, parman_item);
err_parman_item_add:
parman_prio_fini(parman_prio);
return err;
}
static void
mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
struct parman_prio *parman_prio = &region->catchall.parman_prio;
struct parman_item *parman_item = &region->catchall.parman_item;
struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
parman_item->index);
mlxsw_sp_acl_rulei_destroy(rulei);
parman_item_remove(region->parman, parman_prio, parman_item);
parman_prio_fini(parman_prio);
}
static void
mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region,
u16 src_offset, u16 dst_offset, u16 size)
{
char prcr_pl[MLXSW_REG_PRCR_LEN];
mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
region->tcam_region_info, src_offset,
region->tcam_region_info, dst_offset, size);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
}
static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
unsigned long new_count)
{
struct mlxsw_sp_acl_tcam_region *region = priv;
struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
u64 max_tcam_rules;
max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
if (new_count > max_tcam_rules)
return -EINVAL;
return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
}
static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
unsigned long from_index,
unsigned long to_index,
unsigned long count)
{
struct mlxsw_sp_acl_tcam_region *region = priv;
struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
from_index, to_index, count);
}
static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
.base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
.resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
.resize = mlxsw_sp_acl_tcam_region_parman_resize,
.move = mlxsw_sp_acl_tcam_region_parman_move,
.algo = PARMAN_ALGO_TYPE_LSORT,
};
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_afk_element_usage *elusage)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_tcam_region *region;
int err;
region = kzalloc(sizeof(*region), GFP_KERNEL);
region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
if (!region)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&region->chunk_list);
region->mlxsw_sp = mlxsw_sp;
region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
region);
if (!region->parman) {
err = -ENOMEM;
goto err_parman_create;
}
region->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(region->key_info)) {
err = PTR_ERR(region->key_info);
......@@ -717,7 +547,7 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_region_id_get;
region->key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX;
region->key_type = ops->key_type;
err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
if (err)
goto err_tcam_region_alloc;
......@@ -726,13 +556,13 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tcam_region_enable;
err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
err = ops->region_init(mlxsw_sp, region->priv, region);
if (err)
goto err_tcam_region_catchall_add;
goto err_tcam_region_init;
return region;
err_tcam_region_catchall_add:
err_tcam_region_init:
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
......@@ -741,8 +571,6 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
err_region_id_get:
mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
parman_destroy(region->parman);
err_parman_create:
kfree(region);
return ERR_PTR(err);
}
......@@ -751,12 +579,13 @@ static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->region_fini(mlxsw_sp, region->priv);
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
mlxsw_afk_key_info_put(region->key_info);
parman_destroy(region->parman);
kfree(region);
}
......@@ -831,13 +660,14 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
int err;
if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
return ERR_PTR(-EINVAL);
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
if (!chunk)
return ERR_PTR(-ENOMEM);
chunk->priority = priority;
......@@ -849,7 +679,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_chunk_assoc;
parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);
ops->chunk_init(chunk->region->priv, chunk->priv, priority);
err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
......@@ -859,7 +689,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
return chunk;
err_rhashtable_insert:
parman_prio_fini(&chunk->parman_prio);
ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
kfree(chunk);
......@@ -870,11 +700,12 @@ static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_chunk *chunk)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_group *group = chunk->group;
rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
parman_prio_fini(&chunk->parman_prio);
ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
kfree(chunk);
}
......@@ -908,11 +739,19 @@ static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}
static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
return ops->entry_priv_size;
}
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_entry *entry,
struct mlxsw_sp_acl_rule_info *rulei)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
struct mlxsw_sp_acl_tcam_region *region;
int err;
......@@ -923,24 +762,16 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(chunk);
region = chunk->region;
err = parman_item_add(region->parman, &chunk->parman_prio,
&entry->parman_item);
if (err)
goto err_parman_item_add;
err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
entry->parman_item.index,
rulei);
err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
entry->priv, rulei);
if (err)
goto err_rule_insert;
goto err_entry_add;
entry->chunk = chunk;
return 0;
err_rule_insert:
parman_item_remove(region->parman, &chunk->parman_prio,
&entry->parman_item);
err_parman_item_add:
err_entry_add:
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
return err;
}
......@@ -948,13 +779,11 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
entry->parman_item.index);
parman_item_remove(region->parman, &chunk->parman_prio,
&entry->parman_item);
ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
......@@ -963,12 +792,12 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry,
bool *activity)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
entry->parman_item.index,
activity);
return ops->entry_activity_get(mlxsw_sp, region->priv,
entry->priv, activity);
}
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
......@@ -1030,10 +859,10 @@ struct mlxsw_sp_acl_tcam_flower_rule {
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
void *priv, void *ruleset_priv)
struct mlxsw_sp_acl_tcam *tcam,
void *ruleset_priv)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
struct mlxsw_sp_acl_tcam *tcam = priv;
return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
mlxsw_sp_acl_tcam_patterns,
......@@ -1081,6 +910,12 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}
static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
{
return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
}
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
......@@ -1118,7 +953,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
.rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
......@@ -1129,7 +964,7 @@ mlxsw_sp_acl_tcam_profile_ops_arr[] = {
[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
static const struct mlxsw_sp_acl_profile_ops *
const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_acl_profile profile)
{
......@@ -1142,10 +977,3 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
return NULL;
return ops;
}
const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
.priv_size = sizeof(struct mlxsw_sp_acl_tcam),
.init = mlxsw_sp_acl_tcam_init,
.fini = mlxsw_sp_acl_tcam_fini,
.profile_ops = mlxsw_sp_acl_tcam_profile_ops,
};
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H
#define _MLXSW_SPECTRUM_ACL_TCAM_H
#include <linux/list.h>
#include <linux/parman.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
struct mlxsw_sp_acl_tcam {
unsigned long *used_regions; /* bit array */
unsigned int max_regions;
unsigned long *used_groups; /* bit array */
unsigned int max_groups;
unsigned int max_group_size;
unsigned long priv[0];
/* priv has to be always the last item */
};
size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 *priority, bool fillup_priority);
struct mlxsw_sp_acl_profile_ops {
size_t ruleset_priv_size;
int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
u16 (*ruleset_group_id)(void *ruleset_priv);
size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp);
int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
bool *activity);
};
const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_acl_profile profile);
#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
struct mlxsw_sp_acl_tcam_group;
struct mlxsw_sp_acl_tcam_region {
struct list_head list; /* Member of a TCAM group */
struct list_head chunk_list; /* List of chunks under this region */
struct mlxsw_sp_acl_tcam_group *group;
enum mlxsw_reg_ptar_key_type key_type;
u16 id; /* ACL ID and region ID - they are same */
char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
struct mlxsw_afk_key_info *key_info;
struct mlxsw_sp *mlxsw_sp;
unsigned long priv[0];
/* priv has to be always the last item */
};
struct mlxsw_sp_acl_ctcam_region {
struct parman *parman;
struct mlxsw_sp_acl_tcam_region *region;
};
struct mlxsw_sp_acl_ctcam_chunk {
struct parman_prio parman_prio;
};
struct mlxsw_sp_acl_ctcam_entry {
struct parman_item parman_item;
};
int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_tcam_region *region);
void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion);
void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_chunk *cchunk,
unsigned int priority);
void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk);
int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_chunk *cchunk,
struct mlxsw_sp_acl_ctcam_entry *centry,
struct mlxsw_sp_acl_rule_info *rulei,
bool fillup_priority);
void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_chunk *cchunk,
struct mlxsw_sp_acl_ctcam_entry *centry);
static inline unsigned int
mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
{
return centry->parman_item.index;
}
#endif
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016-2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -33,422 +33,74 @@
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "spectrum.h"
#define MLXSW_SP_KVDL_SINGLE_BASE 0
#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
#define MLXSW_SP_KVDL_SINGLE_END \
(MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1)
#define MLXSW_SP_KVDL_CHUNKS_BASE \
(MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152
#define MLXSW_SP_KVDL_CHUNKS_END \
(MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1)
#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \
(MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE)
#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \
(MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE)
#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
(MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1
#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32
#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
struct mlxsw_sp_kvdl_part_info {
unsigned int part_index;
unsigned int start_index;
unsigned int end_index;
unsigned int alloc_size;
enum mlxsw_sp_resource_id resource_id;
};
enum mlxsw_sp_kvdl_part_id {
MLXSW_SP_KVDL_PART_ID_SINGLE,
MLXSW_SP_KVDL_PART_ID_CHUNKS,
MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS,
};
#define MLXSW_SP_KVDL_PART_INFO(id) \
[MLXSW_SP_KVDL_PART_ID_##id] = { \
.start_index = MLXSW_SP_KVDL_##id##_BASE, \
.end_index = MLXSW_SP_KVDL_##id##_END, \
.alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \
.resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
}
static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = {
MLXSW_SP_KVDL_PART_INFO(SINGLE),
MLXSW_SP_KVDL_PART_INFO(CHUNKS),
MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS),
};
#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info)
struct mlxsw_sp_kvdl_part {
struct mlxsw_sp_kvdl_part_info info;
unsigned long usage[0]; /* Entries */
};
struct mlxsw_sp_kvdl {
struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN];
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
unsigned long priv[0];
/* priv has to be always the last item */
};
static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
unsigned int alloc_size)
{
struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
int i;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
part = kvdl->parts[i];
if (alloc_size <= part->info.alloc_size &&
(!min_part ||
part->info.alloc_size <= min_part->info.alloc_size))
min_part = part;
}
return min_part ?: ERR_PTR(-ENOBUFS);
}
static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
{
struct mlxsw_sp_kvdl_part *part;
int i;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
part = kvdl->parts[i];
if (kvdl_index >= part->info.start_index &&
kvdl_index <= part->info.end_index)
return part;
}
return ERR_PTR(-EINVAL);
}
static u32
mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info,
unsigned int entry_index)
{
return info->start_index + entry_index * info->alloc_size;
}
static unsigned int
mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
u32 kvdl_index)
{
return (kvdl_index - info->start_index) / info->alloc_size;
}
static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
u32 *p_kvdl_index)
{
const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int entry_index, nr_entries;
nr_entries = (info->end_index - info->start_index + 1) /
info->alloc_size;
entry_index = find_first_zero_bit(part->usage, nr_entries);
if (entry_index == nr_entries)
return -ENOBUFS;
__set_bit(entry_index, part->usage);
*p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index);
return 0;
}
static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
u32 kvdl_index)
{
const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int entry_index;
entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index);
__clear_bit(entry_index, part->usage);
}
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index)
{
struct mlxsw_sp_kvdl_part *part;
/* Find partition with smallest allocation size satisfying the
* requested size.
*/
part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
if (IS_ERR(part))
return PTR_ERR(part);
return mlxsw_sp_kvdl_part_alloc(part, p_entry_index);
}
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
{
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index);
if (IS_ERR(part))
return;
mlxsw_sp_kvdl_part_free(part, entry_index);
}
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size)
{
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
if (IS_ERR(part))
return PTR_ERR(part);
*p_alloc_size = part->info.alloc_size;
return 0;
}
static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part,
struct mlxsw_sp_kvdl_part *part_prev,
unsigned int size)
{
if (!part_prev) {
part->info.end_index = size - 1;
} else {
part->info.start_index = part_prev->info.end_index + 1;
part->info.end_index = part->info.start_index + size - 1;
}
}
static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_kvdl_part_info *info,
struct mlxsw_sp_kvdl_part *part_prev)
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_kvdl_part *part;
bool need_update = true;
unsigned int nr_entries;
size_t usage_size;
u64 resource_size;
const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops;
struct mlxsw_sp_kvdl *kvdl;
int err;
err = devlink_resource_size_get(devlink, info->resource_id,
&resource_size);
if (err) {
need_update = false;
resource_size = info->end_index - info->start_index + 1;
}
nr_entries = div_u64(resource_size, info->alloc_size);
usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
if (!part)
return ERR_PTR(-ENOMEM);
memcpy(&part->info, info, sizeof(part->info));
if (need_update)
mlxsw_sp_kvdl_part_update(part, part_prev, resource_size);
return part;
}
static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part)
{
kfree(part);
}
static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
const struct mlxsw_sp_kvdl_part_info *info;
struct mlxsw_sp_kvdl_part *part_prev = NULL;
int err, i;
kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size,
GFP_KERNEL);
if (!kvdl)
return -ENOMEM;
kvdl->kvdl_ops = kvdl_ops;
mlxsw_sp->kvdl = kvdl;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
info = &mlxsw_sp_kvdl_parts_info[i];
kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info,
part_prev);
if (IS_ERR(kvdl->parts[i])) {
err = PTR_ERR(kvdl->parts[i]);
goto err_kvdl_part_init;
}
part_prev = kvdl->parts[i];
}
err = kvdl_ops->init(mlxsw_sp, kvdl->priv);
if (err)
goto err_init;
return 0;
err_kvdl_part_init:
for (i--; i >= 0; i--)
mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
err_init:
kfree(kvdl);
return err;
}
static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
int i;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
}
static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
{
const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int nr_entries;
int bit = -1;
u64 occ = 0;
nr_entries = (info->end_index -
info->start_index + 1) /
info->alloc_size;
while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
< nr_entries)
occ += info->alloc_size;
return occ;
}
static u64 mlxsw_sp_kvdl_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
u64 occ = 0;
int i;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]);
return occ;
}
static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
return mlxsw_sp_kvdl_part_occ(part);
}
static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
kfree(kvdl);
}
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count, u32 *p_entry_index)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
static struct devlink_resource_size_params size_params;
u32 kvdl_max_size;
int err;
kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
MLXSW_SP_KVDL_SINGLE_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
MLXSW_SP_KVDL_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&size_params);
if (err)
return err;
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&size_params);
return err;
return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
entry_count, p_entry_index);
}
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count, int entry_index)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_kvdl *kvdl;
int err;
kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL);
if (!kvdl)
return -ENOMEM;
mlxsw_sp->kvdl = kvdl;
err = mlxsw_sp_kvdl_parts_init(mlxsw_sp);
if (err)
goto err_kvdl_parts_init;
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR,
mlxsw_sp_kvdl_occ_get,
mlxsw_sp);
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
mlxsw_sp_kvdl_single_occ_get,
mlxsw_sp);
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
mlxsw_sp_kvdl_chunks_occ_get,
mlxsw_sp);
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
mlxsw_sp_kvdl_large_chunks_occ_get,
mlxsw_sp);
return 0;
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
err_kvdl_parts_init:
kfree(mlxsw_sp->kvdl);
return err;
kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
entry_count, entry_index);
}
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count,
unsigned int *p_alloc_count)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
kfree(mlxsw_sp->kvdl);
return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type,
entry_count, p_alloc_count);
}
......@@ -1075,6 +1075,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
cancel_delayed_work_sync(&mr->stats_update_dw);
mr->mr_ops->fini(mr->priv);
mr->mr_ops->fini(mlxsw_sp, mr->priv);
kfree(mr);
}
......@@ -46,15 +46,6 @@ enum mlxsw_sp_mr_route_action {
MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD,
};
enum mlxsw_sp_mr_route_prio {
MLXSW_SP_MR_ROUTE_PRIO_SG,
MLXSW_SP_MR_ROUTE_PRIO_STARG,
MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
__MLXSW_SP_MR_ROUTE_PRIO_MAX
};
#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
struct mlxsw_sp_mr_route_key {
int vrid;
enum mlxsw_sp_l3proto proto;
......@@ -101,7 +92,7 @@ struct mlxsw_sp_mr_ops {
u16 erif_index);
void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv);
void (*fini)(void *priv);
void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
};
struct mlxsw_sp_mr;
......
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
* Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -35,7 +36,6 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/parman.h>
#include "spectrum_mr_tcam.h"
#include "reg.h"
......@@ -43,15 +43,8 @@
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"
struct mlxsw_sp_mr_tcam_region {
struct mlxsw_sp *mlxsw_sp;
enum mlxsw_reg_rtar_key_type rtar_key_type;
struct parman *parman;
struct parman_prio *parman_prios;
};
struct mlxsw_sp_mr_tcam {
struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
void *priv;
};
/* This struct maps to one RIGR2 register entry */
......@@ -84,8 +77,6 @@ mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
INIT_LIST_HEAD(&erif_list->erif_sublists);
}
#define MLXSW_SP_KVDL_RIGR2_SIZE 1
static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list)
......@@ -96,8 +87,8 @@ mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
if (!erif_sublist)
return ERR_PTR(-ENOMEM);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
&erif_sublist->rigr2_kvdl_index);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
1, &erif_sublist->rigr2_kvdl_index);
if (err) {
kfree(erif_sublist);
return ERR_PTR(err);
......@@ -112,7 +103,8 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
list_del(&erif_sublist->list);
mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
1, erif_sublist->rigr2_kvdl_index);
kfree(erif_sublist);
}
......@@ -221,12 +213,11 @@ struct mlxsw_sp_mr_tcam_route {
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
u32 counter_index;
struct parman_item parman_item;
struct parman_prio *parman_prio;
enum mlxsw_sp_mr_route_action action;
struct mlxsw_sp_mr_route_key key;
u16 irif_index;
u16 min_mtu;
void *priv;
};
static struct mlxsw_afa_block *
......@@ -297,60 +288,6 @@ mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
mlxsw_afa_block_destroy(afa_block);
}
static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
struct parman_item *parman_item,
struct mlxsw_sp_mr_route_key *key,
struct mlxsw_afa_block *afa_block)
{
char rmft2_pl[MLXSW_REG_RMFT2_LEN];
switch (key->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
key->vrid,
MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
ntohl(key->group.addr4),
ntohl(key->group_mask.addr4),
ntohl(key->source.addr4),
ntohl(key->source_mask.addr4),
mlxsw_afa_block_first_set(afa_block));
break;
case MLXSW_SP_L3_PROTO_IPV6:
mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
key->vrid,
MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
key->group.addr6,
key->group_mask.addr6,
key->source.addr6,
key->source_mask.addr6,
mlxsw_afa_block_first_set(afa_block));
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
struct mlxsw_sp_mr_route_key *key,
struct parman_item *parman_item)
{
struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
char rmft2_pl[MLXSW_REG_RMFT2_LEN];
switch (key->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
vrid, 0, 0, 0, 0, 0, 0, NULL);
break;
case MLXSW_SP_L3_PROTO_IPV6:
mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
vrid, 0, 0, zero_addr, zero_addr,
zero_addr, zero_addr, NULL);
break;
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list,
......@@ -370,51 +307,12 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
return 0;
}
static struct mlxsw_sp_mr_tcam_region *
mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
enum mlxsw_sp_l3proto proto)
{
return &mr_tcam->tcam_regions[proto];
}
static int
mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
struct mlxsw_sp_mr_tcam_route *route,
enum mlxsw_sp_mr_route_prio prio)
{
struct mlxsw_sp_mr_tcam_region *tcam_region;
int err;
tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
route->key.proto);
err = parman_item_add(tcam_region->parman,
&tcam_region->parman_prios[prio],
&route->parman_item);
if (err)
return err;
route->parman_prio = &tcam_region->parman_prios[prio];
return 0;
}
static void
mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
struct mlxsw_sp_mr_tcam_route *route)
{
struct mlxsw_sp_mr_tcam_region *tcam_region;
tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
route->key.proto);
parman_item_remove(tcam_region->parman,
route->parman_prio, &route->parman_item);
}
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_params *route_params)
{
const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
int err;
......@@ -448,22 +346,23 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
goto err_afa_block_create;
}
/* Allocate place in the TCAM */
err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
route_params->prio);
if (err)
goto err_parman_item_add;
route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
if (!route->priv) {
err = -ENOMEM;
goto err_route_priv_alloc;
}
/* Write the route to the TCAM */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, route->afa_block);
err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
&route->key, route->afa_block,
route_params->prio);
if (err)
goto err_route_replace;
goto err_route_create;
return 0;
err_route_replace:
mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
err_parman_item_add:
err_route_create:
kfree(route->priv);
err_route_priv_alloc:
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
......@@ -476,12 +375,12 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
void *priv, void *route_priv)
{
const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
&route->key, &route->parman_item);
mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
kfree(route->priv);
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
......@@ -502,6 +401,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv,
enum mlxsw_sp_mr_route_action route_action)
{
const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
......@@ -516,8 +416,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, afa_block);
err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
......@@ -534,6 +433,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 min_mtu)
{
const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
......@@ -549,8 +449,7 @@ static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, afa_block);
err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
......@@ -596,6 +495,7 @@ static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 erif_index)
{
const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_erif_sublist *erif_sublist;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
......@@ -630,8 +530,7 @@ static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
}
/* Update the TCAM route entry */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, afa_block);
err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
......@@ -653,6 +552,7 @@ static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
struct mlxsw_sp_mr_route_info *route_info)
{
const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
......@@ -677,8 +577,7 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
}
/* Update the TCAM route entry */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, afa_block);
err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
......@@ -699,167 +598,36 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
return err;
}
#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
static int
mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
char rtar_pl[MLXSW_REG_RTAR_LEN];
mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
mr_tcam_region->rtar_key_type,
MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}
static void
mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
char rtar_pl[MLXSW_REG_RTAR_LEN];
mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
mr_tcam_region->rtar_key_type, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}
static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
unsigned long new_count)
{
struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
char rtar_pl[MLXSW_REG_RTAR_LEN];
u64 max_tcam_rules;
max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
if (new_count > max_tcam_rules)
return -EINVAL;
mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
mr_tcam_region->rtar_key_type, new_count);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}
static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
unsigned long from_index,
unsigned long to_index,
unsigned long count)
{
struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
char rrcr_pl[MLXSW_REG_RRCR_LEN];
mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
from_index, count,
mr_tcam_region->rtar_key_type, to_index);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
}
static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
.base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
.resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
.resize = mlxsw_sp_mr_tcam_region_parman_resize,
.move = mlxsw_sp_mr_tcam_region_parman_move,
.algo = PARMAN_ALGO_TYPE_LSORT,
};
static int
mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
enum mlxsw_reg_rtar_key_type rtar_key_type)
{
struct parman_prio *parman_prios;
struct parman *parman;
int err;
int i;
mr_tcam_region->rtar_key_type = rtar_key_type;
mr_tcam_region->mlxsw_sp = mlxsw_sp;
err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
if (err)
return err;
parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
mr_tcam_region);
if (!parman) {
err = -ENOMEM;
goto err_parman_create;
}
mr_tcam_region->parman = parman;
parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
sizeof(*parman_prios), GFP_KERNEL);
if (!parman_prios) {
err = -ENOMEM;
goto err_parman_prios_alloc;
}
mr_tcam_region->parman_prios = parman_prios;
for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
parman_prio_init(mr_tcam_region->parman,
&mr_tcam_region->parman_prios[i], i);
return 0;
err_parman_prios_alloc:
parman_destroy(parman);
err_parman_create:
mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
return err;
}
static void
mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
int i;
for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
parman_prio_fini(&mr_tcam_region->parman_prios[i]);
kfree(mr_tcam_region->parman_prios);
parman_destroy(mr_tcam_region->parman);
mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
}
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
u32 rtar_key;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
return -EIO;
rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
&region[MLXSW_SP_L3_PROTO_IPV4],
rtar_key);
if (err)
return err;
mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
if (!mr_tcam->priv)
return -ENOMEM;
rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
&region[MLXSW_SP_L3_PROTO_IPV6],
rtar_key);
err = ops->init(mlxsw_sp, mr_tcam->priv);
if (err)
goto err_ipv6_region_init;
goto err_init;
return 0;
err_ipv6_region_init:
mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
err_init:
kfree(mr_tcam->priv);
return err;
}
static void mlxsw_sp_mr_tcam_fini(void *priv)
static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
ops->fini(mr_tcam->priv);
kfree(mr_tcam->priv);
}
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
......
......@@ -1106,7 +1106,8 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
u32 tunnel_index;
int err;
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1, &tunnel_index);
if (err)
return err;
......@@ -1122,7 +1123,8 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
/* Unlink this node from the IPIP entry that it's the decap entry of. */
fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
fib_entry->decap.ipip_entry = NULL;
mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1, fib_entry->decap.tunnel_index);
}
static struct mlxsw_sp_fib_node *
......@@ -3162,8 +3164,9 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
* by the device and make sure the request can be satisfied.
*/
mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
&alloc_size);
err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
*p_adj_grp_size, &alloc_size);
if (err)
return err;
/* It is possible the allocation results in more allocated
......@@ -3275,7 +3278,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* No valid allocation size available. */
goto set_trap;
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
ecmp_size, &adj_index);
if (err) {
/* We ran out of KVD linear space, just set the
* trap and let everything flow through kernel.
......@@ -3310,7 +3314,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
old_adj_index, old_ecmp_size);
mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
old_ecmp_size, old_adj_index);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
goto set_trap;
......@@ -3332,7 +3337,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
if (old_adj_index_valid)
mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
nh_grp->ecmp_size, nh_grp->adj_index);
}
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment