Commit 756cd366 authored by David S. Miller

Merge branch 'mlxsw-Introduce-algorithmic-TCAM-support'

Ido Schimmel says:

====================
mlxsw: Introduce algorithmic TCAM support

The Spectrum-2 ASIC uses an algorithmic TCAM (A-TCAM) where multiple
exact match lookups are performed instead of a single lookup as with
standard circuit TCAM (C-TCAM) memory. This allows for higher scale and
reduced power consumption.

The lookups are performed by masking the packet with each of the masks
defined for the region (e.g., {dst_ip/24, ethtype}) and looking for an
exact match on the masked key. Of all the matching rules, the one with
the highest priority is picked.
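
Conceptually, the per-region lookup can be pictured roughly like this
(an illustrative, self-contained sketch of the idea, not driver code;
all names, types and sizes below are made up):

#include <string.h>

#define KEY_LEN 12	/* illustrative key size, not the real flex key size */

struct atcam_rule {
	unsigned char masked_key[KEY_LEN]; /* key pre-ANDed with the rule's mask */
	int mask_idx;			   /* which of the region's masks the rule uses */
	unsigned int priority;
};

/* Stand-in for the per-mask exact-match (hash) lookup done in hardware. */
static const struct atcam_rule *
exact_match_lookup(const struct atcam_rule *rules, int num_rules,
		   int mask_idx, const unsigned char *masked)
{
	int i;

	for (i = 0; i < num_rules; i++)
		if (rules[i].mask_idx == mask_idx &&
		    !memcmp(rules[i].masked_key, masked, KEY_LEN))
			return &rules[i];
	return NULL;
}

static const struct atcam_rule *
atcam_lookup(const unsigned char *packet_key,
	     const unsigned char masks[][KEY_LEN], int num_masks,
	     const struct atcam_rule *rules, int num_rules)
{
	const struct atcam_rule *best = NULL;
	unsigned char masked[KEY_LEN];
	const struct atcam_rule *rule;
	int i, j;

	for (i = 0; i < num_masks; i++) {
		/* Mask the packet key with one of the region's masks... */
		for (j = 0; j < KEY_LEN; j++)
			masked[j] = packet_key[j] & masks[i][j];
		/* ...and look for an exact match on the masked key. */
		rule = exact_match_lookup(rules, num_rules, i, masked);
		/* The highest-priority match across all masks wins. */
		if (rule && (!best || rule->priority > best->priority))
			best = rule;
	}
	return best;
}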

Since the number of masks per-region is limited, the ASIC includes a
C-TCAM that can be used as a spill area for rules that do not fit into
the A-TCAM.

The driver currently uses a C-TCAM-only mode, which is similar to
Spectrum-1. However, this mode severely limits both the number of
supported ACL rules and the performance of ACL lookups.

This patch set introduces initial support for the A-TCAM mode where the
C-TCAM is only used for rule spillage.

The first five patches add the registers and ASIC resources needed in
order to make use of the A-TCAM.

The next three patches are the "meat": they add the eRP core, which is
used to manage the masks used by each ACL region. The individual commit
messages are lengthy and aim to thoroughly explain the subject.

The next seven patches make small adjustments to the code and the
related data structures, preparing the code base for the introduction
of the A-TCAM in the last two patches.

Various A-TCAM optimizations will be the focus of follow-up patch sets:

* Pruning - Used to reduce the number of lookups. Each rule will include
  a prune vector that indicates which masks should not be considered for
  further lookups, as they cannot result in a higher-priority match

* Bloom filter - Used to reduce the number of lookups. Before performing
  a lookup with a given mask, the ASIC will consult a bloom filter
  (managed by the driver) that indicates whether a match might exist
  under the considered mask

* Masks aggregation - Used to increase scale and reduce lookups. Masks
  that only differ in up to eight consecutive bits (delta bits) can be
  aggregated into a single mask. The delta bits then become part of the
  rule's key. For example, dst_ip/16 and dst_ip/17 can be represented as
  dst_ip/16 with one delta bit. Rules using the aggregated mask then
  specify whether the 17th bit is masked and, if so, its value, as
  sketched below
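
As a rough illustration of the delta-bit idea (an illustrative sketch
only; the field names echo the PTCE-V3 delta_start/delta_mask/delta_value
fields added by this series, but the offsets and values below are made
up):

struct delta_encoded_rule {
	u8 erp_id;		/* the shared, aggregated dst_ip/16 mask (eRP) */
	u16 delta_start;	/* bit offset of the 17th dst_ip bit in the key (made up) */
	u8 delta_mask;		/* 0x00: no extra bit compared, 0x01: compare one extra bit */
	u8 delta_value;		/* required value of that bit */
};

/* A dst_ip/16 rule: only the aggregated mask is used, no delta bits. */
static const struct delta_encoded_rule rule_dst_ip_16 = {
	.erp_id = 3, .delta_start = 0, .delta_mask = 0x00, .delta_value = 0,
};

/* A dst_ip/17 rule whose 17th bit must be 1: same aggregated mask, plus
 * one delta bit compared on top of it.
 */
static const struct delta_encoded_rule rule_dst_ip_17 = {
	.erp_id = 3, .delta_start = 80, .delta_mask = 0x01, .delta_value = 1,
};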
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 41147bb1 a0a777b9
...
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
	depends on IPV6 || IPV6=n
	depends on NET_IPGRE || NET_IPGRE=n
	depends on IPV6_GRE || IPV6_GRE=n
+	select GENERIC_ALLOCATOR
	select PARMAN
	select MLXFW
	default m
...
...
@@ -18,7 +18,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
			       spectrum1_kvdl.o spectrum2_kvdl.o \
			       spectrum_kvdl.o \
			       spectrum_acl_tcam.o spectrum_acl_ctcam.o \
-			       spectrum_acl_atcam.o \
+			       spectrum_acl_atcam.o spectrum_acl_erp.o \
			       spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
			       spectrum_acl.o \
			       spectrum_flower.o spectrum_cnt.o \
...
...
@@ -457,7 +457,7 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
		      struct mlxsw_afk_key_info *key_info,
		      struct mlxsw_afk_element_values *values,
-		      char *key, char *mask)
+		      char *key, char *mask, int block_start, int block_end)
{
	char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
	char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
@@ -465,7 +465,7 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
	enum mlxsw_afk_element element;
	int block_index, i;

-	for (i = 0; i < key_info->blocks_count; i++) {
+	for (i = block_start; i <= block_end; i++) {
		memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
		memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
@@ -482,7 +482,9 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
						values->storage.mask);
		}

-		mlxsw_afk->ops->encode_block(block_key, i, key);
-		mlxsw_afk->ops->encode_block(block_mask, i, mask);
+		if (key)
+			mlxsw_afk->ops->encode_block(block_key, i, key);
+		if (mask)
+			mlxsw_afk->ops->encode_block(block_mask, i, mask);
	}
}
...
...
@@ -259,6 +259,6 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
		      struct mlxsw_afk_key_info *key_info,
		      struct mlxsw_afk_element_values *values,
-		      char *key, char *mask);
+		      char *key, char *mask, int block_start, int block_end);

#endif
...
@@ -2466,14 +2466,14 @@ MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24);
MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10,
	       MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);

-#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96
+#define MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN 96

/* reg_ptce2_flex_key_blocks
 * ACL Key.
 * Access: RW
 */
MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
-	       MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+	       MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);

/* reg_ptce2_mask
 * mask - in the same size as key. A bit that is set directs the TCAM
@@ -2482,7 +2482,7 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
 * Access: RW
 */
MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
-	       MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+	       MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);

/* reg_ptce2_flex_action_set
 * ACL action set.
@@ -2504,6 +2504,118 @@ static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
	mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
}
/* PERPT - Policy-Engine ERP Table Register
* ----------------------------------------
* This register adds and removes eRPs from the eRP table.
*/
#define MLXSW_REG_PERPT_ID 0x3021
#define MLXSW_REG_PERPT_LEN 0x80
MLXSW_REG_DEFINE(perpt, MLXSW_REG_PERPT_ID, MLXSW_REG_PERPT_LEN);
/* reg_perpt_erpt_bank
* eRP table bank.
* Range 0 .. cap_max_erp_table_banks - 1
* Access: Index
*/
MLXSW_ITEM32(reg, perpt, erpt_bank, 0x00, 16, 4);
/* reg_perpt_erpt_index
* Index to eRP table within the eRP bank.
* Range is 0 .. cap_max_erp_table_bank_size - 1
* Access: Index
*/
MLXSW_ITEM32(reg, perpt, erpt_index, 0x00, 0, 8);
enum mlxsw_reg_perpt_key_size {
MLXSW_REG_PERPT_KEY_SIZE_2KB,
MLXSW_REG_PERPT_KEY_SIZE_4KB,
MLXSW_REG_PERPT_KEY_SIZE_8KB,
MLXSW_REG_PERPT_KEY_SIZE_12KB,
};
/* reg_perpt_key_size
* Access: OP
*/
MLXSW_ITEM32(reg, perpt, key_size, 0x04, 0, 4);
/* reg_perpt_bf_bypass
* 0 - The eRP is used only if bloom filter state is set for the given
* rule.
* 1 - The eRP is used regardless of bloom filter state.
* The bypass is an OR condition of region_id or eRP. See PERCR.bf_bypass
* Access: RW
*/
MLXSW_ITEM32(reg, perpt, bf_bypass, 0x08, 8, 1);
/* reg_perpt_erp_id
* eRP ID for use by the rules.
* Access: RW
*/
MLXSW_ITEM32(reg, perpt, erp_id, 0x08, 0, 4);
/* reg_perpt_erpt_base_bank
* Base eRP table bank, points to head of erp_vector
* Range is 0 .. cap_max_erp_table_banks - 1
* Access: OP
*/
MLXSW_ITEM32(reg, perpt, erpt_base_bank, 0x0C, 16, 4);
/* reg_perpt_erpt_base_index
* Base index to eRP table within the eRP bank
* Range is 0 .. cap_max_erp_table_bank_size - 1
* Access: OP
*/
MLXSW_ITEM32(reg, perpt, erpt_base_index, 0x0C, 0, 8);
/* reg_perpt_erp_index_in_vector
* eRP index in the vector.
* Access: OP
*/
MLXSW_ITEM32(reg, perpt, erp_index_in_vector, 0x10, 0, 4);
/* reg_perpt_erp_vector
* eRP vector.
* Access: OP
*/
MLXSW_ITEM_BIT_ARRAY(reg, perpt, erp_vector, 0x14, 4, 1);
/* reg_perpt_mask
* Mask
* 0 - A-TCAM will ignore the bit in key
* 1 - A-TCAM will compare the bit in key
* Access: RW
*/
MLXSW_ITEM_BUF(reg, perpt, mask, 0x20, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
static inline void mlxsw_reg_perpt_erp_vector_pack(char *payload,
unsigned long *erp_vector,
unsigned long size)
{
unsigned long bit;
for_each_set_bit(bit, erp_vector, size)
mlxsw_reg_perpt_erp_vector_set(payload, bit, true);
}
static inline void
mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index,
enum mlxsw_reg_perpt_key_size key_size, u8 erp_id,
u8 erpt_base_bank, u8 erpt_base_index, u8 erp_index,
char *mask)
{
MLXSW_REG_ZERO(perpt, payload);
mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank);
mlxsw_reg_perpt_erpt_index_set(payload, erpt_index);
mlxsw_reg_perpt_key_size_set(payload, key_size);
mlxsw_reg_perpt_bf_bypass_set(payload, true);
mlxsw_reg_perpt_erp_id_set(payload, erp_id);
mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank);
mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index);
mlxsw_reg_perpt_erp_index_in_vector_set(payload, erp_index);
mlxsw_reg_perpt_mask_memcpy_to(payload, mask);
}
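
For reference, programming a single eRP with this register would look
roughly like the following. This is a hypothetical usage sketch (the
function name and the bank/index/ID values are made up for
illustration); it simply mirrors how the other policy-engine registers
are packed and written in this driver:

/* Hypothetical example: write one eRP into bank 0, index 0 of the eRP
 * table for a region using 8KB keys. All values are arbitrary.
 */
static int example_perpt_write(struct mlxsw_sp *mlxsw_sp, char *mask)
{
	char perpt_pl[MLXSW_REG_PERPT_LEN];

	mlxsw_reg_perpt_pack(perpt_pl, 0, 0, MLXSW_REG_PERPT_KEY_SIZE_8KB,
			     0, 0, 0, 0, mask);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
}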
/* PERAR - Policy-Engine Region Association Register
 * -------------------------------------------------
 * This register associates a hw region for region_id's. Changing on the fly
...
@@ -2545,6 +2657,164 @@ static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id,
	mlxsw_reg_perar_hw_region_set(payload, hw_region);
}
/* PTCE-V3 - Policy-Engine TCAM Entry Register Version 3
* -----------------------------------------------------
* This register is a new version of PTCE-V2 in order to support the
* A-TCAM. This register is not supported by SwitchX/-2 and Spectrum.
*/
#define MLXSW_REG_PTCE3_ID 0x3027
#define MLXSW_REG_PTCE3_LEN 0xF0
MLXSW_REG_DEFINE(ptce3, MLXSW_REG_PTCE3_ID, MLXSW_REG_PTCE3_LEN);
/* reg_ptce3_v
* Valid.
* Access: RW
*/
MLXSW_ITEM32(reg, ptce3, v, 0x00, 31, 1);
enum mlxsw_reg_ptce3_op {
/* Write operation. Used to write a new entry to the table.
* All R/W fields are relevant for new entry. Activity bit is set
* for new entries. Write with v = 0 will delete the entry. Must
* not be used if an entry exists.
*/
MLXSW_REG_PTCE3_OP_WRITE_WRITE = 0,
/* Update operation */
MLXSW_REG_PTCE3_OP_WRITE_UPDATE = 1,
/* Read operation */
MLXSW_REG_PTCE3_OP_QUERY_READ = 0,
};
/* reg_ptce3_op
* Access: OP
*/
MLXSW_ITEM32(reg, ptce3, op, 0x00, 20, 3);
/* reg_ptce3_priority
* Priority of the rule. Higher values win.
* For Spectrum-2 range is 1..cap_kvd_size - 1
* Note: Priority does not have to be unique per rule.
* Access: RW
*/
MLXSW_ITEM32(reg, ptce3, priority, 0x04, 0, 24);
/* reg_ptce3_tcam_region_info
* Opaque object that represents the TCAM region.
* Access: Index
*/
MLXSW_ITEM_BUF(reg, ptce3, tcam_region_info, 0x10,
MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
/* reg_ptce3_flex2_key_blocks
* ACL key. The key must be masked according to eRP (if exists) or
* according to master mask.
* Access: Index
*/
MLXSW_ITEM_BUF(reg, ptce3, flex2_key_blocks, 0x20,
MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
/* reg_ptce3_erp_id
* eRP ID.
* Access: Index
*/
MLXSW_ITEM32(reg, ptce3, erp_id, 0x80, 0, 4);
/* reg_ptce3_delta_start
* Start point of delta_value and delta_mask, in bits. Must not exceed
* num_key_blocks * 36 - 8. Reserved when delta_mask = 0.
* Access: Index
*/
MLXSW_ITEM32(reg, ptce3, delta_start, 0x84, 0, 10);
/* reg_ptce3_delta_mask
* Delta mask.
* 0 - Ignore relevant bit in delta_value
* 1 - Compare relevant bit in delta_value
* Delta mask must not be set for reserved fields in the key blocks.
* Note: No delta when no eRPs. Thus, for regions with
* PERERP.erpt_pointer_valid = 0 the delta mask must be 0.
* Access: Index
*/
MLXSW_ITEM32(reg, ptce3, delta_mask, 0x88, 16, 8);
/* reg_ptce3_delta_value
* Delta value.
* Bits which are masked by delta_mask must be 0.
* Access: Index
*/
MLXSW_ITEM32(reg, ptce3, delta_value, 0x88, 0, 8);
/* reg_ptce3_prune_vector
* Pruning vector relative to the PERPT.erp_id.
* Used for reducing lookups.
* 0 - NEED: Do a lookup using the eRP.
* 1 - PRUNE: Do not perform a lookup using the eRP.
* Maybe be modified by PEAPBL and PEAPBM.
* Note: In Spectrum-2, a region of 8 key blocks must be set to either
* all 1's or all 0's.
* Access: RW
*/
MLXSW_ITEM_BIT_ARRAY(reg, ptce3, prune_vector, 0x90, 4, 1);
/* reg_ptce3_prune_ctcam
* Pruning on C-TCAM. Used for reducing lookups.
* 0 - NEED: Do a lookup in the C-TCAM.
* 1 - PRUNE: Do not perform a lookup in the C-TCAM.
* Access: RW
*/
MLXSW_ITEM32(reg, ptce3, prune_ctcam, 0x94, 31, 1);
/* reg_ptce3_large_exists
* Large entry key ID exists.
* Within the region:
* 0 - SINGLE: The large_entry_key_id is not currently in use.
* For rule insert: The MSB of the key (blocks 6..11) will be added.
* For rule delete: The MSB of the key will be removed.
* 1 - NON_SINGLE: The large_entry_key_id is currently in use.
* For rule insert: The MSB of the key (blocks 6..11) will not be added.
* For rule delete: The MSB of the key will not be removed.
* Access: WO
*/
MLXSW_ITEM32(reg, ptce3, large_exists, 0x98, 31, 1);
/* reg_ptce3_large_entry_key_id
* Large entry key ID.
* A key for 12 key blocks rules. Reserved when region has less than 12 key
* blocks. Must be different for different keys which have the same common
* 6 key blocks (MSB, blocks 6..11) key within a region.
* Range is 0..cap_max_pe_large_key_id - 1
* Access: RW
*/
MLXSW_ITEM32(reg, ptce3, large_entry_key_id, 0x98, 0, 24);
/* reg_ptce3_action_pointer
* Pointer to action.
* Range is 0..cap_max_kvd_action_sets - 1
* Access: RW
*/
MLXSW_ITEM32(reg, ptce3, action_pointer, 0xA0, 0, 24);
static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
enum mlxsw_reg_ptce3_op op,
u32 priority,
const char *tcam_region_info,
const char *key, u8 erp_id,
bool large_exists, u32 lkey_id,
u32 action_pointer)
{
MLXSW_REG_ZERO(ptce3, payload);
mlxsw_reg_ptce3_v_set(payload, valid);
mlxsw_reg_ptce3_op_set(payload, op);
mlxsw_reg_ptce3_priority_set(payload, priority);
mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
}
/* PERCR - Policy-Engine Region Configuration Register
 * ---------------------------------------------------
 * This register configures the region parameters. The region_id must be
...
@@ -2598,7 +2868,6 @@ static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id)
	mlxsw_reg_percr_atcam_ignore_prune_set(payload, false);
	mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false);
	mlxsw_reg_percr_bf_bypass_set(payload, true);
-	memset(payload + 0x20, 0xff, 96);
}
/* PERERP - Policy-Engine Region eRP Register /* PERERP - Policy-Engine Region eRP Register
...
@@ -2663,12 +2932,28 @@ MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1);
 */
MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4);

-static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id)
+static inline void mlxsw_reg_pererp_erp_vector_pack(char *payload,
+						    unsigned long *erp_vector,
+						    unsigned long size)
+{
+	unsigned long bit;
+
+	for_each_set_bit(bit, erp_vector, size)
+		mlxsw_reg_pererp_erpt_vector_set(payload, bit, true);
+}
+
+static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id,
+					 bool ctcam_le, bool erpt_pointer_valid,
+					 u8 erpt_bank_pointer, u8 erpt_pointer,
+					 u8 master_rp_id)
{
	MLXSW_REG_ZERO(pererp, payload);
	mlxsw_reg_pererp_region_id_set(payload, region_id);
-	mlxsw_reg_pererp_ctcam_le_set(payload, true);
-	mlxsw_reg_pererp_erpt_pointer_valid_set(payload, true);
+	mlxsw_reg_pererp_ctcam_le_set(payload, ctcam_le);
+	mlxsw_reg_pererp_erpt_pointer_valid_set(payload, erpt_pointer_valid);
+	mlxsw_reg_pererp_erpt_bank_pointer_set(payload, erpt_bank_pointer);
+	mlxsw_reg_pererp_erpt_pointer_set(payload, erpt_pointer);
+	mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id);
}
/* IEDR - Infrastructure Entry Delete Register /* IEDR - Infrastructure Entry Delete Register
...
@@ -8248,7 +8533,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
	MLXSW_REG(prcr),
	MLXSW_REG(pefa),
	MLXSW_REG(ptce2),
+	MLXSW_REG(perpt),
	MLXSW_REG(perar),
+	MLXSW_REG(ptce3),
	MLXSW_REG(percr),
	MLXSW_REG(pererp),
	MLXSW_REG(iedr),
......
...
@@ -65,6 +65,13 @@ enum mlxsw_res_id {
	MLXSW_RES_ID_ACL_FLEX_KEYS,
	MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
	MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
+	MLXSW_RES_ID_ACL_MAX_ERPT_BANKS,
+	MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE,
+	MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID,
+	MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB,
+	MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB,
+	MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB,
+	MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB,
	MLXSW_RES_ID_MAX_CPU_POLICERS,
	MLXSW_RES_ID_MAX_VRS,
	MLXSW_RES_ID_MAX_RIFS,
@@ -108,6 +115,13 @@ static u16 mlxsw_res_ids[] = {
	[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
	[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
	[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
+	[MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940,
+	[MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941,
+	[MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942,
+	[MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB] = 0x2950,
+	[MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951,
+	[MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952,
+	[MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953,
	[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
	[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
	[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
...
...
@@ -628,6 +628,7 @@ struct mlxsw_sp_acl_tcam_ops {
	void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
	size_t region_priv_size;
	int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+			   void *tcam_priv,
			   struct mlxsw_sp_acl_tcam_region *region);
	void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
	int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
...
...
@@ -58,6 +58,26 @@ struct mlxsw_sp1_acl_tcam_entry {
	struct mlxsw_sp_acl_ctcam_entry centry;
};
static int
mlxsw_sp1_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_entry *centry,
const char *mask)
{
return 0;
}
static void
mlxsw_sp1_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_entry *centry)
{
}
static const struct mlxsw_sp_acl_ctcam_region_ops
mlxsw_sp1_acl_ctcam_region_ops = {
.entry_insert = mlxsw_sp1_acl_ctcam_region_entry_insert,
.entry_remove = mlxsw_sp1_acl_ctcam_region_entry_remove,
};
static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
				   struct mlxsw_sp_acl_tcam *tcam)
{
...
@@ -122,13 +142,15 @@ mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+			       void *tcam_priv,
			       struct mlxsw_sp_acl_tcam_region *_region)
{
	struct mlxsw_sp1_acl_tcam_region *region = region_priv;
	int err;

	err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
-					     _region);
+					     _region,
+					     &mlxsw_sp1_acl_ctcam_region_ops);
	if (err)
		return err;
	err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region);
...
...
@@ -39,23 +39,64 @@
#include "core_acl_flex_actions.h"

struct mlxsw_sp2_acl_tcam {
+	struct mlxsw_sp_acl_atcam atcam;
	u32 kvdl_index;
	unsigned int kvdl_count;
};

struct mlxsw_sp2_acl_tcam_region {
-	struct mlxsw_sp_acl_ctcam_region cregion;
+	struct mlxsw_sp_acl_atcam_region aregion;
+	struct mlxsw_sp_acl_tcam_region *region;
};

struct mlxsw_sp2_acl_tcam_chunk {
-	struct mlxsw_sp_acl_ctcam_chunk cchunk;
+	struct mlxsw_sp_acl_atcam_chunk achunk;
};

struct mlxsw_sp2_acl_tcam_entry {
-	struct mlxsw_sp_acl_ctcam_entry centry;
+	struct mlxsw_sp_acl_atcam_entry aentry;
	struct mlxsw_afa_block *act_block;
};
static int
mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_entry *centry,
const char *mask)
{
struct mlxsw_sp_acl_atcam_region *aregion;
struct mlxsw_sp_acl_atcam_entry *aentry;
struct mlxsw_sp_acl_erp *erp;
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
if (IS_ERR(erp))
return PTR_ERR(erp);
aentry->erp = erp;
return 0;
}
static void
mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_entry *centry)
{
struct mlxsw_sp_acl_atcam_region *aregion;
struct mlxsw_sp_acl_atcam_entry *aentry;
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
mlxsw_sp_acl_erp_put(aregion, aentry->erp);
}
static const struct mlxsw_sp_acl_ctcam_region_ops
mlxsw_sp2_acl_ctcam_region_ops = {
.entry_insert = mlxsw_sp2_acl_ctcam_region_entry_insert,
.entry_remove = mlxsw_sp2_acl_ctcam_region_entry_remove,
};
static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
				   struct mlxsw_sp_acl_tcam *_tcam)
{
...
@@ -99,9 +140,14 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
	if (err)
		goto err_pgcr_write;

+	err = mlxsw_sp_acl_atcam_init(mlxsw_sp, &tcam->atcam);
+	if (err)
+		goto err_atcam_init;
+
	mlxsw_afa_block_destroy(afa_block);
	return 0;

+err_atcam_init:
err_pgcr_write:
err_pefa_write:
err_afa_block_continue:
...
@@ -116,22 +162,24 @@ static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp2_acl_tcam *tcam = priv;

+	mlxsw_sp_acl_atcam_fini(mlxsw_sp, &tcam->atcam);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
			   tcam->kvdl_count, tcam->kvdl_index);
}
static int
mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+			       void *tcam_priv,
			       struct mlxsw_sp_acl_tcam_region *_region)
{
	struct mlxsw_sp2_acl_tcam_region *region = region_priv;
-	int err;
+	struct mlxsw_sp2_acl_tcam *tcam = tcam_priv;

-	err = mlxsw_sp_acl_atcam_region_init(mlxsw_sp, _region);
-	if (err)
-		return err;
-	return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
-					      _region);
+	region->region = _region;
+
+	return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam,
+					      &region->aregion, _region,
+					      &mlxsw_sp2_acl_ctcam_region_ops);
}
static void
...
@@ -139,7 +187,7 @@ mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
{
	struct mlxsw_sp2_acl_tcam_region *region = region_priv;

-	mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+	mlxsw_sp_acl_atcam_region_fini(&region->aregion);
}

static int
...
@@ -155,7 +203,7 @@ static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
	struct mlxsw_sp2_acl_tcam_region *region = region_priv;
	struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;

-	mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
+	mlxsw_sp_acl_atcam_chunk_init(&region->aregion, &chunk->achunk,
				      priority);
}

@@ -163,7 +211,7 @@ static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
{
	struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;

-	mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
+	mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk);
}

static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
@@ -176,9 +224,9 @@ static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;

	entry->act_block = rulei->act_block;
-	return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
-					    &chunk->cchunk, &entry->centry,
-					    rulei, true);
+	return mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, &region->aregion,
+					    &chunk->achunk, &entry->aentry,
+					    rulei);
}

static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
@@ -189,8 +237,8 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
	struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;

-	mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
-				     &chunk->cchunk, &entry->centry);
+	mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, &region->aregion, &chunk->achunk,
+				     &entry->aentry);
}

static int
...
...
@@ -34,12 +34,275 @@
 */

#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>

#include "reg.h"
#include "core.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_keys.h"
#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
u8 erp_id;
};
struct mlxsw_sp_acl_atcam_lkey_id {
struct rhash_head ht_node;
struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key;
refcount_t refcnt;
u32 id;
};
struct mlxsw_sp_acl_atcam_region_ops {
int (*init)(struct mlxsw_sp_acl_atcam_region *aregion);
void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
struct mlxsw_sp_acl_atcam_lkey_id *
(*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
};
struct mlxsw_sp_acl_atcam_region_generic {
struct mlxsw_sp_acl_atcam_lkey_id dummy_lkey_id;
};
struct mlxsw_sp_acl_atcam_region_12kb {
struct rhashtable lkey_ht;
unsigned int max_lkey_id;
unsigned long *used_lkey_id;
};
static const struct rhashtable_params mlxsw_sp_acl_atcam_lkey_id_ht_params = {
.key_len = sizeof(struct mlxsw_sp_acl_atcam_lkey_id_ht_key),
.key_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_key),
.head_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_node),
};
static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
.key_len = sizeof(struct mlxsw_sp_acl_atcam_entry_ht_key),
.key_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_key),
.head_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_node),
};
static bool
mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
{
return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
}
static int
mlxsw_sp_acl_atcam_region_generic_init(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp_acl_atcam_region_generic *region_generic;
region_generic = kzalloc(sizeof(*region_generic), GFP_KERNEL);
if (!region_generic)
return -ENOMEM;
refcount_set(&region_generic->dummy_lkey_id.refcnt, 1);
aregion->priv = region_generic;
return 0;
}
static void
mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion)
{
kfree(aregion->priv);
}
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_rule_info *rulei,
u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_generic *region_generic;
region_generic = aregion->priv;
return &region_generic->dummy_lkey_id;
}
static void
mlxsw_sp_acl_atcam_generic_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
{
}
static const struct mlxsw_sp_acl_atcam_region_ops
mlxsw_sp_acl_atcam_region_generic_ops = {
.init = mlxsw_sp_acl_atcam_region_generic_init,
.fini = mlxsw_sp_acl_atcam_region_generic_fini,
.lkey_id_get = mlxsw_sp_acl_atcam_generic_lkey_id_get,
.lkey_id_put = mlxsw_sp_acl_atcam_generic_lkey_id_put,
};
static int
mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb;
size_t alloc_size;
u64 max_lkey_id;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID))
return -EIO;
max_lkey_id = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID);
region_12kb = kzalloc(sizeof(*region_12kb), GFP_KERNEL);
if (!region_12kb)
return -ENOMEM;
alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long);
region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL);
if (!region_12kb->used_lkey_id) {
err = -ENOMEM;
goto err_used_lkey_id_alloc;
}
err = rhashtable_init(&region_12kb->lkey_ht,
&mlxsw_sp_acl_atcam_lkey_id_ht_params);
if (err)
goto err_rhashtable_init;
region_12kb->max_lkey_id = max_lkey_id;
aregion->priv = region_12kb;
return 0;
err_rhashtable_init:
kfree(region_12kb->used_lkey_id);
err_used_lkey_id_alloc:
kfree(region_12kb);
return err;
}
static void
mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
rhashtable_destroy(&region_12kb->lkey_ht);
kfree(region_12kb->used_lkey_id);
kfree(region_12kb);
}
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_lkey_id_create(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id_ht_key *ht_key)
{
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
u32 id;
int err;
id = find_first_zero_bit(region_12kb->used_lkey_id,
region_12kb->max_lkey_id);
if (id < region_12kb->max_lkey_id)
__set_bit(id, region_12kb->used_lkey_id);
else
return ERR_PTR(-ENOBUFS);
lkey_id = kzalloc(sizeof(*lkey_id), GFP_KERNEL);
if (!lkey_id) {
err = -ENOMEM;
goto err_lkey_id_alloc;
}
lkey_id->id = id;
memcpy(&lkey_id->ht_key, ht_key, sizeof(*ht_key));
refcount_set(&lkey_id->refcnt, 1);
err = rhashtable_insert_fast(&region_12kb->lkey_ht,
&lkey_id->ht_node,
mlxsw_sp_acl_atcam_lkey_id_ht_params);
if (err)
goto err_rhashtable_insert;
return lkey_id;
err_rhashtable_insert:
kfree(lkey_id);
err_lkey_id_alloc:
__clear_bit(id, region_12kb->used_lkey_id);
return ERR_PTR(err);
}
static void
mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
{
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
u32 id = lkey_id->id;
rhashtable_remove_fast(&region_12kb->lkey_ht, &lkey_id->ht_node,
mlxsw_sp_acl_atcam_lkey_id_ht_params);
kfree(lkey_id);
__clear_bit(id, region_12kb->used_lkey_id);
}
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_rule_info *rulei,
u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key = {{ 0 } };
struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
ht_key.erp_id = erp_id;
lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
mlxsw_sp_acl_atcam_lkey_id_ht_params);
if (lkey_id) {
refcount_inc(&lkey_id->refcnt);
return lkey_id;
}
return mlxsw_sp_acl_atcam_lkey_id_create(aregion, &ht_key);
}
static void
mlxsw_sp_acl_atcam_12kb_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
{
if (refcount_dec_and_test(&lkey_id->refcnt))
mlxsw_sp_acl_atcam_lkey_id_destroy(aregion, lkey_id);
}
static const struct mlxsw_sp_acl_atcam_region_ops
mlxsw_sp_acl_atcam_region_12kb_ops = {
.init = mlxsw_sp_acl_atcam_region_12kb_init,
.fini = mlxsw_sp_acl_atcam_region_12kb_fini,
.lkey_id_get = mlxsw_sp_acl_atcam_12kb_lkey_id_get,
.lkey_id_put = mlxsw_sp_acl_atcam_12kb_lkey_id_put,
};
static const struct mlxsw_sp_acl_atcam_region_ops *
mlxsw_sp_acl_atcam_region_ops_arr[] = {
[MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] =
&mlxsw_sp_acl_atcam_region_generic_ops,
[MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] =
&mlxsw_sp_acl_atcam_region_generic_ops,
[MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] =
&mlxsw_sp_acl_atcam_region_generic_ops,
[MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] =
&mlxsw_sp_acl_atcam_region_12kb_ops,
};
int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
					u16 region_id)
...
@@ -57,39 +320,249 @@ int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl);
}
-static int mlxsw_sp_acl_atcam_region_param_init(struct mlxsw_sp *mlxsw_sp,
-						u16 region_id)
-{
-	char percr_pl[MLXSW_REG_PERCR_LEN];
-
-	mlxsw_reg_percr_pack(percr_pl, region_id);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
-}
-
-static int
-mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp,
-				   u16 region_id)
-{
-	char pererp_pl[MLXSW_REG_PERERP_LEN];
-
-	mlxsw_reg_pererp_pack(pererp_pl, region_id);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
-}
-
-int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
-				   struct mlxsw_sp_acl_tcam_region *region)
-{
-	int err;
-
-	err = mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
-	if (err)
-		return err;
-	err = mlxsw_sp_acl_atcam_region_param_init(mlxsw_sp, region->id);
-	if (err)
-		return err;
-	err = mlxsw_sp_acl_atcam_region_erp_init(mlxsw_sp, region->id);
-	if (err)
-		return err;
-	return 0;
-}

static void
mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion)
{
	struct mlxsw_sp_acl_tcam_region *region = aregion->region;
	enum mlxsw_sp_acl_atcam_region_type region_type;
	unsigned int blocks_count;

	/* We already know the blocks count can not exceed the maximum
	 * blocks count.
	 */
	blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
	if (blocks_count <= 2)
		region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB;
	else if (blocks_count <= 4)
		region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB;
	else if (blocks_count <= 8)
		region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB;
	else
		region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB;

	aregion->type = region_type;
	aregion->ops = mlxsw_sp_acl_atcam_region_ops_arr[region_type];
}

int
mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_atcam *atcam,
			       struct mlxsw_sp_acl_atcam_region *aregion,
			       struct mlxsw_sp_acl_tcam_region *region,
			       const struct mlxsw_sp_acl_ctcam_region_ops *ops)
{
	int err;

	aregion->region = region;
	aregion->atcam = atcam;
	mlxsw_sp_acl_atcam_region_type_init(aregion);

	err = rhashtable_init(&aregion->entries_ht,
			      &mlxsw_sp_acl_atcam_entries_ht_params);
	if (err)
		return err;
	err = aregion->ops->init(aregion);
	if (err)
		goto err_ops_init;
	err = mlxsw_sp_acl_erp_region_init(aregion);
	if (err)
		goto err_erp_region_init;
	err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion,
					     region, ops);
	if (err)
		goto err_ctcam_region_init;

	return 0;

err_ctcam_region_init:
	mlxsw_sp_acl_erp_region_fini(aregion);
err_erp_region_init:
	aregion->ops->fini(aregion);
err_ops_init:
	rhashtable_destroy(&aregion->entries_ht);
	return err;
}

void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
{
	mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion);
	mlxsw_sp_acl_erp_region_fini(aregion);
	aregion->ops->fini(aregion);
	rhashtable_destroy(&aregion->entries_ht);
}

void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
				   struct mlxsw_sp_acl_atcam_chunk *achunk,
				   unsigned int priority)
{
	mlxsw_sp_acl_ctcam_chunk_init(&aregion->cregion, &achunk->cchunk,
				      priority);
}

void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk)
{
	mlxsw_sp_acl_ctcam_chunk_fini(&achunk->cchunk);
}

static int
mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_atcam_region *aregion,
				       struct mlxsw_sp_acl_atcam_entry *aentry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_region *region = aregion->region;
	u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
	struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
	char ptce3_pl[MLXSW_REG_PTCE3_LEN];
	u32 kvdl_index, priority;
	int err;

	err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
	if (err)
		return err;

	lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
	if (IS_ERR(lkey_id))
		return PTR_ERR(lkey_id);
	aentry->lkey_id = lkey_id;

	kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
	mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
			     priority, region->tcam_region_info,
			     aentry->ht_key.enc_key, erp_id,
			     refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
			     kvdl_index);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
	if (err)
		goto err_ptce3_write;

	return 0;

err_ptce3_write:
	aregion->ops->lkey_id_put(aregion, lkey_id);
	return err;
}
static void
mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_entry *aentry)
{
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
region->tcam_region_info, aentry->ht_key.enc_key,
erp_id, refcount_read(&lkey_id->refcnt) != 1,
lkey_id->id, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
aregion->ops->lkey_id_put(aregion, lkey_id);
}
static int
__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_entry *aentry,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_erp *erp;
unsigned int blocks_count;
int err;
blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
if (IS_ERR(erp))
return PTR_ERR(erp);
aentry->erp = erp;
aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
/* We can't insert identical rules into the A-TCAM, so fail and
* let the rule spill into C-TCAM
*/
err = rhashtable_lookup_insert_fast(&aregion->entries_ht,
&aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
if (err)
goto err_rhashtable_insert;
err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry,
rulei);
if (err)
goto err_rule_insert;
return 0;
err_rule_insert:
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
err_rhashtable_insert:
mlxsw_sp_acl_erp_put(aregion, erp);
return err;
}
static void
__mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_entry *aentry)
{
mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
mlxsw_sp_acl_erp_put(aregion, aentry->erp);
}
int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry,
struct mlxsw_sp_acl_rule_info *rulei)
{
int err;
err = __mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, aregion, aentry, rulei);
if (!err)
return 0;
/* It is possible we failed to add the rule to the A-TCAM due to
* exceeded number of masks. Try to spill into C-TCAM.
*/
err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion,
&achunk->cchunk, &aentry->centry,
rulei, true);
if (!err)
return 0;
return err;
}
void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry)
{
if (mlxsw_sp_acl_atcam_is_centry(aentry))
mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &aregion->cregion,
&achunk->cchunk, &aentry->centry);
else
__mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry);
}
int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam)
{
return mlxsw_sp_acl_erps_init(mlxsw_sp, atcam);
}
void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam)
{
mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam);
}
...
@@ -69,13 +69,15 @@ mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
-				       struct mlxsw_sp_acl_tcam_region *region,
-				       unsigned int offset,
+				       struct mlxsw_sp_acl_ctcam_region *cregion,
+				       struct mlxsw_sp_acl_ctcam_entry *centry,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       bool fillup_priority)
{
+	struct mlxsw_sp_acl_tcam_region *region = cregion->region;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+	unsigned int blocks_count;
	char *act_set;
	u32 priority;
	char *mask;
@@ -88,10 +90,17 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
		return err;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
-			     region->tcam_region_info, offset, priority);
+			     region->tcam_region_info,
+			     centry->parman_item.index, priority);
	key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
	mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
-	mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
+	blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+	mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
+			 blocks_count - 1);
+
+	err = cregion->ops->entry_insert(cregion, centry, mask);
+	if (err)
+		return err;

	/* Only the first action set belongs here, the rest is in KVD */
	act_set = mlxsw_afa_block_first_set(rulei->act_block);
@@ -102,14 +111,16 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
-				       struct mlxsw_sp_acl_tcam_region *region,
-				       unsigned int offset)
+				       struct mlxsw_sp_acl_ctcam_region *cregion,
+				       struct mlxsw_sp_acl_ctcam_entry *centry)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];

	mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
-			     region->tcam_region_info, offset, 0);
+			     cregion->region->tcam_region_info,
+			     centry->parman_item.index, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+	cregion->ops->entry_remove(cregion, centry);
}

static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
@@ -147,11 +158,14 @@ static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = {
	.algo = PARMAN_ALGO_TYPE_LSORT,
};

-int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
-				   struct mlxsw_sp_acl_ctcam_region *cregion,
-				   struct mlxsw_sp_acl_tcam_region *region)
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+			       struct mlxsw_sp_acl_ctcam_region *cregion,
+			       struct mlxsw_sp_acl_tcam_region *region,
+			       const struct mlxsw_sp_acl_ctcam_region_ops *ops)
{
	cregion->region = region;
+	cregion->ops = ops;
	cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops,
					cregion);
	if (!cregion->parman)
@@ -190,8 +204,7 @@ int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
	if (err)
		return err;

-	err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion->region,
-						     centry->parman_item.index,
-						     rulei, fillup_priority);
+	err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion, centry,
+						     rulei, fillup_priority);
	if (err)
		goto err_rule_insert;
@@ -208,8 +221,7 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ctcam_chunk *cchunk,
				  struct mlxsw_sp_acl_ctcam_entry *centry)
{
-	mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion->region,
-					       centry->parman_item.index);
+	mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion, centry);
	parman_item_remove(cregion->parman, &cchunk->parman_prio,
			   &centry->parman_item);
}
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2018 Ido Schimmel <idosch@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "core.h"
#include "reg.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
/* gen_pool_alloc() returns 0 when allocation fails, so use an offset */
#define MLXSW_SP_ACL_ERP_GENALLOC_OFFSET 0x100
#define MLXSW_SP_ACL_ERP_MAX_PER_REGION 16
struct mlxsw_sp_acl_erp_core {
unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1];
struct gen_pool *erp_tables;
struct mlxsw_sp *mlxsw_sp;
unsigned int num_erp_banks;
};
struct mlxsw_sp_acl_erp_key {
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
bool ctcam;
};
struct mlxsw_sp_acl_erp {
struct mlxsw_sp_acl_erp_key key;
u8 id;
u8 index;
refcount_t refcnt;
DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
struct list_head list;
struct rhash_head ht_node;
struct mlxsw_sp_acl_erp_table *erp_table;
};
struct mlxsw_sp_acl_erp_master_mask {
DECLARE_BITMAP(bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned int count[MLXSW_SP_ACL_TCAM_MASK_LEN];
};
struct mlxsw_sp_acl_erp_table {
struct mlxsw_sp_acl_erp_master_mask master_mask;
DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
struct list_head atcam_erps_list;
struct rhashtable erp_ht;
struct mlxsw_sp_acl_erp_core *erp_core;
struct mlxsw_sp_acl_atcam_region *aregion;
const struct mlxsw_sp_acl_erp_table_ops *ops;
unsigned long base_index;
unsigned int num_atcam_erps;
unsigned int num_max_atcam_erps;
unsigned int num_ctcam_erps;
};
static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
.key_len = sizeof(struct mlxsw_sp_acl_erp_key),
.key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
.head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
};
struct mlxsw_sp_acl_erp_table_ops {
struct mlxsw_sp_acl_erp *
(*erp_create)(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key);
void (*erp_destroy)(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp);
};
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key);
static void
mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp);
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key);
static void
mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp);
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key);
static void
mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp);
static void
mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp);
static const struct mlxsw_sp_acl_erp_table_ops erp_multiple_masks_ops = {
.erp_create = mlxsw_sp_acl_erp_mask_create,
.erp_destroy = mlxsw_sp_acl_erp_mask_destroy,
};
static const struct mlxsw_sp_acl_erp_table_ops erp_two_masks_ops = {
.erp_create = mlxsw_sp_acl_erp_mask_create,
.erp_destroy = mlxsw_sp_acl_erp_second_mask_destroy,
};
static const struct mlxsw_sp_acl_erp_table_ops erp_single_mask_ops = {
.erp_create = mlxsw_sp_acl_erp_second_mask_create,
.erp_destroy = mlxsw_sp_acl_erp_first_mask_destroy,
};
static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
.erp_create = mlxsw_sp_acl_erp_first_mask_create,
.erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
};
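/* The ops pointers above implement a per-region state machine, keyed by how
 * many masks the region currently uses: with no masks the region is idle
 * (erp_no_mask_ops); with a single A-TCAM mask the region relies on the
 * master RP / master mask and no eRP table is allocated
 * (erp_single_mask_ops); adding a second mask allocates an eRP table and
 * moves to erp_two_masks_ops; further masks, or any C-TCAM spill, use
 * erp_multiple_masks_ops. Destroy operations walk the same transitions in
 * reverse.
 */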
bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
{
return erp->key.ctcam;
}
u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
{
return erp->id;
}
static unsigned int
mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
{
struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
return erp_core->erpt_entries_size[aregion->type];
}
static int mlxsw_sp_acl_erp_id_get(struct mlxsw_sp_acl_erp_table *erp_table,
u8 *p_id)
{
u8 id;
id = find_first_zero_bit(erp_table->erp_id_bitmap,
MLXSW_SP_ACL_ERP_MAX_PER_REGION);
if (id < MLXSW_SP_ACL_ERP_MAX_PER_REGION) {
__set_bit(id, erp_table->erp_id_bitmap);
*p_id = id;
return 0;
}
return -ENOBUFS;
}
static void mlxsw_sp_acl_erp_id_put(struct mlxsw_sp_acl_erp_table *erp_table,
u8 id)
{
__clear_bit(id, erp_table->erp_id_bitmap);
}
static void
mlxsw_sp_acl_erp_master_mask_bit_set(unsigned long bit,
struct mlxsw_sp_acl_erp_master_mask *mask)
{
if (mask->count[bit]++ == 0)
__set_bit(bit, mask->bitmap);
}
static void
mlxsw_sp_acl_erp_master_mask_bit_clear(unsigned long bit,
struct mlxsw_sp_acl_erp_master_mask *mask)
{
if (--mask->count[bit] == 0)
__clear_bit(bit, mask->bitmap);
}
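/* The master mask is the bitwise OR of the masks of all eRPs in the region.
 * It is maintained with a per-bit reference count: a bit is set in the
 * bitmap when the first eRP using it appears and cleared only when the
 * last such eRP goes away. For example, if three eRPs mask bit 5, then
 * count[5] is 3 and bit 5 stays set until all three are destroyed.
 */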
static int
mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
{
struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
char percr_pl[MLXSW_REG_PERCR_LEN];
char *master_mask;
mlxsw_reg_percr_pack(percr_pl, region->id);
master_mask = mlxsw_reg_percr_master_mask_data(percr_pl);
bitmap_to_arr32((u32 *) master_mask, erp_table->master_mask.bitmap,
MLXSW_SP_ACL_TCAM_MASK_LEN);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
}
static int
mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
const struct mlxsw_sp_acl_erp *erp)
{
unsigned long bit;
int err;
for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
if (err)
goto err_master_mask_update;
return 0;
err_master_mask_update:
for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
return err;
}
static int
mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
const struct mlxsw_sp_acl_erp *erp)
{
unsigned long bit;
int err;
for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
if (err)
goto err_master_mask_update;
return 0;
err_master_mask_update:
for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
return err;
}
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
erp = kzalloc(sizeof(*erp), GFP_KERNEL);
if (!erp)
return ERR_PTR(-ENOMEM);
err = mlxsw_sp_acl_erp_id_get(erp_table, &erp->id);
if (err)
goto err_erp_id_get;
memcpy(&erp->key, key, sizeof(*key));
bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
list_add(&erp->list, &erp_table->atcam_erps_list);
refcount_set(&erp->refcnt, 1);
erp_table->num_atcam_erps++;
erp->erp_table = erp_table;
err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
if (err)
goto err_master_mask_set;
err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
if (err)
goto err_rhashtable_insert;
return erp;
err_rhashtable_insert:
mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
err_master_mask_set:
erp_table->num_atcam_erps--;
list_del(&erp->list);
mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
err_erp_id_get:
kfree(erp);
return ERR_PTR(err);
}
static void
mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
erp_table->num_atcam_erps--;
list_del(&erp->list);
mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
kfree(erp);
}
static int
mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
unsigned int num_erps,
enum mlxsw_sp_acl_atcam_region_type region_type,
unsigned long *p_index)
{
unsigned int num_rows, entry_size;
/* We only allow allocations of entire rows */
if (num_erps % erp_core->num_erp_banks != 0)
return -EINVAL;
entry_size = erp_core->erpt_entries_size[region_type];
num_rows = num_erps / erp_core->num_erp_banks;
*p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
if (*p_index == 0)
return -ENOBUFS;
*p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
return 0;
}
static void
mlxsw_sp_acl_erp_table_free(struct mlxsw_sp_acl_erp_core *erp_core,
unsigned int num_erps,
enum mlxsw_sp_acl_atcam_region_type region_type,
unsigned long index)
{
unsigned long base_index;
unsigned int entry_size;
size_t size;
entry_size = erp_core->erpt_entries_size[region_type];
base_index = index + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
size = num_erps / erp_core->num_erp_banks * entry_size;
gen_pool_free(erp_core->erp_tables, base_index, size);
}
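/* eRP table space is handed out in whole rows, where a row holds one entry
 * per eRP bank. With, say, four banks, a request for eight eRPs therefore
 * consumes two rows, and the pool allocation size is the number of rows
 * multiplied by the per-region-type entry size.
 */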
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_table_master_rp(struct mlxsw_sp_acl_erp_table *erp_table)
{
if (!list_is_singular(&erp_table->atcam_erps_list))
return NULL;
return list_first_entry(&erp_table->atcam_erps_list,
struct mlxsw_sp_acl_erp, list);
}
static int mlxsw_sp_acl_erp_index_get(struct mlxsw_sp_acl_erp_table *erp_table,
u8 *p_index)
{
u8 index;
index = find_first_zero_bit(erp_table->erp_index_bitmap,
erp_table->num_max_atcam_erps);
if (index < erp_table->num_max_atcam_erps) {
__set_bit(index, erp_table->erp_index_bitmap);
*p_index = index;
return 0;
}
return -ENOBUFS;
}
static void mlxsw_sp_acl_erp_index_put(struct mlxsw_sp_acl_erp_table *erp_table,
u8 index)
{
__clear_bit(index, erp_table->erp_index_bitmap);
}
static void
mlxsw_sp_acl_erp_table_locate(const struct mlxsw_sp_acl_erp_table *erp_table,
const struct mlxsw_sp_acl_erp *erp,
u8 *p_erpt_bank, u8 *p_erpt_index)
{
unsigned int entry_size = mlxsw_sp_acl_erp_table_entry_size(erp_table);
struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
unsigned int row;
*p_erpt_bank = erp->index % erp_core->num_erp_banks;
row = erp->index / erp_core->num_erp_banks;
*p_erpt_index = erp_table->base_index + row * entry_size;
}
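/* As an example of the mapping above, with four banks, an entry size of
 * two and a base index of 10 (all illustrative values), an eRP with
 * index 6 lands in bank 2, row 1, i.e. eRP table index 12.
 */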
static int
mlxsw_sp_acl_erp_table_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
enum mlxsw_reg_perpt_key_size key_size;
char perpt_pl[MLXSW_REG_PERPT_LEN];
u8 erpt_bank, erpt_index;
mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
0, erp_table->base_index, erp->index,
erp->key.mask);
mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
MLXSW_SP_ACL_ERP_MAX_PER_REGION);
mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, true);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
}
static void mlxsw_sp_acl_erp_table_erp_del(struct mlxsw_sp_acl_erp *erp)
{
char empty_mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
enum mlxsw_reg_perpt_key_size key_size;
char perpt_pl[MLXSW_REG_PERPT_LEN];
u8 erpt_bank, erpt_index;
mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
0, erp_table->base_index, erp->index, empty_mask);
mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
MLXSW_SP_ACL_ERP_MAX_PER_REGION);
mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
}
static int
mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table,
bool ctcam_le)
{
struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
char pererp_pl[MLXSW_REG_PERERP_LEN];
mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
erp_table->base_index, 0);
mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
MLXSW_SP_ACL_ERP_MAX_PER_REGION);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
}
static void
mlxsw_sp_acl_erp_table_disable(struct mlxsw_sp_acl_erp_table *erp_table)
{
struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
char pererp_pl[MLXSW_REG_PERERP_LEN];
struct mlxsw_sp_acl_erp *master_rp;
master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
/* It is possible we do not have a master RP when we disable the
 * table; this happens when there are no rules in the A-TCAM and the
 * last C-TCAM rule is deleted
 */
mlxsw_reg_pererp_pack(pererp_pl, region->id, false, false, 0, 0,
master_rp ? master_rp->id : 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
}
static int
mlxsw_sp_acl_erp_table_relocate(struct mlxsw_sp_acl_erp_table *erp_table)
{
struct mlxsw_sp_acl_erp *erp;
int err;
list_for_each_entry(erp, &erp_table->atcam_erps_list, list) {
err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
if (err)
goto err_table_erp_add;
}
return 0;
err_table_erp_add:
list_for_each_entry_continue_reverse(erp, &erp_table->atcam_erps_list,
list)
mlxsw_sp_acl_erp_table_erp_del(erp);
return err;
}
static int
mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table)
{
unsigned int num_erps, old_num_erps = erp_table->num_max_atcam_erps;
struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
unsigned long old_base_index = erp_table->base_index;
bool ctcam_le = erp_table->num_ctcam_erps > 0;
int err;
if (erp_table->num_atcam_erps < erp_table->num_max_atcam_erps)
return 0;
if (erp_table->num_max_atcam_erps == MLXSW_SP_ACL_ERP_MAX_PER_REGION)
return -ENOBUFS;
num_erps = old_num_erps + erp_core->num_erp_banks;
err = mlxsw_sp_acl_erp_table_alloc(erp_core, num_erps,
erp_table->aregion->type,
&erp_table->base_index);
if (err)
return err;
erp_table->num_max_atcam_erps = num_erps;
err = mlxsw_sp_acl_erp_table_relocate(erp_table);
if (err)
goto err_table_relocate;
err = mlxsw_sp_acl_erp_table_enable(erp_table, ctcam_le);
if (err)
goto err_table_enable;
mlxsw_sp_acl_erp_table_free(erp_core, old_num_erps,
erp_table->aregion->type, old_base_index);
return 0;
err_table_enable:
err_table_relocate:
erp_table->num_max_atcam_erps = old_num_erps;
mlxsw_sp_acl_erp_table_free(erp_core, num_erps,
erp_table->aregion->type,
erp_table->base_index);
erp_table->base_index = old_base_index;
return err;
}
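/* Expansion is done make-before-break: a larger table is allocated at a
 * new base index, all existing eRPs are re-written there, the region is
 * re-pointed at the new base and only then is the old table freed. On any
 * failure the original table is left untouched and the new allocation is
 * released.
 */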
static int
mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
{
struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
struct mlxsw_sp_acl_erp *master_rp;
int err;
/* Initially, allocate a single eRP row. Expand later as needed */
err = mlxsw_sp_acl_erp_table_alloc(erp_core, erp_core->num_erp_banks,
erp_table->aregion->type,
&erp_table->base_index);
if (err)
return err;
erp_table->num_max_atcam_erps = erp_core->num_erp_banks;
/* Transition the sole RP currently configured (the master RP)
* to the eRP table
*/
master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
if (!master_rp) {
err = -EINVAL;
goto err_table_master_rp;
}
/* Maintain the same eRP bank for the master RP, so that we
 * do not need to update the bloom filter
 */
master_rp->index = master_rp->index % erp_core->num_erp_banks;
__set_bit(master_rp->index, erp_table->erp_index_bitmap);
err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp);
if (err)
goto err_table_master_rp_add;
err = mlxsw_sp_acl_erp_table_enable(erp_table, false);
if (err)
goto err_table_enable;
return 0;
err_table_enable:
mlxsw_sp_acl_erp_table_erp_del(master_rp);
err_table_master_rp_add:
__clear_bit(master_rp->index, erp_table->erp_index_bitmap);
err_table_master_rp:
mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
erp_table->aregion->type,
erp_table->base_index);
return err;
}
static void
mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_table)
{
struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
struct mlxsw_sp_acl_erp *master_rp;
mlxsw_sp_acl_erp_table_disable(erp_table);
master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
if (!master_rp)
return;
mlxsw_sp_acl_erp_table_erp_del(master_rp);
__clear_bit(master_rp->index, erp_table->erp_index_bitmap);
mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
erp_table->aregion->type,
erp_table->base_index);
}
static int
mlxsw_sp_acl_erp_region_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
bool ctcam_le = erp_table->num_ctcam_erps > 0;
char pererp_pl[MLXSW_REG_PERERP_LEN];
mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
erp_table->base_index, 0);
mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
MLXSW_SP_ACL_ERP_MAX_PER_REGION);
mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, true);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
}
static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
bool ctcam_le = erp_table->num_ctcam_erps > 0;
char pererp_pl[MLXSW_REG_PERERP_LEN];
mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
erp_table->base_index, 0);
mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
MLXSW_SP_ACL_ERP_MAX_PER_REGION);
mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
}
static int
mlxsw_sp_acl_erp_region_ctcam_enable(struct mlxsw_sp_acl_erp_table *erp_table)
{
/* No need to re-enable lookup in the C-TCAM */
if (erp_table->num_ctcam_erps > 1)
return 0;
return mlxsw_sp_acl_erp_table_enable(erp_table, true);
}
static void
mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
{
/* Only disable C-TCAM lookup when last C-TCAM eRP is deleted */
if (erp_table->num_ctcam_erps > 1)
return;
mlxsw_sp_acl_erp_table_enable(erp_table, false);
}
static void
mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
{
switch (erp_table->num_atcam_erps) {
case 2:
/* Keep using the eRP table, but correctly set the
* operations pointer so that when an A-TCAM eRP is
* deleted we will transition to use the master mask
*/
erp_table->ops = &erp_two_masks_ops;
break;
case 1:
/* We only kept the eRP table because we had C-TCAM
* eRPs in use. Now that the last C-TCAM eRP is gone we
* can stop using the table and transition to use the
* master mask
*/
mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
erp_table->ops = &erp_single_mask_ops;
break;
case 0:
/* There are no more eRPs of any kind used by the region
* so free its eRP table and transition to initial state
*/
mlxsw_sp_acl_erp_table_disable(erp_table);
mlxsw_sp_acl_erp_table_free(erp_table->erp_core,
erp_table->num_max_atcam_erps,
erp_table->aregion->type,
erp_table->base_index);
erp_table->ops = &erp_no_mask_ops;
break;
default:
break;
}
}
static struct mlxsw_sp_acl_erp *
__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
erp = kzalloc(sizeof(*erp), GFP_KERNEL);
if (!erp)
return ERR_PTR(-ENOMEM);
memcpy(&erp->key, key, sizeof(*key));
bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
refcount_set(&erp->refcnt, 1);
erp_table->num_ctcam_erps++;
erp->erp_table = erp_table;
err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
if (err)
goto err_master_mask_set;
err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
if (err)
goto err_rhashtable_insert;
err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
if (err)
goto err_erp_region_ctcam_enable;
/* When C-TCAM is used, the eRP table must be used */
erp_table->ops = &erp_multiple_masks_ops;
return erp;
err_erp_region_ctcam_enable:
rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
err_rhashtable_insert:
mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
err_master_mask_set:
erp_table->num_ctcam_erps--;
kfree(erp);
return ERR_PTR(err);
}
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
/* There is a special situation where we need to spill rules
 * into the C-TCAM, yet the region is still using a master
 * mask and thus not performing a lookup in the C-TCAM. This
 * can happen when two rules that differ only in priority -
 * and thus share the same key - are programmed. In this case
 * we transition the region to use an eRP table
 */
err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
if (err)
return ERR_PTR(err);
erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
if (IS_ERR(erp)) {
err = PTR_ERR(erp);
goto err_erp_create;
}
return erp;
err_erp_create:
mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
return ERR_PTR(err);
}
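/* A concrete case for the spill described above: two rules matching, say,
 * the same dst_ip/24 prefix and differing only in priority encode to the
 * same A-TCAM key, so the second one must go to the C-TCAM even though the
 * region still has a single A-TCAM mask and is therefore operating with a
 * master mask rather than an eRP table.
 */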
static void
mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
erp_table->num_ctcam_erps--;
kfree(erp);
/* Once the last C-TCAM eRP is destroyed, the state we
 * transition to depends on the number of A-TCAM eRPs currently
 * in use
 */
if (erp_table->num_ctcam_erps > 0)
return;
mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
}
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
if (key->ctcam)
return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
/* Expand the eRP table for the new eRP, if needed */
err = mlxsw_sp_acl_erp_table_expand(erp_table);
if (err)
return ERR_PTR(err);
erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
if (IS_ERR(erp))
return erp;
err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
if (err)
goto err_erp_index_get;
err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
if (err)
goto err_table_erp_add;
err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
if (err)
goto err_region_erp_add;
erp_table->ops = &erp_multiple_masks_ops;
return erp;
err_region_erp_add:
mlxsw_sp_acl_erp_table_erp_del(erp);
err_table_erp_add:
mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
err_erp_index_get:
mlxsw_sp_acl_erp_generic_destroy(erp);
return ERR_PTR(err);
}
static void
mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp)
{
if (erp->key.ctcam)
return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
mlxsw_sp_acl_erp_region_erp_del(erp);
mlxsw_sp_acl_erp_table_erp_del(erp);
mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
mlxsw_sp_acl_erp_generic_destroy(erp);
if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
erp_table->ops = &erp_two_masks_ops;
}
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
if (key->ctcam)
return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
/* Transition to use eRP table instead of master mask */
err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
if (err)
return ERR_PTR(err);
erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
if (IS_ERR(erp)) {
err = PTR_ERR(erp);
goto err_erp_create;
}
err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
if (err)
goto err_erp_index_get;
err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
if (err)
goto err_table_erp_add;
err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
if (err)
goto err_region_erp_add;
erp_table->ops = &erp_two_masks_ops;
return erp;
err_region_erp_add:
mlxsw_sp_acl_erp_table_erp_del(erp);
err_table_erp_add:
mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
err_erp_index_get:
mlxsw_sp_acl_erp_generic_destroy(erp);
err_erp_create:
mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
return ERR_PTR(err);
}
static void
mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp)
{
if (erp->key.ctcam)
return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
mlxsw_sp_acl_erp_region_erp_del(erp);
mlxsw_sp_acl_erp_table_erp_del(erp);
mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
mlxsw_sp_acl_erp_generic_destroy(erp);
/* Transition to use master mask instead of eRP table */
mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
erp_table->ops = &erp_single_mask_ops;
}
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
if (key->ctcam)
return ERR_PTR(-EINVAL);
erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
if (IS_ERR(erp))
return erp;
erp_table->ops = &erp_single_mask_ops;
return erp;
}
static void
mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp)
{
mlxsw_sp_acl_erp_generic_destroy(erp);
erp_table->ops = &erp_no_mask_ops;
}
static void
mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp *erp)
{
WARN_ON(1);
}
struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
const char *mask, bool ctcam)
{
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key key;
struct mlxsw_sp_acl_erp *erp;
/* eRPs are allocated from a shared resource, but currently all
* allocations are done under RTNL.
*/
ASSERT_RTNL();
memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
key.ctcam = ctcam;
erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
mlxsw_sp_acl_erp_ht_params);
if (erp) {
refcount_inc(&erp->refcnt);
return erp;
}
return erp_table->ops->erp_create(erp_table, &key);
}
void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
ASSERT_RTNL();
if (!refcount_dec_and_test(&erp->refcnt))
return;
erp_table->ops->erp_destroy(erp_table, erp);
}
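/* Typical usage from the A-TCAM entry code (illustrative sketch only; the
 * variable names are assumptions). Both calls are expected to run under
 * RTNL, and the reference count means rules sharing a mask share one eRP:
 *
 *	erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
 *	if (IS_ERR(erp))
 *		return PTR_ERR(erp);
 *	... program the entry using mlxsw_sp_acl_erp_id(erp) ...
 *	mlxsw_sp_acl_erp_put(aregion, erp);
 */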
static struct mlxsw_sp_acl_erp_table *
mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp_acl_erp_table *erp_table;
int err;
erp_table = kzalloc(sizeof(*erp_table), GFP_KERNEL);
if (!erp_table)
return ERR_PTR(-ENOMEM);
err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
if (err)
goto err_rhashtable_init;
erp_table->erp_core = aregion->atcam->erp_core;
erp_table->ops = &erp_no_mask_ops;
INIT_LIST_HEAD(&erp_table->atcam_erps_list);
erp_table->aregion = aregion;
return erp_table;
err_rhashtable_init:
kfree(erp_table);
return ERR_PTR(err);
}
static void
mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
{
WARN_ON(!list_empty(&erp_table->atcam_erps_list));
rhashtable_destroy(&erp_table->erp_ht);
kfree(erp_table);
}
static int
mlxsw_sp_acl_erp_master_mask_init(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
char percr_pl[MLXSW_REG_PERCR_LEN];
mlxsw_reg_percr_pack(percr_pl, aregion->region->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
}
static int
mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
char pererp_pl[MLXSW_REG_PERERP_LEN];
mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0,
0, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
}
int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp_acl_erp_table *erp_table;
int err;
erp_table = mlxsw_sp_acl_erp_table_create(aregion);
if (IS_ERR(erp_table))
return PTR_ERR(erp_table);
aregion->erp_table = erp_table;
/* Initialize the region's master mask to all zeroes */
err = mlxsw_sp_acl_erp_master_mask_init(aregion);
if (err)
goto err_erp_master_mask_init;
/* Initialize the region to not use the eRP table */
err = mlxsw_sp_acl_erp_region_param_init(aregion);
if (err)
goto err_erp_region_param_init;
return 0;
err_erp_region_param_init:
err_erp_master_mask_init:
mlxsw_sp_acl_erp_table_destroy(erp_table);
return err;
}
void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
{
mlxsw_sp_acl_erp_table_destroy(aregion->erp_table);
}
static int
mlxsw_sp_acl_erp_tables_sizes_query(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_erp_core *erp_core)
{
unsigned int size;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB) ||
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB) ||
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB) ||
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB))
return -EIO;
size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB);
erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = size;
size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB);
erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = size;
size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB);
erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = size;
size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB);
erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = size;
return 0;
}
static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_erp_core *erp_core)
{
unsigned int erpt_bank_size;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANK_SIZE) ||
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANKS))
return -EIO;
erpt_bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_ERPT_BANK_SIZE);
erp_core->num_erp_banks = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_ERPT_BANKS);
erp_core->erp_tables = gen_pool_create(0, -1);
if (!erp_core->erp_tables)
return -ENOMEM;
gen_pool_set_algo(erp_core->erp_tables, gen_pool_best_fit, NULL);
err = gen_pool_add(erp_core->erp_tables,
MLXSW_SP_ACL_ERP_GENALLOC_OFFSET, erpt_bank_size,
-1);
if (err)
goto err_gen_pool_add;
/* Different regions require masks of different sizes */
err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core);
if (err)
goto err_erp_tables_sizes_query;
return 0;
err_erp_tables_sizes_query:
err_gen_pool_add:
gen_pool_destroy(erp_core->erp_tables);
return err;
}
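/* The pool created above manages entry indices within a single eRP bank:
 * one chunk of ACL_MAX_ERPT_BANK_SIZE units is added starting at the
 * gen_pool offset, and each allocated row later consumes entry_size units
 * according to the region type. The best-fit algorithm is selected,
 * presumably to limit fragmentation as rows of different sizes come and go.
 */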
static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_erp_core *erp_core)
{
gen_pool_destroy(erp_core->erp_tables);
}
int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam)
{
struct mlxsw_sp_acl_erp_core *erp_core;
int err;
erp_core = kzalloc(sizeof(*erp_core), GFP_KERNEL);
if (!erp_core)
return -ENOMEM;
erp_core->mlxsw_sp = mlxsw_sp;
atcam->erp_core = erp_core;
err = mlxsw_sp_acl_erp_tables_init(mlxsw_sp, erp_core);
if (err)
goto err_erp_tables_init;
return 0;
err_erp_tables_init:
kfree(erp_core);
return err;
}
void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam)
{
mlxsw_sp_acl_erp_tables_fini(mlxsw_sp, atcam->erp_core);
kfree(atcam->erp_core);
}
@@ -577,7 +577,7 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tcam_region_enable;
-err = ops->region_init(mlxsw_sp, region->priv, region);
+err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
if (err)
goto err_tcam_region_init;
@@ -92,6 +92,9 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
#define MLXSW_SP_ACL_TCAM_MASK_LEN \
(MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE)
struct mlxsw_sp_acl_tcam_group;
struct mlxsw_sp_acl_tcam_region {
@@ -109,6 +112,7 @@ struct mlxsw_sp_acl_tcam_region {
struct mlxsw_sp_acl_ctcam_region {
struct parman *parman;
const struct mlxsw_sp_acl_ctcam_region_ops *ops;
struct mlxsw_sp_acl_tcam_region *region;
};
@@ -120,9 +124,19 @@ struct mlxsw_sp_acl_ctcam_entry {
struct parman_item parman_item;
};
-int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region_ops {
int (*entry_insert)(struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_entry *centry,
const char *mask);
void (*entry_remove)(struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_entry *centry);
};
int
mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
-struct mlxsw_sp_acl_tcam_region *region);
+struct mlxsw_sp_acl_tcam_region *region,
const struct mlxsw_sp_acl_ctcam_region_ops *ops);
void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion);
void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_chunk *cchunk,
@@ -144,9 +158,102 @@ mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
return centry->parman_item.index;
}
enum mlxsw_sp_acl_atcam_region_type {
MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB,
MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB,
MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB,
MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB,
__MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX,
};
#define MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX \
(__MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX - 1)
struct mlxsw_sp_acl_atcam {
struct mlxsw_sp_acl_erp_core *erp_core;
};
struct mlxsw_sp_acl_atcam_region {
struct rhashtable entries_ht; /* A-TCAM only */
struct mlxsw_sp_acl_ctcam_region cregion;
const struct mlxsw_sp_acl_atcam_region_ops *ops;
struct mlxsw_sp_acl_tcam_region *region;
struct mlxsw_sp_acl_atcam *atcam;
enum mlxsw_sp_acl_atcam_region_type type;
struct mlxsw_sp_acl_erp_table *erp_table;
void *priv;
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
u8 erp_id;
};
struct mlxsw_sp_acl_atcam_chunk {
struct mlxsw_sp_acl_ctcam_chunk cchunk;
};
struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
struct mlxsw_sp_acl_ctcam_entry centry;
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
struct mlxsw_sp_acl_erp *erp;
};
static inline struct mlxsw_sp_acl_atcam_region *
mlxsw_sp_acl_tcam_cregion_aregion(struct mlxsw_sp_acl_ctcam_region *cregion)
{
return container_of(cregion, struct mlxsw_sp_acl_atcam_region, cregion);
}
static inline struct mlxsw_sp_acl_atcam_entry *
mlxsw_sp_acl_tcam_centry_aentry(struct mlxsw_sp_acl_ctcam_entry *centry)
{
return container_of(centry, struct mlxsw_sp_acl_atcam_entry, centry);
}
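/* The A-TCAM region embeds the C-TCAM region (and likewise for entries), so
 * the C-TCAM callbacks in mlxsw_sp_acl_ctcam_region_ops can recover the
 * outer A-TCAM objects with the container_of() helpers above, e.g. (sketch):
 *
 *	struct mlxsw_sp_acl_atcam_region *aregion =
 *		mlxsw_sp_acl_tcam_cregion_aregion(cregion);
 */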
int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
u16 region_id);
-int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
-struct mlxsw_sp_acl_tcam_region *region);
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_tcam_region *region,
const struct mlxsw_sp_acl_ctcam_region_ops *ops);
void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_chunk *achunk,
unsigned int priority);
void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk);
int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry,
struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry);
int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
struct mlxsw_sp_acl_erp;
bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
const char *mask, bool ctcam);
void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_erp *erp);
int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
#endif