Commit 98e60dce authored by David S. Miller

Merge branch 'mlxsw-Introduce-initial-Spectrum-2-support'

Ido Schimmel says:

====================
mlxsw: Introduce initial Spectrum-2 support

This patch set adds initial support for the Spectrum-2 ASIC. The first
two patches add a Spectrum-2-specific KVD linear (KVDL) manager. Unlike
the Spectrum ASIC, Spectrum-2 has no linear memory; instead, the type of
the entry (e.g., nexthop) and its index are hashed, and the entry is
placed at the computed address in the hash-based KVD memory.

The third patch adds Spectrum-2 stubs in the multicast routing code.
Support for multicast routing will be added later on.

Patches 4-15 add ACL support. The Spectrum-2 ASIC includes an
algorithmic TCAM (A-TCAM) and a regular circuit TCAM (C-TCAM) for rules
that can't be inserted into the A-TCAM. This set does not make use of
the A-TCAM and only places rules in the C-TCAM. This provides equivalent
scale and performance to the Spectrum ASIC. A follow-up patch set will
introduce A-TCAM support.

The last patch extends the main driver file to work with both ASICs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c0b78038 c3ab4354
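
As the cover letter notes, the Spectrum-2 KVDL manager added by the first two
patches (spectrum2_kvdl.c below) does not carve offsets out of one linear
memory; it keeps an independent partition per entry type and hands out indexes
from a usage bitmap, leaving the hash-based placement itself to the device. A
minimal user-space sketch of that per-type allocation scheme follows; the
types, sizes and names are illustrative assumptions, not values taken from the
driver.

#include <stdio.h>

/* Illustrative sketch only: one usage-bitmap partition per entry type,
 * mirroring the shape of mlxsw_sp2_kvdl_part (all numbers are made up).
 */
enum entry_type { ENTRY_ADJ, ENTRY_ACTSET, ENTRY_MAX };

struct part {
	unsigned int bit_count;		/* number of usage bits */
	unsigned int indexes_per_bit;	/* indexes represented by one bit */
	unsigned char usage[32];	/* usage bitmap (up to 256 bits) */
};

static struct part parts[ENTRY_MAX] = {
	[ENTRY_ADJ]    = { .bit_count = 128, .indexes_per_bit = 2 },
	[ENTRY_ACTSET] = { .bit_count = 64,  .indexes_per_bit = 1 },
};

/* Allocate 'count' consecutive indexes of the given entry type and return
 * the first index, or -1 if the partition is full.
 */
static int kvdl_alloc(enum entry_type type, unsigned int count)
{
	struct part *p = &parts[type];
	unsigned int bits = (count + p->indexes_per_bit - 1) / p->indexes_per_bit;
	unsigned int bit, i;

	for (bit = 0; bit + bits <= p->bit_count; bit++) {
		for (i = 0; i < bits; i++)
			if (p->usage[(bit + i) / 8] & (1 << ((bit + i) % 8)))
				break;
		if (i == bits) {
			for (i = 0; i < bits; i++)
				p->usage[(bit + i) / 8] |= 1 << ((bit + i) % 8);
			return bit * p->indexes_per_bit;
		}
	}
	return -1;
}

int main(void)
{
	/* Each entry type draws from its own partition, so the two
	 * allocations below may legitimately return the same index.
	 */
	printf("adj index: %d\n", kvdl_alloc(ENTRY_ADJ, 4));
	printf("actset index: %d\n", kvdl_alloc(ENTRY_ACTSET, 1));
	return 0;
}

In the driver itself this corresponds to the mlxsw_sp_kvdl_alloc() and
mlxsw_sp_kvdl_free() calls seen below, e.g. the
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET allocation in spectrum2_acl_tcam.c.
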
......@@ -15,15 +15,17 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum1_kvdl.o spectrum_kvdl.o \
spectrum1_kvdl.o spectrum2_kvdl.o \
spectrum_kvdl.o \
spectrum_acl_tcam.o spectrum_acl_ctcam.o \
spectrum1_acl_tcam.o \
spectrum_acl_atcam.o \
spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
spectrum_acl.o \
spectrum_flower.o spectrum_cnt.o \
spectrum_fid.o spectrum_ipip.o \
spectrum_acl_flex_actions.o \
spectrum_acl_flex_keys.o \
spectrum1_mr_tcam.o \
spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
......
......@@ -430,6 +430,12 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);
char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block)
{
return block->cur_set->ht_key.enc_actions;
}
EXPORT_SYMBOL(mlxsw_afa_block_cur_set);
u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
{
/* First set is never in KVD linear. So the first set
......@@ -441,6 +447,15 @@ u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index);
int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity)
{
u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block);
return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv,
kvdl_index, activity);
}
EXPORT_SYMBOL(mlxsw_afa_block_activity_get);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
if (block->finished)
......
......@@ -45,6 +45,8 @@ struct mlxsw_afa_ops {
int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index,
char *enc_actions, bool is_first);
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index,
bool *activity);
int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
......@@ -65,7 +67,9 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa);
void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block);
u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block);
int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
......
......@@ -416,24 +416,74 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
}
EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item,
const struct mlxsw_item *output_item,
char *storage, char *output)
{
u32 value;
value = __mlxsw_item_get32(storage, storage_item, 0);
__mlxsw_item_set32(output, output_item, 0, value);
}
static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item,
const struct mlxsw_item *output_item,
char *storage, char *output)
{
char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
char *output_data = __mlxsw_item_data(output, output_item, 0);
size_t len = output_item->size.bytes;
memcpy(output_data, storage_data, len);
}
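/* Copy a single element's value (u32 or buffer) from the key storage into
 * its position within one key block.
 */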
static void
mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
char *output, char *storage)
{
const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
mlxsw_sp_afk_encode_u32(storage_item, output_item,
storage, output);
else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
mlxsw_sp_afk_encode_buf(storage_item, output_item,
storage, output);
}
#define MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE 16
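/* Encode each key block into zeroed scratch buffers and let the per-ASIC
 * encode_block() callback place the finished block into the final key and
 * mask in its hardware-specific layout.
 */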
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
char *key, char *mask)
{
char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
int block_index;
int block_index, i;
for (i = 0; i < key_info->blocks_count; i++) {
memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
mlxsw_afk_element_usage_for_each(element, &values->elusage) {
elinst = mlxsw_afk_key_info_elinst_get(key_info,
element,
&block_index);
if (!elinst || block_index != i)
continue;
mlxsw_sp_afk_encode_one(elinst, block_key,
values->storage.key);
mlxsw_sp_afk_encode_one(elinst, block_mask,
values->storage.mask);
}
mlxsw_afk_element_usage_for_each(element, &values->elusage) {
elinst = mlxsw_afk_key_info_elinst_get(key_info, element,
&block_index);
if (!elinst)
continue;
mlxsw_afk->ops->encode_one(elinst, block_index,
values->storage.key, key);
mlxsw_afk->ops->encode_one(elinst, block_index,
values->storage.mask, mask);
mlxsw_afk->ops->encode_block(block_key, i, key);
mlxsw_afk->ops->encode_block(block_mask, i, mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
......@@ -219,8 +219,7 @@ struct mlxsw_afk;
struct mlxsw_afk_ops {
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
void (*encode_one)(const struct mlxsw_afk_element_inst *elinst,
int block_index, char *storage, char *output);
void (*encode_block)(char *block, int block_index, char *output);
};
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
......
......@@ -39,6 +39,7 @@
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
......
This diff is collapsed.
......@@ -501,6 +501,9 @@ int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
/* spectrum2_kvdl.c */
extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
......@@ -621,6 +624,8 @@ struct mlxsw_sp_acl_tcam_ops {
int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
struct mlxsw_sp_acl_tcam_region *region);
void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region);
size_t chunk_priv_size;
void (*chunk_init)(void *region_priv, void *chunk_priv,
unsigned int priority);
......@@ -641,11 +646,16 @@ struct mlxsw_sp_acl_tcam_ops {
/* spectrum1_acl_tcam.c */
extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops;
/* spectrum2_acl_tcam.c */
extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops;
/* spectrum_acl_flex_actions.c */
extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops;
extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
/* spectrum_acl_flex_keys.c */
extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
......@@ -727,4 +737,7 @@ struct mlxsw_sp_mr_tcam_ops {
/* spectrum1_mr_tcam.c */
extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
/* spectrum2_mr_tcam.c */
extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
#endif
......@@ -151,6 +151,13 @@ mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
}
static int
mlxsw_sp1_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
return 0;
}
static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
unsigned int priority)
{
......@@ -235,6 +242,7 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
.region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region),
.region_init = mlxsw_sp1_acl_tcam_region_init,
.region_fini = mlxsw_sp1_acl_tcam_region_fini,
.region_associate = mlxsw_sp1_acl_tcam_region_associate,
.chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk),
.chunk_init = mlxsw_sp1_acl_tcam_chunk_init,
.chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini,
......
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_actions.h"
struct mlxsw_sp2_acl_tcam {
u32 kvdl_index;
unsigned int kvdl_count;
};
struct mlxsw_sp2_acl_tcam_region {
struct mlxsw_sp_acl_ctcam_region cregion;
};
struct mlxsw_sp2_acl_tcam_chunk {
struct mlxsw_sp_acl_ctcam_chunk cchunk;
};
struct mlxsw_sp2_acl_tcam_entry {
struct mlxsw_sp_acl_ctcam_entry centry;
struct mlxsw_afa_block *act_block;
};
static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
struct mlxsw_sp_acl_tcam *_tcam)
{
struct mlxsw_sp2_acl_tcam *tcam = priv;
struct mlxsw_afa_block *afa_block;
char pefa_pl[MLXSW_REG_PEFA_LEN];
char pgcr_pl[MLXSW_REG_PGCR_LEN];
char *enc_actions;
int i;
int err;
tcam->kvdl_count = _tcam->max_regions;
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
tcam->kvdl_count, &tcam->kvdl_index);
if (err)
return err;
/* Create a flex action block and set the default action (continue),
* but don't commit it. We only need the current set encoding, which is
* written via the PEFA register to all indexes, one for each region.
*/
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
if (!afa_block) {
err = -ENOMEM;
goto err_afa_block;
}
err = mlxsw_afa_block_continue(afa_block);
if (WARN_ON(err))
goto err_afa_block_continue;
enc_actions = mlxsw_afa_block_cur_set(afa_block);
for (i = 0; i < tcam->kvdl_count; i++) {
mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
true, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
if (err)
goto err_pefa_write;
}
mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl);
if (err)
goto err_pgcr_write;
mlxsw_afa_block_destroy(afa_block);
return 0;
err_pgcr_write:
err_pefa_write:
err_afa_block_continue:
mlxsw_afa_block_destroy(afa_block);
err_afa_block:
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
tcam->kvdl_count, tcam->kvdl_index);
return err;
}
static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
struct mlxsw_sp2_acl_tcam *tcam = priv;
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
tcam->kvdl_count, tcam->kvdl_index);
}
static int
mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
struct mlxsw_sp_acl_tcam_region *_region)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
int err;
err = mlxsw_sp_acl_atcam_region_init(mlxsw_sp, _region);
if (err)
return err;
return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
_region);
}
static void
mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
}
static int
mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
}
static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
unsigned int priority)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
priority);
}
static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
{
struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
}
static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *chunk_priv,
void *entry_priv,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
entry->act_block = rulei->act_block;
return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
&chunk->cchunk, &entry->centry,
rulei, true);
}
static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *chunk_priv,
void *entry_priv)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
&chunk->cchunk, &entry->centry);
}
static int
mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *entry_priv,
bool *activity)
{
struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
return mlxsw_afa_block_activity_get(entry->act_block, activity);
}
const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
.key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2,
.priv_size = sizeof(struct mlxsw_sp2_acl_tcam),
.init = mlxsw_sp2_acl_tcam_init,
.fini = mlxsw_sp2_acl_tcam_fini,
.region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region),
.region_init = mlxsw_sp2_acl_tcam_region_init,
.region_fini = mlxsw_sp2_acl_tcam_region_fini,
.region_associate = mlxsw_sp2_acl_tcam_region_associate,
.chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk),
.chunk_init = mlxsw_sp2_acl_tcam_chunk_init,
.chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini,
.entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry),
.entry_add = mlxsw_sp2_acl_tcam_entry_add,
.entry_del = mlxsw_sp2_acl_tcam_entry_del,
.entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get,
};
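
The ops table above is how the Spectrum-2 ACL path plugs into the common TCAM
code. As the cover letter notes, this set does not use the A-TCAM yet, so
every entry is forwarded to the shared C-TCAM helpers; the selection between
the Spectrum and Spectrum-2 ops tables happens in the main driver file, whose
diff is collapsed above. A small standalone sketch of that ops-table dispatch
pattern follows; all names in it are made up for illustration.

#include <stdio.h>

/* Ops struct the "common" code talks to, loosely modelled on
 * mlxsw_sp_acl_tcam_ops (illustrative names only).
 */
struct acl_ops {
	int (*entry_add)(int priority);
};

/* Shared circuit-TCAM helper, used by both ASIC generations. */
static int ctcam_entry_add(int priority)
{
	printf("C-TCAM: adding entry with priority %d\n", priority);
	return 0;
}

/* Spectrum-2 style implementation: no A-TCAM placement yet, everything
 * goes straight to the C-TCAM.
 */
static int sp2_entry_add(int priority)
{
	return ctcam_entry_add(priority);
}

static const struct acl_ops sp2_acl_ops = {
	.entry_add = sp2_entry_add,
};

/* Common code path: dispatches through whichever ops table the probed
 * ASIC provided.
 */
static int common_rule_insert(const struct acl_ops *ops, int priority)
{
	return ops->entry_add(priority);
}

int main(void)
{
	return common_rule_insert(&sp2_acl_ops, 10);
}

The point of the split is that the common spectrum_acl_tcam.c code stays
generation-agnostic; only the ops implementations differ per ASIC.
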
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "resources.h"
struct mlxsw_sp2_kvdl_part_info {
u8 res_type;
/* For each defined partition we need to know how many usage
* bits we need and how many indexes are represented by a single
* bit. This can be obtained by querying the appropriate FW
* resources, so keep the resource ids for this purpose in the
* partition definition.
*/
enum mlxsw_res_id usage_bit_count_res_id;
enum mlxsw_res_id index_range_res_id;
};
#define MLXSW_SP2_KVDL_PART_INFO(_entry_type, _res_type, \
_usage_bit_count_res_id, _index_range_res_id) \
[MLXSW_SP_KVDL_ENTRY_TYPE_##_entry_type] = { \
.res_type = _res_type, \
.usage_bit_count_res_id = MLXSW_RES_ID_##_usage_bit_count_res_id, \
.index_range_res_id = MLXSW_RES_ID_##_index_range_res_id, \
}
static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
MLXSW_SP2_KVDL_PART_INFO(ADJ, 0x21, KVD_SIZE, MAX_KVD_LINEAR_RANGE),
MLXSW_SP2_KVDL_PART_INFO(ACTSET, 0x23, MAX_KVD_ACTION_SETS,
MAX_KVD_ACTION_SETS),
MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
};
#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
struct mlxsw_sp2_kvdl_part {
const struct mlxsw_sp2_kvdl_part_info *info;
unsigned int usage_bit_count;
unsigned int indexes_per_usage_bit;
unsigned int last_allocated_bit;
unsigned long usage[0]; /* Usage bits */
};
struct mlxsw_sp2_kvdl {
struct mlxsw_sp2_kvdl_part *parts[MLXSW_SP2_KVDL_PARTS_INFO_LEN];
};
static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part,
unsigned int bit_count,
unsigned int *p_bit)
{
unsigned int start_bit;
unsigned int bit;
unsigned int i;
bool wrap = false;
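/* Look for a run of bit_count free usage bits, starting right after the
 * last allocated bit and wrapping around the partition at most once.
 */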
start_bit = part->last_allocated_bit + 1;
if (start_bit == part->usage_bit_count)
start_bit = 0;
bit = start_bit;
again:
bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit);
if (!wrap && bit + bit_count >= part->usage_bit_count) {
wrap = true;
bit = 0;
goto again;
}
if (wrap && bit + bit_count >= start_bit)
return -ENOBUFS;
for (i = 0; i < bit_count; i++) {
if (test_bit(bit + i, part->usage)) {
bit += bit_count;
goto again;
}
}
*p_bit = bit;
return 0;
}
static int mlxsw_sp2_kvdl_part_alloc(struct mlxsw_sp2_kvdl_part *part,
unsigned int size,
u32 *p_kvdl_index)
{
unsigned int bit_count;
unsigned int bit;
unsigned int i;
int err;
bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
err = mlxsw_sp2_kvdl_part_find_zero_bits(part, bit_count, &bit);
if (err)
return err;
for (i = 0; i < bit_count; i++)
__set_bit(bit + i, part->usage);
*p_kvdl_index = bit * part->indexes_per_usage_bit;
return 0;
}
static int mlxsw_sp2_kvdl_rec_del(struct mlxsw_sp *mlxsw_sp, u8 res_type,
u16 size, u32 kvdl_index)
{
char *iedr_pl;
int err;
iedr_pl = kmalloc(MLXSW_REG_IEDR_LEN, GFP_KERNEL);
if (!iedr_pl)
return -ENOMEM;
mlxsw_reg_iedr_pack(iedr_pl);
mlxsw_reg_iedr_rec_pack(iedr_pl, 0, res_type, size, kvdl_index);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl);
kfree(iedr_pl);
return err;
}
static void mlxsw_sp2_kvdl_part_free(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp2_kvdl_part *part,
unsigned int size, u32 kvdl_index)
{
unsigned int bit_count;
unsigned int bit;
unsigned int i;
int err;
/* We need to ask FW to delete previously used KVD linear index */
err = mlxsw_sp2_kvdl_rec_del(mlxsw_sp, part->info->res_type,
size, kvdl_index);
if (err)
return;
bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
bit = kvdl_index / part->indexes_per_usage_bit;
for (i = 0; i < bit_count; i++)
__clear_bit(bit + i, part->usage);
}
static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count,
u32 *p_entry_index)
{
unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
struct mlxsw_sp2_kvdl *kvdl = priv;
struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
return mlxsw_sp2_kvdl_part_alloc(part, size, p_entry_index);
}
static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count,
int entry_index)
{
unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
struct mlxsw_sp2_kvdl *kvdl = priv;
struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
return mlxsw_sp2_kvdl_part_free(mlxsw_sp, part, size, entry_index);
}
static int mlxsw_sp2_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
void *priv,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count,
unsigned int *p_alloc_count)
{
*p_alloc_count = entry_count;
return 0;
}
static struct mlxsw_sp2_kvdl_part *
mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp2_kvdl_part_info *info)
{
unsigned int indexes_per_usage_bit;
struct mlxsw_sp2_kvdl_part *part;
unsigned int index_range;
unsigned int usage_bit_count;
size_t usage_size;
if (!mlxsw_core_res_valid(mlxsw_sp->core,
info->usage_bit_count_res_id) ||
!mlxsw_core_res_valid(mlxsw_sp->core,
info->index_range_res_id))
return ERR_PTR(-EIO);
usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core,
info->usage_bit_count_res_id);
index_range = mlxsw_core_res_get(mlxsw_sp->core,
info->index_range_res_id);
/* For some partitions, one usage bit represents a group of indexes.
* That's why we compute the number of indexes per usage bit here,
* according to queried resources.
*/
indexes_per_usage_bit = index_range / usage_bit_count;
usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long);
part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
if (!part)
return ERR_PTR(-ENOMEM);
part->info = info;
part->usage_bit_count = usage_bit_count;
part->indexes_per_usage_bit = indexes_per_usage_bit;
part->last_allocated_bit = usage_bit_count - 1;
return part;
}
static void mlxsw_sp2_kvdl_part_fini(struct mlxsw_sp2_kvdl_part *part)
{
kfree(part);
}
static int mlxsw_sp2_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp2_kvdl *kvdl)
{
const struct mlxsw_sp2_kvdl_part_info *info;
int i;
int err;
for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) {
info = &mlxsw_sp2_kvdl_parts_info[i];
kvdl->parts[i] = mlxsw_sp2_kvdl_part_init(mlxsw_sp, info);
if (IS_ERR(kvdl->parts[i])) {
err = PTR_ERR(kvdl->parts[i]);
goto err_kvdl_part_init;
}
}
return 0;
err_kvdl_part_init:
for (i--; i >= 0; i--)
mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
return err;
}
static void mlxsw_sp2_kvdl_parts_fini(struct mlxsw_sp2_kvdl *kvdl)
{
int i;
for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++)
mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
}
static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
struct mlxsw_sp2_kvdl *kvdl = priv;
return mlxsw_sp2_kvdl_parts_init(mlxsw_sp, kvdl);
}
static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
struct mlxsw_sp2_kvdl *kvdl = priv;
mlxsw_sp2_kvdl_parts_fini(kvdl);
}
const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = {
.priv_size = sizeof(struct mlxsw_sp2_kvdl),
.init = mlxsw_sp2_kvdl_init,
.fini = mlxsw_sp2_kvdl_fini,
.alloc = mlxsw_sp2_kvdl_alloc,
.free = mlxsw_sp2_kvdl_free,
.alloc_size_query = mlxsw_sp2_kvdl_alloc_size_query,
};
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include "core_acl_flex_actions.h"
#include "spectrum.h"
#include "spectrum_mr.h"
static int
mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_key *key,
struct mlxsw_afa_block *afa_block,
enum mlxsw_sp_mr_route_prio prio)
{
return 0;
}
static void
mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_key *key)
{
}
static int
mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv,
struct mlxsw_sp_mr_route_key *key,
struct mlxsw_afa_block *afa_block)
{
return 0;
}
static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
return 0;
}
static void mlxsw_sp2_mr_tcam_fini(void *priv)
{
}
const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
.init = mlxsw_sp2_mr_tcam_init,
.fini = mlxsw_sp2_mr_tcam_fini,
.route_create = mlxsw_sp2_mr_tcam_route_create,
.route_destroy = mlxsw_sp2_mr_tcam_route_destroy,
.route_update = mlxsw_sp2_mr_tcam_route_update,
};
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2018 Ido Schimmel <idosch@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include "reg.h"
#include "core.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
u16 region_id)
{
char perar_pl[MLXSW_REG_PERAR_LEN];
/* For now, just assume that every region has 12 key blocks */
u16 hw_region = region_id * 3;
u64 max_regions;
max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
if (hw_region >= max_regions)
return -ENOBUFS;
mlxsw_reg_perar_pack(perar_pl, region_id, hw_region);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl);
}
static int mlxsw_sp_acl_atcam_region_param_init(struct mlxsw_sp *mlxsw_sp,
u16 region_id)
{
char percr_pl[MLXSW_REG_PERCR_LEN];
mlxsw_reg_percr_pack(percr_pl, region_id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
}
static int
mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp,
u16 region_id)
{
char pererp_pl[MLXSW_REG_PERERP_LEN];
mlxsw_reg_pererp_pack(pererp_pl, region_id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
}
int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
int err;
err = mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
if (err)
return err;
err = mlxsw_sp_acl_atcam_region_param_init(mlxsw_sp, region->id);
if (err)
return err;
err = mlxsw_sp_acl_atcam_region_erp_init(mlxsw_sp, region->id);
if (err)
return err;
return 0;
}
......@@ -38,7 +38,7 @@
#include "spectrum_span.h"
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
char *enc_actions, bool is_first)
char *enc_actions, bool is_first, bool ca)
{
struct mlxsw_sp *mlxsw_sp = priv;
char pefa_pl[MLXSW_REG_PEFA_LEN];
......@@ -55,7 +55,7 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
1, &kvdl_index);
if (err)
return err;
mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
if (err)
goto err_pefa_write;
......@@ -68,6 +68,20 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
return err;
}
static int mlxsw_sp1_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
char *enc_actions, bool is_first)
{
return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
is_first, false);
}
static int mlxsw_sp2_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
char *enc_actions, bool is_first)
{
return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
is_first, true);
}
static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
bool is_first)
{
......@@ -79,6 +93,27 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
1, kvdl_index);
}
static int mlxsw_sp1_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
bool *activity)
{
return -EOPNOTSUPP;
}
static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
bool *activity)
{
struct mlxsw_sp *mlxsw_sp = priv;
char pefa_pl[MLXSW_REG_PEFA_LEN];
int err;
mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
if (err)
return err;
mlxsw_reg_pefa_unpack(pefa_pl, activity);
return 0;
}
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
u8 local_port)
{
......@@ -158,14 +193,28 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
}
const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
.kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
.kvdl_set_add = mlxsw_sp1_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
.kvdl_set_activity_get = mlxsw_sp1_act_kvdl_set_activity_get,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
.counter_index_get = mlxsw_sp_act_counter_index_get,
.counter_index_put = mlxsw_sp_act_counter_index_put,
.mirror_add = mlxsw_sp_act_mirror_add,
.mirror_del = mlxsw_sp_act_mirror_del,
};
const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
.kvdl_set_add = mlxsw_sp2_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
.kvdl_set_activity_get = mlxsw_sp2_act_kvdl_set_activity_get,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
.counter_index_get = mlxsw_sp_act_counter_index_get,
.counter_index_put = mlxsw_sp_act_counter_index_put,
.mirror_add = mlxsw_sp_act_mirror_add,
.mirror_del = mlxsw_sp_act_mirror_del,
.dummy_first_set = true,
};
int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
......
......@@ -127,48 +127,190 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
};
static void mlxsw_sp1_afk_encode_u32(const struct mlxsw_item *storage_item,
const struct mlxsw_item *output_item,
char *storage, char *output_indexed)
{
u32 value;
value = __mlxsw_item_get32(storage, storage_item, 0);
__mlxsw_item_set32(output_indexed, output_item, 0, value);
}
static void mlxsw_sp1_afk_encode_buf(const struct mlxsw_item *storage_item,
const struct mlxsw_item *output_item,
char *storage, char *output_indexed)
{
char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
size_t len = output_item->size.bytes;
memcpy(output_data, storage_data, len);
}
#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
static void
mlxsw_sp1_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
int block_index, char *storage, char *output)
static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
char *output)
{
unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
char *output_indexed = output + offset;
const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
mlxsw_sp1_afk_encode_u32(storage_item, output_item,
storage, output_indexed);
else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
mlxsw_sp1_afk_encode_buf(storage_item, output_item,
storage, output_indexed);
memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
}
const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.blocks = mlxsw_sp1_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
.encode_one = mlxsw_sp1_afk_encode_one,
.encode_block = mlxsw_sp1_afk_encode_block,
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x04, 0, 8), /* RX_ACL_SYSTEM_PORT */
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
};
static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5),
MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
};
#define MLXSW_SP2_AFK_BITS_PER_BLOCK 36
/* A block in Spectrum-2 is of the following form:
*
* +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
* | | | | | | | | | | | | | | | | | | | | | | | | | | | | |35|34|33|32|
* +-----------------------------------------------------------------------------------------------+
* |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
* +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
*/
MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK);
/* The key / mask block layout in Spectrum-2 is of the following form:
*
* +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
* | | | | | | | | | | | | | | | | | block11_high |
* +-----------------------------------------------------------------------------------------------+
* | block11_low | block10_high |
* +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
* ...
*/
struct mlxsw_sp2_afk_block_layout {
unsigned short offset;
struct mlxsw_item item;
};
#define MLXSW_SP2_AFK_BLOCK_LAYOUT(_block, _offset, _shift) \
{ \
.offset = _offset, \
{ \
.shift = _shift, \
.size = {.bits = MLXSW_SP2_AFK_BITS_PER_BLOCK}, \
.name = #_block, \
} \
} \
static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
MLXSW_SP2_AFK_BLOCK_LAYOUT(block0, 0x30, 0),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block1, 0x2C, 4),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block2, 0x28, 8),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block3, 0x24, 12),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block4, 0x20, 16),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block5, 0x1C, 20),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block6, 0x18, 24),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block7, 0x14, 28),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block8, 0x0C, 0),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block9, 0x08, 4),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block10, 0x04, 8),
MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
};
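/* Write a block's 36-bit value into the key/mask buffer at the byte offset
 * and bit shift given by the layout table above.
 */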
static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
char *output)
{
u64 block_value = mlxsw_sp2_afk_block_value_get(block);
const struct mlxsw_sp2_afk_block_layout *block_layout;
if (WARN_ON(block_index < 0 ||
block_index >= ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout)))
return;
block_layout = &mlxsw_sp2_afk_blocks_layout[block_index];
__mlxsw_item_set64(output + block_layout->offset,
&block_layout->item, 0, block_value);
}
const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.blocks = mlxsw_sp2_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
.encode_block = mlxsw_sp2_afk_encode_block,
};
......@@ -547,6 +547,10 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_region_id_get;
err = ops->region_associate(mlxsw_sp, region);
if (err)
goto err_tcam_region_associate;
region->key_type = ops->key_type;
err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
if (err)
......@@ -567,6 +571,7 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
err_tcam_region_enable:
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
mlxsw_afk_key_info_put(region->key_info);
......
......@@ -143,4 +143,9 @@ mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
return centry->parman_item.index;
}
int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
u16 region_id);
int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region);
#endif