Commit ea64ce6d authored by David S. Miller's avatar David S. Miller

Merge branch 'mlxsw-Add-support-for-buffer-drops-mirroring'

Petr Machata says:

====================
mlxsw: Add support for buffer drops mirroring

This set offloads the recently introduced qevent infrastructure in TC and
allows mlxsw to support mirroring of packets that were dropped due to
buffer related reasons (e.g., early drops) during forwarding.

Up until now mlxsw only supported mirroring that was either triggered by
per-port triggers (i.e., via matchall) or by the policy engine (i.e.,
via flower). Packets that are dropped due to buffer related reasons are
mirrored using a third type of trigger, a global trigger.

Global triggers are bound once to a mirroring (SPAN) agent and enabled
on a per-{port, TC} basis. This allows users, for example, to request
that only packets that were early dropped on a specific netdev be
mirrored.

Patch set overview:

Patch #1 extends flow_block_offload and the indirect offload structure to
pass a qdisc (scheduler) in addition to the netdevice. That is necessary
because binder type and netdevice are no longer a unique identifier of the
block.

Patches #2-#3 add the required registers to support above mentioned
functionality.

Patches #4-#6 gradually add support for global mirroring triggers.

Patch #7 adds support for enablement of global mirroring triggers.

Patches #8-#11 are cleanups in the flow offload code and shuffle some
code around to make the qevent offload easier.

Patch #12 implements offload of RED early_drop qevent.

Patch #13 extends the RED selftest for offloaded datapath to cover
early_drop qevent.

v2:
- Patch #1:
    - In struct flow_block_indr, track both sch and dev.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents e1d82f7a 1add9212
......@@ -1888,7 +1888,7 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv)
kfree(priv);
}
static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp,
struct flow_block_offload *f, void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
......@@ -1911,7 +1911,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
cb_priv, cb_priv,
bnxt_tc_setup_indr_rel, f,
netdev, data, bp, cleanup);
netdev, sch, data, bp, cleanup);
if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
......@@ -1946,7 +1946,7 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
return netif_is_vxlan(netdev);
}
static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
......@@ -1956,8 +1956,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
switch (type) {
case TC_SETUP_BLOCK:
return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data,
cleanup);
return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup);
default:
break;
}
......
......@@ -404,7 +404,7 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv)
static LIST_HEAD(mlx5e_block_cb_list);
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev,
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
struct mlx5e_rep_priv *rpriv,
struct flow_block_offload *f,
flow_setup_cb_t *setup_cb,
......@@ -442,7 +442,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
mlx5e_rep_indr_block_unbind,
f, netdev, data, rpriv,
f, netdev, sch, data, rpriv,
cleanup);
if (IS_ERR(block_cb)) {
list_del(&indr_priv->list);
......@@ -472,18 +472,18 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
}
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
switch (type) {
case TC_SETUP_BLOCK:
return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
mlx5e_rep_indr_setup_tc_cb,
data, cleanup);
case TC_SETUP_FT:
return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
mlx5e_rep_indr_setup_ft_cb,
data, cleanup);
default:
......
......@@ -9502,6 +9502,106 @@ MLXSW_ITEM32(reg, mogcr, ptp_iftc, 0x00, 1, 1);
*/
MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1);
/* MPAGR - Monitoring Port Analyzer Global Register
* ------------------------------------------------
* This register is used for global port analyzer configurations.
* Note: This register is not supported by current FW versions for Spectrum-1.
*/
#define MLXSW_REG_MPAGR_ID 0x9089
#define MLXSW_REG_MPAGR_LEN 0x0C
MLXSW_REG_DEFINE(mpagr, MLXSW_REG_MPAGR_ID, MLXSW_REG_MPAGR_LEN);
/* Mirror triggers selectable in the MPAGR "trigger" field. The enum
 * order gives values 0..7, which fit the 4-bit field; presumably this
 * matches the device encoding — confirm against the register spec.
 */
enum mlxsw_reg_mpagr_trigger {
	MLXSW_REG_MPAGR_TRIGGER_EGRESS,
	MLXSW_REG_MPAGR_TRIGGER_INGRESS,
	MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED,
	MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER,
	MLXSW_REG_MPAGR_TRIGGER_INGRESS_ING_CONG,
	MLXSW_REG_MPAGR_TRIGGER_INGRESS_EGR_CONG,
	MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN,
	MLXSW_REG_MPAGR_TRIGGER_EGRESS_HIGH_LATENCY,
};
/* reg_mpagr_trigger
* Mirror trigger.
* Access: Index
*/
MLXSW_ITEM32(reg, mpagr, trigger, 0x00, 0, 4);
/* reg_mpagr_pa_id
* Port analyzer ID.
* Access: RW
*/
MLXSW_ITEM32(reg, mpagr, pa_id, 0x04, 0, 4);
/* reg_mpagr_probability_rate
* Sampling rate.
* Valid values are: 1 to 3.5*10^9
* Value of 1 means "sample all". Default is 1.
* Access: RW
*/
MLXSW_ITEM32(reg, mpagr, probability_rate, 0x08, 0, 32);
/* Pack an MPAGR payload: bind mirror trigger @trigger to port analyzer
 * @pa_id, sampling with @probability_rate (1 means mirror every packet,
 * per the field's documentation above).
 */
static inline void mlxsw_reg_mpagr_pack(char *payload,
					enum mlxsw_reg_mpagr_trigger trigger,
					u8 pa_id, u32 probability_rate)
{
	MLXSW_REG_ZERO(mpagr, payload);
	mlxsw_reg_mpagr_trigger_set(payload, trigger);
	mlxsw_reg_mpagr_pa_id_set(payload, pa_id);
	mlxsw_reg_mpagr_probability_rate_set(payload, probability_rate);
}
/* MOMTE - Monitoring Mirror Trigger Enable Register
* -------------------------------------------------
* This register is used to configure the mirror enable for different mirror
* reasons.
*/
#define MLXSW_REG_MOMTE_ID 0x908D
#define MLXSW_REG_MOMTE_LEN 0x10
MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN);
/* reg_momte_local_port
* Local port number.
* Access: Index
*/
MLXSW_ITEM32(reg, momte, local_port, 0x00, 16, 8);
/* Mirror reasons configurable via MOMTE's "type" field; the explicit
 * values are presumably the device encoding for each reason — confirm
 * against the register spec.
 */
enum mlxsw_reg_momte_type {
	MLXSW_REG_MOMTE_TYPE_WRED = 0x20,
	MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS = 0x31,
	MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS_DESCRIPTORS = 0x32,
	MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_EGRESS_PORT = 0x33,
	MLXSW_REG_MOMTE_TYPE_ING_CONG = 0x40,
	MLXSW_REG_MOMTE_TYPE_EGR_CONG = 0x50,
	MLXSW_REG_MOMTE_TYPE_ECN = 0x60,
	MLXSW_REG_MOMTE_TYPE_HIGH_LATENCY = 0x70,
};
/* reg_momte_type
* Type of mirroring.
* Access: Index
*/
MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8);
/* reg_momte_tclass_en
* TClass/PG mirror enable. Each bit represents corresponding tclass.
* 0: disable (default)
* 1: enable
* Access: RW
*/
MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1);
/* Pack a MOMTE payload for @local_port and mirror reason @type. The
 * per-TC enable bits (tclass_en) are left zeroed; callers set them
 * separately before writing the register.
 */
static inline void mlxsw_reg_momte_pack(char *payload, u8 local_port,
					enum mlxsw_reg_momte_type type)
{
	MLXSW_REG_ZERO(momte, payload);
	mlxsw_reg_momte_local_port_set(payload, local_port);
	mlxsw_reg_momte_type_set(payload, type);
}
/* MTPPPC - Time Precision Packet Port Configuration
* -------------------------------------------------
* This register serves for configuration of which PTP messages should be
......@@ -10853,6 +10953,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mgpc),
MLXSW_REG(mprs),
MLXSW_REG(mogcr),
MLXSW_REG(mpagr),
MLXSW_REG(momte),
MLXSW_REG(mtpppc),
MLXSW_REG(mtpptr),
MLXSW_REG(mtptpt),
......
......@@ -175,10 +175,6 @@ struct mlxsw_sp_mlxfw_dev {
struct mlxsw_sp *mlxsw_sp;
};
struct mlxsw_sp_span_ops {
u32 (*buffsize_get)(int mtu, u32 speed);
};
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
u16 component_index, u32 *p_max_size,
u8 *p_align_bits, u16 *p_max_write_size)
......@@ -1333,6 +1329,21 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
/* Dispatch a TC flow-block bind/unbind to the handler for its binder
 * type: clsact ingress/egress blocks, or the RED early_drop qevent
 * block. Other binder types are not offloaded.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
......@@ -2812,52 +2823,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats = mlxsw_sp2_get_stats,
};
static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
{
return mtu * 5 / 2;
}
static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
.buffsize_get = mlxsw_sp1_span_buffsize_get,
};
#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
{
return 3 * mtu + buffer_factor * speed / 1000;
}
static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
{
int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
}
static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
.buffsize_get = mlxsw_sp2_span_buffsize_get,
};
static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
{
int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
}
static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
.buffsize_get = mlxsw_sp3_span_buffsize_get,
};
u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
{
u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);
return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
}
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
......
......@@ -539,7 +539,6 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
......@@ -711,7 +710,6 @@ struct mlxsw_sp_flow_block {
struct mlxsw_sp_flow_block_binding {
struct list_head list;
struct net_device *dev;
struct mlxsw_sp_port *mlxsw_sp_port;
bool ingress;
};
......@@ -769,8 +767,9 @@ mlxsw_sp_flow_block_is_mixed_bound(const struct mlxsw_sp_flow_block *block)
struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp,
struct net *net);
void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block);
int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f);
int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f,
bool ingress);
/* spectrum_acl.c */
struct mlxsw_sp_acl_ruleset;
......@@ -962,6 +961,30 @@ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_matchall.c */
enum mlxsw_sp_mall_action_type {
MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
MLXSW_SP_MALL_ACTION_TYPE_TRAP,
};
struct mlxsw_sp_mall_mirror_entry {
const struct net_device *to_dev;
int span_id;
};
struct mlxsw_sp_mall_entry {
struct list_head list;
unsigned long cookie;
unsigned int priority;
enum mlxsw_sp_mall_action_type type;
bool ingress;
union {
struct mlxsw_sp_mall_mirror_entry mirror;
struct mlxsw_sp_port_sample sample;
};
struct rcu_head rcu;
};
int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f);
......@@ -1008,6 +1031,8 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_tbf_qopt_offload *p);
int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_fifo_qopt_offload *p);
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
......
......@@ -219,8 +219,7 @@ static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_tc_block_release);
if (IS_ERR(block_cb)) {
mlxsw_sp_flow_block_destroy(flow_block);
err = PTR_ERR(block_cb);
goto err_cb_register;
return PTR_ERR(block_cb);
}
register_block = true;
} else {
......@@ -247,7 +246,6 @@ static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
err_block_bind:
if (!flow_block_cb_decref(block_cb))
flow_block_cb_free(block_cb);
err_cb_register:
return err;
}
......@@ -279,18 +277,10 @@ static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f,
bool ingress)
{
bool ingress;
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
ingress = true;
else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
ingress = false;
else
return -EOPNOTSUPP;
f->driver_block_list = &mlxsw_sp_block_cb_list;
switch (f->command) {
......
......@@ -10,29 +10,6 @@
#include "spectrum_span.h"
#include "reg.h"
enum mlxsw_sp_mall_action_type {
MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
};
struct mlxsw_sp_mall_mirror_entry {
const struct net_device *to_dev;
int span_id;
};
struct mlxsw_sp_mall_entry {
struct list_head list;
unsigned long cookie;
unsigned int priority;
enum mlxsw_sp_mall_action_type type;
bool ingress;
union {
struct mlxsw_sp_mall_mirror_entry mirror;
struct mlxsw_sp_port_sample sample;
};
struct rcu_head rcu;
};
static struct mlxsw_sp_mall_entry *
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
......
......@@ -8,6 +8,7 @@
#include <net/red.h>
#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
......@@ -1272,6 +1273,477 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
struct mlxsw_sp_qevent_block {
struct list_head binding_list;
struct list_head mall_entry_list;
struct mlxsw_sp *mlxsw_sp;
};
struct mlxsw_sp_qevent_binding {
struct list_head list;
struct mlxsw_sp_port *mlxsw_sp_port;
u32 handle;
int tclass_num;
enum mlxsw_sp_span_trigger span_trigger;
};
static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
/* Program a mirror mall entry for one {port, TC, trigger} binding:
 * obtain a SPAN agent towards the target netdev, take an analyzed-port
 * reference on the mirroring port, bind the agent to the binding's
 * trigger, and enable the trigger for the binding's traffic class.
 * Each step is rolled back in reverse order on failure.
 */
static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_mall_entry *mall_entry,
					    struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	int span_id;
	int err;
	err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev, &span_id);
	if (err)
		return err;
	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
	if (err)
		goto err_analyzed_port_get;
	trigger_parms.span_id = span_id;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
				       &trigger_parms);
	if (err)
		goto err_agent_bind;
	err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
					   qevent_binding->tclass_num);
	if (err)
		goto err_trigger_enable;
	/* Remember the agent so deconfigure can release it later. */
	mall_entry->mirror.span_id = span_id;
	return 0;
err_trigger_enable:
	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
				   &trigger_parms);
err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
	return err;
}
/* Undo mlxsw_sp_qevent_mirror_configure() for one binding, tearing the
 * steps down in exact reverse order of configuration.
 */
static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_mall_entry *mall_entry,
					       struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {
		.span_id = mall_entry->mirror.span_id,
	};
	mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
				      qevent_binding->tclass_num);
	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
				   &trigger_parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
}
/* Apply one mall entry to the hardware for the given binding. Only
 * mirror entries can appear here; anything else was rejected at replace
 * time, so reaching the fallback indicates a driver bug.
 */
static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_mall_entry *mall_entry,
					   struct mlxsw_sp_qevent_binding *qevent_binding)
{
	if (mall_entry->type == MLXSW_SP_MALL_ACTION_TYPE_MIRROR)
		return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry,
							qevent_binding);

	/* This should have been validated away. */
	WARN_ON(1);
	return -EOPNOTSUPP;
}
/* Remove one mall entry's hardware state for the given binding. Only
 * mirror entries exist; warn on anything else.
 */
static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
					      struct mlxsw_sp_mall_entry *mall_entry,
					      struct mlxsw_sp_qevent_binding *qevent_binding)
{
	if (mall_entry->type != MLXSW_SP_MALL_ACTION_TYPE_MIRROR) {
		WARN_ON(1);
		return;
	}

	mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
}
/* Configure every mall entry of @qevent_block for @qevent_binding.
 * On failure, the entries configured so far are deconfigured in reverse
 * list order before the error is returned.
 */
static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
					     struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;
	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
		err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
						      qevent_binding);
		if (err)
			goto err_entry_configure;
	}
	return 0;
err_entry_configure:
	/* Unwind only the entries before the one that failed. */
	list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
	return err;
}
/* Deconfigure every mall entry of @qevent_block from @qevent_binding. */
static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
						struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
}
/* (Re)program the block's current entry list on all of its bindings.
 * On failure, bindings programmed so far are deconfigured in reverse
 * list order.
 */
static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;
	int err;
	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
		err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
		if (err)
			goto err_binding_configure;
	}
	return 0;
err_binding_configure:
	list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	return err;
}
/* Deconfigure the block's entries from all of its bindings. */
static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;
	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
}
static struct mlxsw_sp_mall_entry *
mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
{
struct mlxsw_sp_mall_entry *mall_entry;
list_for_each_entry(mall_entry, &block->mall_entry_list, list)
if (mall_entry->cookie == cookie)
return mall_entry;
return NULL;
}
/* Offload a matchall REPLACE on a qevent block. Exactly one filter with
 * a single mirred action on chain 0, protocol "all", and disabled HW
 * stats is supported; anything else is rejected with an extack message.
 * On success the new entry is added to the block and programmed for all
 * current bindings.
 */
static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_qevent_block *qevent_block,
					struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	struct flow_action_entry *act;
	int err;
	/* It should not currently be possible to replace a matchall rule. So
	 * this must be a new rule.
	 */
	if (!list_empty(&qevent_block->mall_entry_list)) {
		NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
		return -EOPNOTSUPP;
	}
	if (f->rule->action.num_entries != 1) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
		return -EOPNOTSUPP;
	}
	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}
	if (f->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
		return -EOPNOTSUPP;
	}
	act = &f->rule->action.entries[0];
	/* HW counters are not supported on qevents, so the action must have
	 * been requested with stats disabled.
	 */
	if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
		NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
		return -EOPNOTSUPP;
	}
	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;
	if (act->id == FLOW_ACTION_MIRRED) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else {
		NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
		err = -EOPNOTSUPP;
		goto err_unsupported_action;
	}
	/* The entry must be listed before (re)configuring the block, and
	 * unlisted again if programming any binding fails.
	 */
	list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
	err = mlxsw_sp_qevent_block_configure(qevent_block);
	if (err)
		goto err_block_configure;
	return 0;
err_block_configure:
	list_del(&mall_entry->list);
err_unsupported_action:
	kfree(mall_entry);
	return err;
}
/* Offload a matchall DESTROY: deconfigure the block from all bindings
 * and free the entry. An unknown cookie is silently ignored (nothing was
 * offloaded for it).
 */
static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
					 struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
	if (!mall_entry)
		return;
	/* The block holds at most one entry (enforced at replace time), so
	 * deconfiguring the whole block tears down exactly this entry.
	 */
	mlxsw_sp_qevent_block_deconfigure(qevent_block);
	list_del(&mall_entry->list);
	kfree(mall_entry);
}
/* Handle a matchall command on a qevent block: REPLACE installs the
 * single supported filter, DESTROY removes it; stats and other commands
 * are not supported.
 */
static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
					 struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;

	if (f->command == TC_CLSMATCHALL_REPLACE)
		return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
	if (f->command == TC_CLSMATCHALL_DESTROY) {
		mlxsw_sp_qevent_mall_destroy(qevent_block, f);
		return 0;
	}
	return -EOPNOTSUPP;
}
/* flow_block callback for qevent blocks: only matchall classifiers are
 * accepted; everything else is refused.
 */
static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;

	if (type != TC_SETUP_CLSMATCHALL)
		return -EOPNOTSUPP;
	return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
}
/* Allocate and initialize an empty qevent block. @net is currently
 * unused by the body. Returns NULL on allocation failure.
 */
static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
								  struct net *net)
{
	struct mlxsw_sp_qevent_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;
	block->mlxsw_sp = mlxsw_sp;
	INIT_LIST_HEAD(&block->binding_list);
	INIT_LIST_HEAD(&block->mall_entry_list);
	return block;
}
/* Free a qevent block; it must already be empty of bindings and entries. */
static void
mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
{
	WARN_ON(!list_empty(&qevent_block->binding_list));
	WARN_ON(!list_empty(&qevent_block->mall_entry_list));
	kfree(qevent_block);
}
/* flow_block_cb release hook: destroy the block kept as callback priv. */
static void mlxsw_sp_qevent_block_release(void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
	mlxsw_sp_qevent_block_destroy(qevent_block);
}
/* Allocate a binding of a qevent block to one {port, qdisc handle,
 * tclass, trigger} tuple. Returns ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
			       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;

	qevent_binding = kzalloc(sizeof(*qevent_binding), GFP_KERNEL);
	if (!qevent_binding)
		return ERR_PTR(-ENOMEM);
	qevent_binding->span_trigger = span_trigger;
	qevent_binding->tclass_num = tclass_num;
	qevent_binding->handle = handle;
	qevent_binding->mlxsw_sp_port = mlxsw_sp_port;
	return qevent_binding;
}
/* Free a binding allocated by mlxsw_sp_qevent_binding_create(). */
static void
mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
{
	kfree(binding);
}
/* Find the binding of @block matching {port, qdisc handle, trigger}, or
 * NULL when no such binding exists.
 */
static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       u32 handle,
			       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp_qevent_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list) {
		if (binding->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		if (binding->handle == handle &&
		    binding->span_trigger == span_trigger)
			return binding;
	}
	return NULL;
}
/* Bind a qevent flow block to a {port, qdisc, trigger} tuple. The qevent
 * block is shared via flow_block_cb refcounting: the first binder creates
 * it and registers the callback, later binders only take a reference.
 * The qdisc identified by f->sch must already be offloaded so that its
 * hardware traffic class is known.
 */
static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
					       struct flow_block_offload *f,
					       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;
	struct mlxsw_sp_qdisc *qdisc;
	bool register_block = false;
	int err;
	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb) {
		/* First binding of this block: create the driver state. */
		qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
		if (!qevent_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
					       mlxsw_sp_qevent_block_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_qevent_block_destroy(qevent_block);
			return PTR_ERR(block_cb);
		}
		register_block = true;
	} else {
		qevent_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
	if (!qdisc) {
		NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
		err = -ENOENT;
		goto err_find_qdisc;
	}
	/* A duplicate binding of the same {port, handle, trigger} would be
	 * a bug in the layers above.
	 */
	if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
						   span_trigger))) {
		err = -EEXIST;
		goto err_binding_exists;
	}
	qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
							qdisc->tclass_num, span_trigger);
	if (IS_ERR(qevent_binding)) {
		err = PTR_ERR(qevent_binding);
		goto err_binding_create;
	}
	err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
	if (err)
		goto err_binding_configure;
	list_add(&qevent_binding->list, &qevent_block->binding_list);
	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
	}
	return 0;
err_binding_configure:
	mlxsw_sp_qevent_binding_destroy(qevent_binding);
err_binding_create:
err_binding_exists:
err_find_qdisc:
	/* Dropping the last reference also releases the block itself. */
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}
/* Undo mlxsw_sp_setup_tc_block_qevent_bind(): remove and deconfigure
 * this port's binding, then drop its reference on the shared block
 * callback, unregistering the callback when the last reference goes.
 * Missing block or binding is tolerated (nothing to undo).
 */
static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
						  struct flow_block_offload *f,
						  enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;
	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb)
		return;
	qevent_block = flow_block_cb_priv(block_cb);
	qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
							span_trigger);
	if (!qevent_binding)
		return;
	list_del(&qevent_binding->list);
	mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	mlxsw_sp_qevent_binding_destroy(qevent_binding);
	if (!flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}
/* Common entry for qevent flow-block offload: record the driver block
 * list and route BIND/UNBIND to the respective handler for the given
 * SPAN trigger.
 */
static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct flow_block_offload *f,
					  enum mlxsw_sp_span_trigger span_trigger)
{
	f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;

	if (f->command == FLOW_BLOCK_BIND)
		return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
							   span_trigger);
	if (f->command == FLOW_BLOCK_UNBIND) {
		mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f,
						      span_trigger);
		return 0;
	}
	return -EOPNOTSUPP;
}
/* Offload entry point for a RED qdisc's early_drop qevent block: bind it
 * using the EARLY_DROP SPAN trigger.
 */
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
					      struct flow_block_offload *f)
{
	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
}
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_qdisc_state *qdisc_state;
......
......@@ -21,6 +21,7 @@
struct mlxsw_sp_span {
struct work_struct work;
struct mlxsw_sp *mlxsw_sp;
const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
struct list_head analyzed_ports_list;
struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
struct list_head trigger_entries_list;
......@@ -38,12 +39,31 @@ struct mlxsw_sp_span_analyzed_port {
struct mlxsw_sp_span_trigger_entry {
struct list_head list; /* Member of trigger_entries_list */
struct mlxsw_sp_span *span;
const struct mlxsw_sp_span_trigger_ops *ops;
refcount_t ref_count;
u8 local_port;
enum mlxsw_sp_span_trigger trigger;
struct mlxsw_sp_span_trigger_parms parms;
};
enum mlxsw_sp_span_trigger_type {
MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
};
struct mlxsw_sp_span_trigger_ops {
int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port);
int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
};
static void mlxsw_sp_span_respin_work(struct work_struct *work);
static u64 mlxsw_sp_span_occ_get(void *priv)
......@@ -57,7 +77,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_span *span;
int i, entries_count;
int i, entries_count, err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
return -EIO;
......@@ -77,11 +97,20 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < mlxsw_sp->span->entries_count; i++)
mlxsw_sp->span->entries[i].id = i;
err = mlxsw_sp->span_ops->init(mlxsw_sp);
if (err)
goto err_init;
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
mlxsw_sp_span_occ_get, mlxsw_sp);
INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
return 0;
err_init:
mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
kfree(mlxsw_sp->span);
return err;
}
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
......@@ -766,6 +795,14 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
return 0;
}
/* Compute the SPAN buffer size in cells for a port with the given @mtu
 * and @speed, using the per-ASIC buffsize_get callback. The extra cell
 * presumably guards against the bytes-to-cells conversion rounding down
 * — confirm against the ASIC documentation.
 */
static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu,
				      u32 speed)
{
	/* The callback is declared as (int mtu, u32 speed); the previous
	 * code passed (speed, mtu), feeding the speed into the MTU term of
	 * the per-ASIC buffer-size formulas.
	 */
	u32 buffsize = mlxsw_sp->span_ops->buffsize_get(mtu, speed);

	return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
}
static int
mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
......@@ -1051,7 +1088,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
__mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_trigger_entry *
trigger_entry, bool enable)
{
......@@ -1076,19 +1113,254 @@ __mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
}
static int
mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
trigger_entry)
{
return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true);
return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
trigger_entry, true);
}
static void
mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
trigger_entry)
{
__mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false);
__mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
false);
}
static bool
mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port)
{
return trigger_entry->trigger == trigger &&
trigger_entry->local_port == mlxsw_sp_port->local_port;
}
/* Per-port triggers become active as part of binding (see the bind op), so
 * per-{port, TC} enablement is a no-op that always succeeds.
 */
static int
mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
/* Port triggers are enabled during binding. */
return 0;
}
/* Counterpart of the enable op above: per-port triggers are torn down at
 * unbind time, so per-{port, TC} disablement is intentionally empty.
 */
static void
mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
}
/* Operations for per-port mirroring triggers (ingress/egress), shared by all
 * ASIC generations.
 */
static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp_span_trigger_port_ops = {
.bind = mlxsw_sp_span_trigger_port_bind,
.unbind = mlxsw_sp_span_trigger_port_unbind,
.matches = mlxsw_sp_span_trigger_port_matches,
.enable = mlxsw_sp_span_trigger_port_enable,
.disable = mlxsw_sp_span_trigger_port_disable,
};
/* Spectrum-1 does not support global (buffer-drop) mirroring triggers, so
 * binding a SPAN agent to one is rejected.
 */
static int
mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
trigger_entry)
{
return -EOPNOTSUPP;
}
/* Nothing to undo on Spectrum-1: the bind op never succeeds, so unbind is
 * an empty stub.
 */
static void
mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
trigger_entry)
{
}
/* Since bind always fails on Spectrum-1, no global trigger entry can exist;
 * being asked to match one indicates a driver bug, hence the WARN.
 */
static bool
mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port)
{
WARN_ON_ONCE(1);
return false;
}
/* Global triggers cannot be bound on Spectrum-1, so enabling one is
 * unsupported as well.
 */
static int
mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
struct mlxsw_sp_port *mlxsw_sp_port,
u8 tc)
{
return -EOPNOTSUPP;
}
/* Counterpart of the enable stub above; nothing was ever enabled, so there
 * is nothing to disable.
 */
static void
mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
struct mlxsw_sp_port *mlxsw_sp_port,
u8 tc)
{
}
/* Stubbed-out global trigger operations for Spectrum-1, where buffer-drop
 * mirroring is not available.
 */
static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp1_span_trigger_global_ops = {
.bind = mlxsw_sp1_span_trigger_global_bind,
.unbind = mlxsw_sp1_span_trigger_global_unbind,
.matches = mlxsw_sp1_span_trigger_global_matches,
.enable = mlxsw_sp1_span_trigger_global_enable,
.disable = mlxsw_sp1_span_trigger_global_disable,
};
/* Spectrum-1 trigger-type -> ops dispatch table: shared port ops, stubbed
 * global ops.
 */
static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp1_span_trigger_ops_arr[] = {
[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
&mlxsw_sp1_span_trigger_global_ops,
};
/* Bind a SPAN agent to a global mirroring trigger on Spectrum-2 and later.
 *
 * Global triggers are bound once through the MPAGR register; mirroring is
 * then switched on per-{port, TC} by the enable op.
 */
static int
mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	char mpagr_pl[MLXSW_REG_MPAGR_LEN];
	enum mlxsw_reg_mpagr_trigger trigger;

	/* Translate the driver trigger to its MPAGR encoding. */
	if (trigger_entry->trigger == MLXSW_SP_SPAN_TRIGGER_TAIL_DROP) {
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
	} else if (trigger_entry->trigger == MLXSW_SP_SPAN_TRIGGER_EARLY_DROP) {
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
	} else if (trigger_entry->trigger == MLXSW_SP_SPAN_TRIGGER_ECN) {
		trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
	} else {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
			     1);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}
/* Unbind op for Spectrum-2 global triggers. */
static void
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
trigger_entry)
{
/* There is no unbinding for global triggers. The trigger should be
* disabled on all ports by now.
*/
}
/* Global triggers are bound once per trigger type, not per port, so matching
 * deliberately ignores the port argument.
 */
static bool
mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port)
{
return trigger_entry->trigger == trigger;
}
/* Enable or disable mirroring for a global trigger on one {port, TC} via the
 * MOMTE register. Returns 0 on success or a negative errno.
 *
 * The register holds a per-TC enablement bitmap, so the update is a
 * read-modify-write: query the current state, flip only the requested TC,
 * and write the result back.
 */
static int
__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
struct mlxsw_sp_port *mlxsw_sp_port,
u8 tc, bool enable)
{
struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
char momte_pl[MLXSW_REG_MOMTE_LEN];
enum mlxsw_reg_momte_type type;
int err;
/* Translate the driver trigger to its MOMTE mirroring type. */
switch (trigger_entry->trigger) {
case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
break;
case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
type = MLXSW_REG_MOMTE_TYPE_WRED;
break;
case MLXSW_SP_SPAN_TRIGGER_ECN:
type = MLXSW_REG_MOMTE_TYPE_ECN;
break;
default:
/* Callers only pass global triggers; anything else is a bug. */
WARN_ON_ONCE(1);
return -EINVAL;
}
/* Query existing configuration in order to only change the state of
* the specified traffic class.
*/
mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
if (err)
return err;
mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
}
/* Enable op: turn on mirroring of the global trigger for this {port, TC}. */
static int
mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
struct mlxsw_sp_port *mlxsw_sp_port,
u8 tc)
{
return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
mlxsw_sp_port, tc, true);
}
/* Disable op: turn off mirroring of the global trigger for this {port, TC}.
 * The underlying register write may fail, but the ops interface is void, so
 * the error is ignored here.
 */
static void
mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
trigger_entry,
struct mlxsw_sp_port *mlxsw_sp_port,
u8 tc)
{
__mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
false);
}
/* Global (buffer-drop) trigger operations for Spectrum-2 and later ASICs. */
static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp2_span_trigger_global_ops = {
.bind = mlxsw_sp2_span_trigger_global_bind,
.unbind = mlxsw_sp2_span_trigger_global_unbind,
.matches = mlxsw_sp2_span_trigger_global_matches,
.enable = mlxsw_sp2_span_trigger_global_enable,
.disable = mlxsw_sp2_span_trigger_global_disable,
};
/* Spectrum-2 trigger-type -> ops dispatch table: shared port ops plus real
 * global trigger support.
 */
static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp2_span_trigger_ops_arr[] = {
[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
&mlxsw_sp2_span_trigger_global_ops,
};
/* Resolve the ops vtable for a trigger entry from the per-ASIC dispatch
 * table, keyed by whether the trigger is per-port or global.
 */
static void
mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
{
	enum mlxsw_sp_span_trigger_type type;
	struct mlxsw_sp_span *span = trigger_entry->span;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
		break;
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
		break;
	default:
		/* Unknown trigger: leave ops untouched and flag the bug. */
		WARN_ON_ONCE(1);
		return;
	}

	trigger_entry->ops = span->span_trigger_ops_arr[type];
}
static struct mlxsw_sp_span_trigger_entry *
......@@ -1106,12 +1378,15 @@ mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
return ERR_PTR(-ENOMEM);
refcount_set(&trigger_entry->ref_count, 1);
trigger_entry->local_port = mlxsw_sp_port->local_port;
trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
0;
trigger_entry->trigger = trigger;
memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
trigger_entry->span = span;
mlxsw_sp_span_trigger_ops_set(trigger_entry);
list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry);
err = trigger_entry->ops->bind(trigger_entry);
if (err)
goto err_trigger_entry_bind;
......@@ -1128,7 +1403,7 @@ mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_trigger_entry *
trigger_entry)
{
mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry);
trigger_entry->ops->unbind(trigger_entry);
list_del(&trigger_entry->list);
kfree(trigger_entry);
}
......@@ -1141,8 +1416,8 @@ mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_trigger_entry *trigger_entry;
list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
if (trigger_entry->trigger == trigger &&
trigger_entry->local_port == mlxsw_sp_port->local_port)
if (trigger_entry->ops->matches(trigger_entry, trigger,
mlxsw_sp_port))
return trigger_entry;
}
......@@ -1207,3 +1482,93 @@ void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}
/* Enable mirroring of the given trigger for a {port, TC}. The SPAN agent
 * must already be bound to the trigger (the entry must exist); returns 0 on
 * success or a negative errno.
 */
int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_trigger_entry *trigger_entry;
/* The trigger entries list is protected by RTNL. */
ASSERT_RTNL();
trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
trigger,
mlxsw_sp_port);
if (WARN_ON_ONCE(!trigger_entry))
return -EINVAL;
return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
}
/* Disable mirroring of the given trigger for a {port, TC}. Counterpart of
 * mlxsw_sp_span_trigger_enable(); the trigger entry is expected to exist.
 */
void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	/* The trigger entries list is protected by RTNL. */
	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	/* ISO C forbids "return expr;" in a function returning void
	 * (C11 6.8.6.4); call the op and fall off the end instead.
	 */
	trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}
/* Spectrum-1 SPAN initialization: install the SP1 trigger dispatch table. */
static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
return 0;
}
/* Spectrum-1: the recommended mirroring buffer is 2.5x the MTU; the port
 * speed does not factor into the estimate on this ASIC.
 */
static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
{
	return (5 * mtu) / 2;
}
/* Spectrum-1 SPAN operations. */
const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
.init = mlxsw_sp1_span_init,
.buffsize_get = mlxsw_sp1_span_buffsize_get,
};
/* Spectrum-2 SPAN initialization: install the SP2 trigger dispatch table. */
static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
return 0;
}
#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
/* Common egress-mirroring buffer estimate: three MTUs of headroom plus a
 * component proportional to link speed, scaled by the ASIC-specific factor.
 */
static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
{
	u32 speed_component = buffer_factor * speed / 1000;

	return 3 * mtu + speed_component;
}
/* Spectrum-2 buffer size: common formula with the SP2 speed factor. */
static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
{
	return __mlxsw_sp_span_buffsize_get(mtu, speed,
					    MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR);
}
/* Spectrum-2 SPAN operations. */
const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
.init = mlxsw_sp2_span_init,
.buffsize_get = mlxsw_sp2_span_buffsize_get,
};
/* Spectrum-3 buffer size: common formula with the SP3 speed factor. */
static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
{
	return __mlxsw_sp_span_buffsize_get(mtu, speed,
					    MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR);
}
/* Spectrum-3 SPAN operations. Init is shared with Spectrum-2 (same trigger
 * dispatch table); only the buffer sizing differs.
 */
const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
.init = mlxsw_sp2_span_init,
.buffsize_get = mlxsw_sp3_span_buffsize_get,
};
......@@ -26,6 +26,9 @@ struct mlxsw_sp_span_parms {
enum mlxsw_sp_span_trigger {
MLXSW_SP_SPAN_TRIGGER_INGRESS,
MLXSW_SP_SPAN_TRIGGER_EGRESS,
MLXSW_SP_SPAN_TRIGGER_TAIL_DROP,
MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
MLXSW_SP_SPAN_TRIGGER_ECN,
};
struct mlxsw_sp_span_trigger_parms {
......@@ -34,6 +37,11 @@ struct mlxsw_sp_span_trigger_parms {
struct mlxsw_sp_span_entry_ops;
struct mlxsw_sp_span_ops {
int (*init)(struct mlxsw_sp *mlxsw_sp);
u32 (*buffsize_get)(int mtu, u32 speed);
};
struct mlxsw_sp_span_entry {
const struct net_device *to_dev;
const struct mlxsw_sp_span_entry_ops *ops;
......@@ -81,5 +89,13 @@ mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port,
const struct mlxsw_sp_span_trigger_parms *parms);
int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops;
extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops;
extern const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops;
#endif
......@@ -458,7 +458,7 @@ void nfp_flower_qos_cleanup(struct nfp_app *app);
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_matchall_offload *flow);
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb));
......
......@@ -1646,7 +1646,7 @@ void nfp_flower_setup_indr_tc_release(void *cb_priv)
}
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
struct flow_block_offload *f, void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
......@@ -1680,7 +1680,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
cb_priv, cb_priv,
nfp_flower_setup_indr_tc_release,
f, netdev, data, app, cleanup);
f, netdev, sch, data, app, cleanup);
if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
......@@ -1711,7 +1711,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
}
int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
......@@ -1721,7 +1721,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
switch (type) {
case TC_SETUP_BLOCK:
return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
type_data, data, cleanup);
default:
return -EOPNOTSUPP;
......
......@@ -444,6 +444,7 @@ struct flow_block_offload {
struct list_head cb_list;
struct list_head *driver_block_list;
struct netlink_ext_ack *extack;
struct Qdisc *sch;
};
enum tc_setup_type;
......@@ -455,6 +456,7 @@ struct flow_block_cb;
struct flow_block_indr {
struct list_head list;
struct net_device *dev;
struct Qdisc *sch;
enum flow_block_binder_type binder_type;
void *data;
void *cb_priv;
......@@ -479,7 +481,8 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv),
struct flow_block_offload *bo,
struct net_device *dev, void *data,
struct net_device *dev,
struct Qdisc *sch, void *data,
void *indr_cb_priv,
void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);
......@@ -553,7 +556,7 @@ static inline void flow_block_init(struct flow_block *flow_block)
INIT_LIST_HEAD(&flow_block->cb_list);
}
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb));
......@@ -561,7 +564,7 @@ typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev,
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
enum tc_setup_type type, void *data,
struct flow_block_offload *bo,
void (*cleanup)(struct flow_block_cb *block_cb));
......
......@@ -429,7 +429,7 @@ EXPORT_SYMBOL(flow_indr_dev_unregister);
static void flow_block_indr_init(struct flow_block_cb *flow_block,
struct flow_block_offload *bo,
struct net_device *dev, void *data,
struct net_device *dev, struct Qdisc *sch, void *data,
void *cb_priv,
void (*cleanup)(struct flow_block_cb *block_cb))
{
......@@ -437,6 +437,7 @@ static void flow_block_indr_init(struct flow_block_cb *flow_block,
flow_block->indr.data = data;
flow_block->indr.cb_priv = cb_priv;
flow_block->indr.dev = dev;
flow_block->indr.sch = sch;
flow_block->indr.cleanup = cleanup;
}
......@@ -444,7 +445,8 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv),
struct flow_block_offload *bo,
struct net_device *dev, void *data,
struct net_device *dev,
struct Qdisc *sch, void *data,
void *indr_cb_priv,
void (*cleanup)(struct flow_block_cb *block_cb))
{
......@@ -454,7 +456,7 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
if (IS_ERR(block_cb))
goto out;
flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup);
flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
list_add(&block_cb->indr.list, &flow_block_indr_list);
out:
......@@ -462,7 +464,7 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
int flow_indr_dev_setup_offload(struct net_device *dev,
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
enum tc_setup_type type, void *data,
struct flow_block_offload *bo,
void (*cleanup)(struct flow_block_cb *block_cb))
......@@ -471,7 +473,7 @@ int flow_indr_dev_setup_offload(struct net_device *dev,
mutex_lock(&flow_indr_block_lock);
list_for_each_entry(this, &flow_block_indr_dev_list, list)
this->cb(dev, this->cb_priv, type, bo, data, cleanup);
this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
mutex_unlock(&flow_indr_block_lock);
......
......@@ -964,7 +964,7 @@ static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
extack);
return flow_indr_dev_setup_offload(dev, TC_SETUP_FT, flowtable, bo,
return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo,
nf_flow_table_indr_cleanup);
}
......
......@@ -312,7 +312,7 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, basechain, &bo,
err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
nft_indr_block_cleanup);
if (err < 0)
return err;
......
......@@ -622,7 +622,7 @@ static int tcf_block_setup(struct tcf_block *block,
struct flow_block_offload *bo);
static void tcf_block_offload_init(struct flow_block_offload *bo,
struct net_device *dev,
struct net_device *dev, struct Qdisc *sch,
enum flow_block_command command,
enum flow_block_binder_type binder_type,
struct flow_block *flow_block,
......@@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
bo->block = flow_block;
bo->block_shared = shared;
bo->extack = extack;
bo->sch = sch;
INIT_LIST_HEAD(&bo->cb_list);
}
......@@ -644,10 +645,11 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
struct tcf_block *block = block_cb->indr.data;
struct net_device *dev = block_cb->indr.dev;
struct Qdisc *sch = block_cb->indr.sch;
struct netlink_ext_ack extack = {};
struct flow_block_offload bo;
tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND,
tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
block_cb->indr.binder_type,
&block->flow_block, tcf_block_shared(block),
&extack);
......@@ -666,14 +668,14 @@ static bool tcf_block_offload_in_use(struct tcf_block *block)
}
static int tcf_block_offload_cmd(struct tcf_block *block,
struct net_device *dev,
struct net_device *dev, struct Qdisc *sch,
struct tcf_block_ext_info *ei,
enum flow_block_command command,
struct netlink_ext_ack *extack)
{
struct flow_block_offload bo = {};
tcf_block_offload_init(&bo, dev, command, ei->binder_type,
tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
&block->flow_block, tcf_block_shared(block),
extack);
......@@ -690,7 +692,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
return tcf_block_setup(block, &bo);
}
flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo,
flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
tc_block_indr_cleanup);
tcf_block_setup(block, &bo);
......@@ -717,7 +719,7 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
goto err_unlock;
}
err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
if (err == -EOPNOTSUPP)
goto no_offload_dev_inc;
if (err)
......@@ -744,7 +746,7 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
int err;
down_write(&block->cb_lock);
err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
if (err == -EOPNOTSUPP)
goto no_offload_dev_dec;
up_write(&block->cb_lock);
......
......@@ -121,6 +121,7 @@ h1_destroy()
h2_create()
{
host_create $h2 2
tc qdisc add dev $h2 clsact
# Some of the tests in this suite use multicast traffic. As this traffic
# enters BR2_10 resp. BR2_11, it is flooded to all other ports. Thus
......@@ -141,6 +142,7 @@ h2_create()
h2_destroy()
{
ethtool -s $h2 autoneg on
tc qdisc del dev $h2 clsact
host_destroy $h2
}
......@@ -336,6 +338,17 @@ get_qdisc_npackets()
qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .packets
}
# send_packets <vlan> <proto> <pkts> [mausezahn args...]
# Inject $pkts packets of the given protocol from $h2 into the VLAN,
# destined to $h3. Extra arguments are passed through to mausezahn.
send_packets()
{
local vlan=$1; shift
local proto=$1; shift
local pkts=$1; shift
$MZ $h2.$vlan -p 8000 -a own -b $h3_mac \
-A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \
-t $proto -q -c $pkts "$@"
}
# This sends traffic in an attempt to build a backlog of $size. Returns 0 on
# success. After 10 failed attempts it bails out and returns 1. It dumps the
# backlog size to stdout.
......@@ -364,9 +377,7 @@ build_backlog()
return 1
fi
$MZ $h2.$vlan -p 8000 -a own -b $h3_mac \
-A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \
-t $proto -q -c $pkts "$@"
send_packets $vlan $proto $pkts "$@"
done
}
......@@ -531,3 +542,92 @@ do_mc_backlog_test()
log_test "TC $((vlan - 10)): Qdisc reports MC backlog"
}
# do_drop_test <vlan> <limit> <trigger> <subtest> <fetch_counter>
# Generic buffer-drop qevent test. Verifies that the qevent action (e.g.
# mirror) fires only when packets are actually dropped due to buffer
# pressure, and only while the qevent rule is installed. $fetch_counter is
# a function that returns the action's observation counter.
do_drop_test()
{
local vlan=$1; shift
local limit=$1; shift
local trigger=$1; shift
local subtest=$1; shift
local fetch_counter=$1; shift
local backlog
local base
local now
local pct
RET=0
start_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) $h3_mac
# Create a bit of a backlog and observe no mirroring due to drops.
qevent_rule_install_$subtest
base=$($fetch_counter)
build_backlog $vlan $((2 * limit / 3)) udp >/dev/null
busywait 1100 until_counter_is ">= $((base + 1))" $fetch_counter >/dev/null
check_fail $? "Spurious packets observed without buffer pressure"
qevent_rule_uninstall_$subtest
# Push to the queue until it's at the limit. The configured limit is
# rounded by the qdisc and then by the driver, so this is the best we
# can do to get to the real limit of the system. Do this with the rules
# uninstalled so that the inevitable drops don't get counted.
build_backlog $vlan $((3 * limit / 2)) udp >/dev/null
qevent_rule_install_$subtest
base=$($fetch_counter)
send_packets $vlan udp 11
now=$(busywait 1100 until_counter_is ">= $((base + 10))" $fetch_counter)
check_err $? "Dropped packets not observed: 11 expected, $((now - base)) seen"
# When no extra traffic is injected, there should be no mirroring.
busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null
check_fail $? "Spurious packets observed"
# When the rule is uninstalled, there should be no mirroring.
qevent_rule_uninstall_$subtest
send_packets $vlan udp 11
busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null
check_fail $? "Spurious packets observed after uninstall"
log_test "TC $((vlan - 10)): ${trigger}ped packets $subtest'd"
stop_traffic
sleep 1
}
# Install the qevent action: mirror matched packets from shared block 10 to
# $swp2 (hw_stats disabled; counting happens at the receiving host).
qevent_rule_install_mirror()
{
tc filter add block 10 pref 1234 handle 102 matchall skip_sw \
action mirred egress mirror dev $swp2 hw_stats disabled
}
# Remove the mirror qevent rule installed by qevent_rule_install_mirror.
qevent_rule_uninstall_mirror()
{
tc filter del block 10 pref 1234 handle 102 matchall
}
# Fetch the number of mirrored packets seen by the counting rule on $h2
# ingress (handle 101, installed by do_drop_mirror_test).
qevent_counter_fetch_mirror()
{
tc_rule_handle_stats_get "dev $h2 ingress" 101
}
# do_drop_mirror_test <vlan> <limit> <qevent_name>
# Run the generic drop test with the mirror action: count mirrored UDP
# packets with a drop rule on $h2 ingress and verify mirroring only happens
# on actual buffer drops.
do_drop_mirror_test()
{
local vlan=$1; shift
local limit=$1; shift
local qevent_name=$1; shift
tc filter add dev $h2 ingress pref 1 handle 101 prot ip \
flower skip_sw ip_proto udp \
action drop
do_drop_test "$vlan" "$limit" "$qevent_name" mirror \
qevent_counter_fetch_mirror
tc filter del dev $h2 ingress pref 1 handle 101 flower
}
......@@ -7,6 +7,7 @@ ALL_TESTS="
ecn_nodrop_test
red_test
mc_backlog_test
red_mirror_test
"
: ${QDISC:=ets}
source sch_red_core.sh
......@@ -83,6 +84,16 @@ mc_backlog_test()
uninstall_qdisc
}
# Verify mirroring of early-dropped packets on both TCs via the RED
# early_drop qevent bound to shared block 10.
red_mirror_test()
{
install_qdisc qevent early_drop block 10
do_drop_mirror_test 10 $BACKLOG1 early_drop
do_drop_mirror_test 11 $BACKLOG2 early_drop
uninstall_qdisc
}
trap cleanup EXIT
setup_prepare
......
......@@ -7,6 +7,7 @@ ALL_TESTS="
ecn_nodrop_test
red_test
mc_backlog_test
red_mirror_test
"
source sch_red_core.sh
......@@ -57,6 +58,13 @@ mc_backlog_test()
uninstall_qdisc
}
# Verify mirroring of early-dropped packets via the RED early_drop qevent
# bound to shared block 10.
red_mirror_test()
{
	install_qdisc qevent early_drop block 10

	# Pass the qevent name as the third argument so do_drop_mirror_test
	# can log which trigger was exercised (matches the ets variant).
	do_drop_mirror_test 10 $BACKLOG early_drop
	uninstall_qdisc
}
trap cleanup EXIT
setup_prepare
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment