Commit 5da1033b authored by David S. Miller

Merge branch 'ptp-over-udp-dsa'

Vladimir Oltean says:

====================
Support PTP over UDP with the ocelot-8021q DSA tagging protocol

The alternative tag_8021q-based tagger for Ocelot switches, added here:
https://patchwork.kernel.org/project/netdevbpf/cover/20210129010009.3959398-1-olteanv@gmail.com/

gained support for PTP over L2 here:
https://patchwork.kernel.org/project/netdevbpf/cover/20210213223801.1334216-1-olteanv@gmail.com/

mostly as a minimum viable requirement. That PTP support was largely
self-contained code that installed some rules to replicate PTP packets
on the CPU queue, in felix_setup_mmio_filtering().

However, ocelot-8021q is starting to look more interesting for
general-purpose usage, so it is now time to reduce the technical debt by
integrating the PTP traps used by Felix for tag_8021q with the rest of
the Ocelot driver.

There is further consolidation of traps to be done. Previously, the
cookies used by the MRP traps overlapped with the cookies used for the
tag_8021q PTP traps, so those two features could not be used at the
same time.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c8b441d2 29940ce3
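
As background for the diff below, here is an illustrative sketch of the cookie
collision the cover letter refers to. The macro names and formulas are taken
from the ocelot_vcap.h hunk in this diff; the 6-port switch count
(num_phys_ports = 6, as on a VSC9959-class device) is assumed purely to make
the arithmetic concrete and is not part of the patch:

/* Old VCAP IS2 cookie scheme, illustrated with num_phys_ports = 6:
 *   MRP copy-to-CPU rule (MRC role) on port 0:        cookie = 0 + 6 = 6
 *   tag_8021q PTP redirect rule installed by
 *   felix_setup_mmio_filtering():                     cookie = 6
 * The collision meant MRP and tag_8021q PTP could not coexist in VCAP IS2.
 *
 * New per-VCAP-block cookie layout introduced by this series:
 *   OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port)   ->  6 + port   (6..11)
 *   OCELOT_VCAP_IS2_MRP_TRAP(ocelot)             ->  6 * 2      = 12
 *   OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot)          ->  6 * 2 + 1  = 13
 *   OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot)    ->  6 * 2 + 2  = 14
 *   OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot)     ->  6 * 2 + 3  = 15
 *   OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot)    ->  6 * 2 + 4  = 16
 *   OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot)     ->  6 * 2 + 5  = 17
 */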
......@@ -50,7 +50,7 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
outer_tagging_rule->id.cookie = port;
outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port);
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
......@@ -103,7 +103,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
untagging_rule->id.cookie = port;
untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
......@@ -124,7 +124,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
redirect_rule->ingress_port_mask = BIT(upstream);
redirect_rule->pag = port;
redirect_rule->prio = 1;
redirect_rule->id.cookie = port;
redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
......@@ -267,148 +267,82 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
mutex_unlock(&ocelot->fwd_domain_lock);
}
/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
* If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
* tag_8021q CPU port.
/* On switches with no extraction IRQ wired, trapped packets need to be
* replicated over Ethernet as well, otherwise we'd get no notification of
* their arrival when using the ocelot-8021q tagging protocol.
*/
static int felix_setup_mmio_filtering(struct felix *felix)
static int felix_update_trapping_destinations(struct dsa_switch *ds,
bool using_tag_8021q)
{
unsigned long user_ports = dsa_user_ports(felix->ds);
struct ocelot_vcap_filter *redirect_rule;
struct ocelot_vcap_filter *tagging_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
int cpu = -1, port, ret;
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
struct ocelot_vcap_filter *trap;
enum ocelot_mask_mode mask_mode;
unsigned long port_mask;
struct dsa_port *dp;
bool cpu_copy_ena;
int cpu = -1, err;
tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!tagging_rule)
return -ENOMEM;
if (!felix->info->quirk_no_xtr_irq)
return 0;
redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!redirect_rule) {
kfree(tagging_rule);
return -ENOMEM;
/* Figure out the current CPU port */
dsa_switch_for_each_cpu_port(dp, ds) {
cpu = dp->index;
break;
}
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (dsa_is_cpu_port(ds, port)) {
cpu = port;
break;
/* We are sure that "cpu" was found, otherwise
* dsa_tree_setup_default_cpu() would have failed earlier.
*/
/* Make sure all traps are set up for that destination */
list_for_each_entry(trap, &ocelot->traps, trap_list) {
/* Figure out the current trapping destination */
if (using_tag_8021q) {
/* Redirect to the tag_8021q CPU port. If timestamps
* are necessary, also copy trapped packets to the CPU
* port module.
*/
mask_mode = OCELOT_MASK_MODE_REDIRECT;
port_mask = BIT(cpu);
cpu_copy_ena = !!trap->take_ts;
} else {
/* Trap packets only to the CPU port module, which is
* redirected to the NPI port (the DSA CPU port)
*/
mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
port_mask = 0;
cpu_copy_ena = true;
}
}
if (cpu < 0) {
kfree(tagging_rule);
kfree(redirect_rule);
return -EINVAL;
}
if (trap->action.mask_mode == mask_mode &&
trap->action.port_mask == port_mask &&
trap->action.cpu_copy_ena == cpu_copy_ena)
continue;
tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
tagging_rule->ingress_port_mask = user_ports;
tagging_rule->prio = 1;
tagging_rule->id.cookie = ocelot->num_phys_ports;
tagging_rule->id.tc_offload = false;
tagging_rule->block_id = VCAP_IS1;
tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
tagging_rule->lookup = 0;
tagging_rule->action.pag_override_mask = 0xff;
tagging_rule->action.pag_val = ocelot->num_phys_ports;
ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
if (ret) {
kfree(tagging_rule);
kfree(redirect_rule);
return ret;
}
trap->action.mask_mode = mask_mode;
trap->action.port_mask = port_mask;
trap->action.cpu_copy_ena = cpu_copy_ena;
redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
redirect_rule->ingress_port_mask = user_ports;
redirect_rule->pag = ocelot->num_phys_ports;
redirect_rule->prio = 1;
redirect_rule->id.cookie = ocelot->num_phys_ports;
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
redirect_rule->lookup = 0;
redirect_rule->action.cpu_copy_ena = true;
if (felix->info->quirk_no_xtr_irq) {
/* Redirect to the tag_8021q CPU but also copy PTP packets to
* the CPU port module
*/
redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
redirect_rule->action.port_mask = BIT(cpu);
} else {
/* Trap PTP packets only to the CPU port module (which is
* redirected to the NPI port)
*/
redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
redirect_rule->action.port_mask = 0;
}
ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
if (ret) {
ocelot_vcap_filter_del(ocelot, tagging_rule);
kfree(redirect_rule);
return ret;
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err)
return err;
}
/* The ownership of the CPU port module's queues might have just been
* transferred to the tag_8021q tagger from the NPI-based tagger.
* So there might still be all sorts of crap in the queues. On the
* other hand, the MMIO-based matching of PTP frames is very brittle,
* so we need to be careful that there are no extra frames to be
* dequeued over MMIO, since we would never know to discard them.
*/
ocelot_drain_cpu_queue(ocelot, 0);
return 0;
}
static int felix_teardown_mmio_filtering(struct felix *felix)
{
struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
struct ocelot_vcap_block *block_vcap_is1;
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot *ocelot = &felix->ocelot;
int err;
block_vcap_is1 = &ocelot->block[VCAP_IS1];
block_vcap_is2 = &ocelot->block[VCAP_IS2];
tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
ocelot->num_phys_ports,
false);
if (!tagging_rule)
return -ENOENT;
err = ocelot_vcap_filter_del(ocelot, tagging_rule);
if (err)
return err;
redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
ocelot->num_phys_ports,
false);
if (!redirect_rule)
return -ENOENT;
return ocelot_vcap_filter_del(ocelot, redirect_rule);
}
static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
unsigned long cpu_flood;
int port, err;
struct dsa_port *dp;
int err;
felix_8021q_cpu_port_init(ocelot, cpu);
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
dsa_switch_for_each_available_port(dp, ds) {
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
......@@ -421,7 +355,7 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
ANA_PORT_CPU_FWD_BPDU_CFG, port);
ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
}
/* In tag_8021q mode, the CPU port module is unused, except for PTP
......@@ -437,10 +371,19 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
if (err)
return err;
err = felix_setup_mmio_filtering(felix);
err = felix_update_trapping_destinations(ds, true);
if (err)
goto out_tag_8021q_unregister;
/* The ownership of the CPU port module's queues might have just been
* transferred to the tag_8021q tagger from the NPI-based tagger.
* So there might still be all sorts of crap in the queues. On the
* other hand, the MMIO-based matching of PTP frames is very brittle,
* so we need to be careful that there are no extra frames to be
* dequeued over MMIO, since we would never know to discard them.
*/
ocelot_drain_cpu_queue(ocelot, 0);
return 0;
out_tag_8021q_unregister:
......@@ -451,27 +394,24 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
int err, port;
struct dsa_port *dp;
int err;
err = felix_teardown_mmio_filtering(felix);
err = felix_update_trapping_destinations(ds, false);
if (err)
dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
err);
dsa_tag_8021q_unregister(ds);
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
dsa_switch_for_each_available_port(dp, ds) {
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
port);
dp->index);
}
felix_8021q_cpu_port_deinit(ocelot, cpu);
......@@ -1192,7 +1132,8 @@ static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
int port, err;
struct dsa_port *dp;
int err;
err = felix_init_structs(felix, ds->num_ports);
if (err)
......@@ -1211,30 +1152,24 @@ static int felix_setup(struct dsa_switch *ds)
}
}
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
ocelot_init_port(ocelot, port);
dsa_switch_for_each_available_port(dp, ds) {
ocelot_init_port(ocelot, dp->index);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
*/
felix_port_qos_map_init(ocelot, port);
felix_port_qos_map_init(ocelot, dp->index);
}
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
for (port = 0; port < ds->num_ports; port++) {
if (!dsa_is_cpu_port(ds, port))
continue;
dsa_switch_for_each_cpu_port(dp, ds) {
/* The initial tag protocol is NPI which always returns 0, so
* there's no real point in checking for errors.
*/
felix_set_tag_protocol(ds, port, felix->tag_proto);
felix_set_tag_protocol(ds, dp->index, felix->tag_proto);
break;
}
......@@ -1244,12 +1179,8 @@ static int felix_setup(struct dsa_switch *ds)
return 0;
out_deinit_ports:
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
ocelot_deinit_port(ocelot, port);
}
dsa_switch_for_each_available_port(dp, ds)
ocelot_deinit_port(ocelot, dp->index);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
......@@ -1265,22 +1196,15 @@ static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
int port;
for (port = 0; port < ds->num_ports; port++) {
if (!dsa_is_cpu_port(ds, port))
continue;
struct dsa_port *dp;
felix_del_tag_protocol(ds, port, felix->tag_proto);
dsa_switch_for_each_cpu_port(dp, ds) {
felix_del_tag_protocol(ds, dp->index, felix->tag_proto);
break;
}
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
ocelot_deinit_port(ocelot, port);
}
dsa_switch_for_each_available_port(dp, ds)
ocelot_deinit_port(ocelot, dp->index);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
......@@ -1302,8 +1226,17 @@ static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
bool using_tag_8021q;
int err;
err = ocelot_hwstamp_set(ocelot, port, ifr);
if (err)
return err;
using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
return ocelot_hwstamp_set(ocelot, port, ifr);
return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
......@@ -1430,8 +1363,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
bool using_tag_8021q;
int err;
err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
if (err)
return err;
using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
......
......@@ -1468,9 +1468,9 @@ ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
trap->key.ipv6.dport.mask = 0xffff;
}
static int ocelot_trap_add(struct ocelot *ocelot, int port,
unsigned long cookie,
void (*populate)(struct ocelot_vcap_filter *f))
int ocelot_trap_add(struct ocelot *ocelot, int port,
unsigned long cookie, bool take_ts,
void (*populate)(struct ocelot_vcap_filter *f))
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
......@@ -1496,6 +1496,8 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port,
trap->action.cpu_copy_ena = true;
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
trap->take_ts = take_ts;
list_add_tail(&trap->trap_list, &ocelot->traps);
new = true;
}
......@@ -1507,16 +1509,17 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port,
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err) {
trap->ingress_port_mask &= ~BIT(port);
if (!trap->ingress_port_mask)
if (!trap->ingress_port_mask) {
list_del(&trap->trap_list);
kfree(trap);
}
return err;
}
return 0;
}
static int ocelot_trap_del(struct ocelot *ocelot, int port,
unsigned long cookie)
int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
......@@ -1529,39 +1532,42 @@ static int ocelot_trap_del(struct ocelot *ocelot, int port,
return 0;
trap->ingress_port_mask &= ~BIT(port);
if (!trap->ingress_port_mask)
if (!trap->ingress_port_mask) {
list_del(&trap->trap_list);
return ocelot_vcap_filter_del(ocelot, trap);
}
return ocelot_vcap_filter_replace(ocelot, trap);
}
static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
{
unsigned long l2_cookie = ocelot->num_phys_ports + 1;
unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
return ocelot_trap_add(ocelot, port, l2_cookie,
return ocelot_trap_add(ocelot, port, l2_cookie, true,
ocelot_populate_l2_ptp_trap_key);
}
static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
{
unsigned long l2_cookie = ocelot->num_phys_ports + 1;
unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
return ocelot_trap_del(ocelot, port, l2_cookie);
}
static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
{
unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie,
err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
ocelot_populate_ipv4_ptp_event_trap_key);
if (err)
return err;
err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie,
err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
ocelot_populate_ipv4_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
......@@ -1571,8 +1577,8 @@ static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
{
unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
......@@ -1582,16 +1588,16 @@ static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
{
unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie,
err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
ocelot_populate_ipv6_ptp_event_trap_key);
if (err)
return err;
err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie,
err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
ocelot_populate_ipv6_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
......@@ -1601,8 +1607,8 @@ static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
{
unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
......
......@@ -21,6 +21,7 @@
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot.h>
#include "ocelot_rew.h"
#include "ocelot_qs.h"
......@@ -102,6 +103,11 @@ int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
enum devlink_port_flavour flavour);
void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
int ocelot_trap_add(struct ocelot *ocelot, int port,
unsigned long cookie, bool take_ts,
void (*populate)(struct ocelot_vcap_filter *f));
int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie);
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
......
......@@ -279,6 +279,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.cpu_copy_ena = true;
filter->action.cpu_qu_num = 0;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
list_add_tail(&filter->trap_list, &ocelot->traps);
break;
case FLOW_ACTION_POLICE:
if (filter->block_id == PSFP_BLOCK_ID) {
......@@ -840,6 +841,8 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
if (ret) {
if (!list_empty(&filter->trap_list))
list_del(&filter->trap_list);
kfree(filter);
return ret;
}
......
......@@ -60,7 +60,7 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
filter->prio = 1;
filter->id.cookie = src_port;
filter->id.cookie = OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, src_port);
filter->id.tc_offload = false;
filter->block_id = VCAP_IS2;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
......@@ -77,39 +77,30 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
return err;
}
static int ocelot_mrp_copy_add_vcap(struct ocelot *ocelot, int port,
int prio, unsigned long cookie)
static void ocelot_populate_mrp_trap_key(struct ocelot_vcap_filter *filter)
{
const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
struct ocelot_vcap_filter *filter;
int err;
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
filter->prio = prio;
filter->id.cookie = cookie;
filter->id.tc_offload = false;
filter->block_id = VCAP_IS2;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
filter->ingress_port_mask = BIT(port);
/* It is possible to use either the control or the test dmac here,
 * because the mask doesn't cover the LSB
 */
ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac);
ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask);
filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
filter->action.port_mask = 0x0;
filter->action.cpu_copy_ena = true;
filter->action.cpu_qu_num = OCELOT_MRP_CPUQ;
}
err = ocelot_vcap_filter_add(ocelot, filter, NULL);
if (err)
kfree(filter);
static int ocelot_mrp_trap_add(struct ocelot *ocelot, int port)
{
unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
return err;
return ocelot_trap_add(ocelot, port, cookie, false,
ocelot_populate_mrp_trap_key);
}
static int ocelot_mrp_trap_del(struct ocelot *ocelot, int port)
{
unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
return ocelot_trap_del(ocelot, port, cookie);
}
static void ocelot_mrp_save_mac(struct ocelot *ocelot,
......@@ -186,7 +177,7 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
ocelot_mrp_save_mac(ocelot, ocelot_port);
if (mrp->ring_role != BR_MRP_RING_ROLE_MRC)
return ocelot_mrp_copy_add_vcap(ocelot, port, 1, port);
return ocelot_mrp_trap_add(ocelot, port);
dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port);
if (dst_port == -1)
......@@ -196,10 +187,10 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
if (err)
return err;
err = ocelot_mrp_copy_add_vcap(ocelot, port, 2,
port + ocelot->num_phys_ports);
err = ocelot_mrp_trap_add(ocelot, port);
if (err) {
ocelot_mrp_del_vcap(ocelot, port);
ocelot_mrp_del_vcap(ocelot,
OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
return err;
}
......@@ -211,7 +202,7 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int i;
int err, i;
if (!ocelot_port)
return -EOPNOTSUPP;
......@@ -222,8 +213,11 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
ocelot_mrp_del_vcap(ocelot, port);
ocelot_mrp_del_vcap(ocelot, port + ocelot->num_phys_ports);
err = ocelot_mrp_trap_del(ocelot, port);
if (err)
return err;
ocelot_mrp_del_vcap(ocelot, OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
for (i = 0; i < ocelot->num_phys_ports; ++i) {
ocelot_port = ocelot->ports[i];
......
......@@ -1401,6 +1401,7 @@ int ocelot_vcap_init(struct ocelot *ocelot)
}
INIT_LIST_HEAD(&ocelot->dummy_rules);
INIT_LIST_HEAD(&ocelot->traps);
INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list);
return 0;
......
......@@ -105,8 +105,6 @@
#define REG_RESERVED_ADDR 0xffffffff
#define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR)
#define OCELOT_MRP_CPUQ 7
enum ocelot_target {
ANA = 1,
QS,
......@@ -691,6 +689,7 @@ struct ocelot {
u8 base_mac[ETH_ALEN];
struct list_head vlans;
struct list_head traps;
/* Switches like VSC9959 have flooding per traffic class */
int num_flooding_pgids;
......
......@@ -8,6 +8,20 @@
#include <soc/mscc/ocelot.h>
/* Cookie definitions for private VCAP filters installed by the driver.
* Must be unique per VCAP block.
*/
#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port) (port)
#define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port)
#define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port)
#define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port))
#define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2)
#define OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 1)
#define OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 2)
#define OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 3)
#define OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 4)
#define OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 5)
/* =================================================================
* VCAP Common
* =================================================================
......@@ -666,6 +680,7 @@ struct ocelot_vcap_id {
struct ocelot_vcap_filter {
struct list_head list;
struct list_head trap_list;
enum ocelot_vcap_filter_type type;
int block_id;
......@@ -678,6 +693,7 @@ struct ocelot_vcap_filter {
struct ocelot_vcap_action action;
struct ocelot_vcap_stats stats;
/* For VCAP IS1 and IS2 */
bool take_ts;
unsigned long ingress_port_mask;
/* For VCAP ES0 */
struct ocelot_vcap_port ingress_port;
......
......@@ -32,6 +32,13 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
if (!xmit_work_fn || !xmit_worker)
return NULL;
/* PTP over IP packets need UDP checksumming. We may have inherited
* NETIF_F_HW_CSUM from the DSA master, but these packets are not sent
* through the DSA master, so calculate the checksum here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
if (!xmit_work)
return NULL;
......