Commit 09934b03 authored by David S. Miller

Merge branch 'net-dsa-lantiq-Add-bridge-offloading'

Hauke Mehrtens says:

====================
net: dsa: lantiq: Add bridge offloading

This adds bridge offloading for the Intel / Lantiq GSWIP 2.1 switch.

Changes since:
v2:
 - Added Fixes tag to patch 1
 - Fixed typo
 - Added GSWIP_TABLE_MAC_BRIDGE_STATIC and made use of it
 - Used GSWIP_TABLE_MAC_BRIDGE in more places

v1:
 - Fixed typo signle -> single
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 14cfbdac 58c59ef9
@@ -4,7 +4,25 @@
*
* Copyright (C) 2010 Lantiq Deutschland
* Copyright (C) 2012 John Crispin <john@phrozen.org>
* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
* Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
*
* The VLAN and bridge model the GSWIP hardware uses does not directly
* match the model DSA uses.
*
* The hardware has 64 possible table entries for bridges with one VLAN
* ID, one flow id and a list of ports for each bridge. All entries which
* match the same flow ID are combined in the MAC learning table and
* act as one global bridge.
* The hardware does not support a VLAN filter per port, but only per
* bridge; this driver converts the DSA model to the hardware model.
*
* The CPU gets all the exception frames which do not match any forwarding
* rule and the CPU port is also added to all bridges. This makes it possible
* to handle all the special cases easily in software.
* At initialization the driver allocates one bridge table entry for
* each switch port, which is used when the port is not part of an
* explicit bridge. This prevents frames from being forwarded between
* all LAN ports by default.
*/
#include <linux/clk.h>
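
To make the model described in the header comment concrete, here is an illustrative sketch (not part of this patch): a VLAN-unaware bridge occupies one Active VLAN table entry, mapping VID 0 to the bridge's flow id, plus one VLAN Mapping table entry carrying the member port map including the CPU port. The helper name and the index/fid/port-map values are hypothetical; gswip_pce_table_entry and gswip_pce_table_entry_write() are introduced further down in this file.

/* Illustrative sketch only (not part of this patch): program one
 * VLAN-unaware bridge into the two PCE tables. The index, fid and
 * member port map are hypothetical example values.
 */
static int gswip_example_unaware_bridge(struct gswip_priv *priv,
					int idx, int fid, u16 member_ports)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	struct gswip_pce_table_entry vlan_mapping = {0,};
	int err;

	/* One Active VLAN entry: VID 0 of this bridge maps to its fid */
	vlan_active.index = idx;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = 0;		/* vid */
	vlan_active.val[0] = fid;	/* fid */
	vlan_active.valid = true;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err)
		return err;

	/* One VLAN Mapping entry: member ports, always including the CPU port */
	vlan_mapping.index = idx;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	vlan_mapping.val[0] = 0;	/* vid */
	vlan_mapping.val[1] = member_ports | BIT(priv->hw_info->cpu_port);
	vlan_mapping.val[2] = 0;	/* tag member map, all untagged */
	return gswip_pce_table_entry_write(priv, &vlan_mapping);
}

The gswip_add_single_port_br() and gswip_vlan_add_unaware() functions added by this patch follow exactly this pattern.
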
@@ -148,19 +166,29 @@
#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
#define GSWIP_PCE_GCTRL_0 0x456
#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
#define GSWIP_PCE_GCTRL_1 0x457
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11)
#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
#define GSWIP_MAC_FLEN 0x8C5
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
@@ -183,6 +211,11 @@
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02
#define GSWIP_TABLE_MAC_BRIDGE 0x0b
#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static, non-aging entry */
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
struct gswip_hw_info {
@@ -202,6 +235,12 @@ struct gswip_gphy_fw {
char *fw_name;
};
struct gswip_vlan {
struct net_device *bridge;
u16 vid;
u8 fid;
};
struct gswip_priv {
__iomem void *gswip;
__iomem void *mdio;
@@ -211,8 +250,22 @@ struct gswip_priv {
struct dsa_switch *ds;
struct device *dev;
struct regmap *rcu_regmap;
struct gswip_vlan vlans[64];
int num_gphy_fw;
struct gswip_gphy_fw *gphy_fw;
u32 port_vlan_filter;
};
struct gswip_pce_table_entry {
u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
u16 table; // PCE_TBL_CTRL.ADDR = pData->table
u16 key[8];
u16 val[5];
u16 mask;
u8 gmap;
bool type;
bool valid;
bool key_mode;
};
struct gswip_rmon_cnt_desc {
@@ -447,10 +500,153 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
}
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
struct gswip_pce_table_entry *tbl)
{
int i;
int err;
u16 crtl;
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
if (err)
return err;
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
GSWIP_PCE_TBL_CTRL);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
return 0;
}
static int gswip_pce_table_entry_write(struct gswip_priv *priv,
struct gswip_pce_table_entry *tbl)
{
int i;
int err;
u16 crtl;
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
if (err)
return err;
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
tbl->table | addr_mode,
GSWIP_PCE_TBL_CTRL);
for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
tbl->table | addr_mode,
GSWIP_PCE_TBL_CTRL);
gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
GSWIP_PCE_TBL_CTRL_GMAP_MASK);
if (tbl->type)
crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
if (tbl->valid)
crtl |= GSWIP_PCE_TBL_CTRL_VLD;
crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
crtl |= GSWIP_PCE_TBL_CTRL_BAS;
gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
}
/* Add the LAN port into a bridge with the CPU port by
* default. This prevents automatic forwarding of
* packets between the LAN ports when no explicit
* bridge is configured.
*/
static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
struct gswip_pce_table_entry vlan_active = {0,};
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int cpu_port = priv->hw_info->cpu_port;
unsigned int max_ports = priv->hw_info->max_ports;
int err;
if (port >= max_ports) {
dev_err(priv->dev, "single port for %i supported\n", port);
return -EIO;
}
vlan_active.index = port + 1;
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
vlan_active.key[0] = 0; /* vid */
vlan_active.val[0] = port + 1 /* fid */;
vlan_active.valid = add;
err = gswip_pce_table_entry_write(priv, &vlan_active);
if (err) {
dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
return err;
}
if (!add)
return 0;
vlan_mapping.index = port + 1;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
vlan_mapping.val[0] = 0 /* vid */;
vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
vlan_mapping.val[2] = 0;
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
return err;
}
return 0;
}
static int gswip_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct gswip_priv *priv = ds->priv;
int err;
if (!dsa_is_cpu_port(ds, port)) {
err = gswip_add_single_port_br(priv, port, true);
if (err)
return err;
}
/* RMON Counter Enable for port */
gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
@@ -461,8 +657,6 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
GSWIP_SDMA_PCTRLp(port));
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
GSWIP_PCE_PCTRL_0p(port));
if (!dsa_is_cpu_port(ds, port)) {
u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
@@ -535,6 +729,39 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
return 0;
}
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering)
{
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
/* Do not allow changing the VLAN filtering options while in bridge */
if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering && bridge)
return -EIO;
if (vlan_filtering) {
/* Use port based VLAN tag */
gswip_switch_mask(priv,
GSWIP_PCE_VCTRL_VSR,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
GSWIP_PCE_VCTRL_VEMR,
GSWIP_PCE_VCTRL(port));
gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
GSWIP_PCE_PCTRL_0p(port));
} else {
/* Use port based VLAN tag */
gswip_switch_mask(priv,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
GSWIP_PCE_VCTRL_VEMR,
GSWIP_PCE_VCTRL_VSR,
GSWIP_PCE_VCTRL(port));
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
GSWIP_PCE_PCTRL_0p(port));
}
return 0;
}
static int gswip_setup(struct dsa_switch *ds)
{
struct gswip_priv *priv = ds->priv;
@@ -547,8 +774,10 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_w(priv, 0, GSWIP_SWRES);
/* disable port fetch/store dma on all ports */
for (i = 0; i < priv->hw_info->max_ports; i++)
for (i = 0; i < priv->hw_info->max_ports; i++) {
gswip_port_disable(ds, i);
gswip_port_vlan_filtering(ds, i, false);
}
/* enable Switch */
gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
@@ -578,6 +807,10 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
GSWIP_FDMA_PCTRLp(cpu_port));
/* accept special tag in ingress direction */
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
GSWIP_PCE_PCTRL_0p(cpu_port));
gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
GSWIP_MAC_CTRL_2p(cpu_port));
gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
@@ -587,10 +820,15 @@ static int gswip_setup(struct dsa_switch *ds)
/* VLAN aware Switching */
gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
/* Mac Address Table Lock */
gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_1_MAC_GLOCK |
GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD,
GSWIP_PCE_GCTRL_1);
/* Flush MAC Table */
gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
GSWIP_PCE_GCTRL_0_MTFL);
if (err) {
dev_err(priv->dev, "MAC flushing didn't finish\n");
return err;
}
gswip_port_enable(ds, cpu_port, NULL);
return 0;
@@ -602,6 +840,551 @@ static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_GSWIP;
}
static int gswip_vlan_active_create(struct gswip_priv *priv,
struct net_device *bridge,
int fid, u16 vid)
{
struct gswip_pce_table_entry vlan_active = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
int idx = -1;
int err;
int i;
/* Look for a free slot */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (!priv->vlans[i].bridge) {
idx = i;
break;
}
}
if (idx == -1)
return -ENOSPC;
if (fid == -1)
fid = idx;
vlan_active.index = idx;
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
vlan_active.key[0] = vid;
vlan_active.val[0] = fid;
vlan_active.valid = true;
err = gswip_pce_table_entry_write(priv, &vlan_active);
if (err) {
dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
return err;
}
priv->vlans[idx].bridge = bridge;
priv->vlans[idx].vid = vid;
priv->vlans[idx].fid = fid;
return idx;
}
static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
{
struct gswip_pce_table_entry vlan_active = {0,};
int err;
vlan_active.index = idx;
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
vlan_active.valid = false;
err = gswip_pce_table_entry_write(priv, &vlan_active);
if (err)
dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
priv->vlans[idx].bridge = NULL;
return err;
}
static int gswip_vlan_add_unaware(struct gswip_priv *priv,
struct net_device *bridge, int port)
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
unsigned int cpu_port = priv->hw_info->cpu_port;
bool active_vlan_created = false;
int idx = -1;
int i;
int err;
/* Check if there is already a page for this bridge */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
idx = i;
break;
}
}
/* If this bridge is not programmed yet, add an Active VLAN table
* entry in a free slot and prepare the VLAN mapping table entry.
*/
if (idx == -1) {
idx = gswip_vlan_active_create(priv, bridge, -1, 0);
if (idx < 0)
return idx;
active_vlan_created = true;
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
/* VLAN ID byte, maps to the VLAN ID of vlan active table */
vlan_mapping.val[0] = 0;
} else {
/* Read the existing VLAN mapping entry from the switch */
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
err = gswip_pce_table_entry_read(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
err);
return err;
}
}
/* Update the VLAN mapping entry and write it to the switch */
vlan_mapping.val[1] |= BIT(cpu_port);
vlan_mapping.val[1] |= BIT(port);
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
/* In case an Active VLAN was created, delete it again */
if (active_vlan_created)
gswip_vlan_active_remove(priv, idx);
return err;
}
gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
return 0;
}
static int gswip_vlan_add_aware(struct gswip_priv *priv,
struct net_device *bridge, int port,
u16 vid, bool untagged,
bool pvid)
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
unsigned int cpu_port = priv->hw_info->cpu_port;
bool active_vlan_created = false;
int idx = -1;
int fid = -1;
int i;
int err;
/* Check if there is already a page for this bridge */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
if (fid != -1 && fid != priv->vlans[i].fid)
dev_err(priv->dev, "one bridge with multiple flow ids\n");
fid = priv->vlans[i].fid;
if (priv->vlans[i].vid == vid) {
idx = i;
break;
}
}
}
/* If this bridge is not programmed yet, add an Active VLAN table
* entry in a free slot and prepare the VLAN mapping table entry.
*/
if (idx == -1) {
idx = gswip_vlan_active_create(priv, bridge, fid, vid);
if (idx < 0)
return idx;
active_vlan_created = true;
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
/* VLAN ID byte, maps to the VLAN ID of vlan active table */
vlan_mapping.val[0] = vid;
} else {
/* Read the existing VLAN mapping entry from the switch */
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
err = gswip_pce_table_entry_read(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
err);
return err;
}
}
vlan_mapping.val[0] = vid;
/* Update the VLAN mapping entry and write it to the switch */
vlan_mapping.val[1] |= BIT(cpu_port);
vlan_mapping.val[2] |= BIT(cpu_port);
vlan_mapping.val[1] |= BIT(port);
if (untagged)
vlan_mapping.val[2] &= ~BIT(port);
else
vlan_mapping.val[2] |= BIT(port);
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
/* In case an Active VLAN was created, delete it again */
if (active_vlan_created)
gswip_vlan_active_remove(priv, idx);
return err;
}
if (pvid)
gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
return 0;
}
static int gswip_vlan_remove(struct gswip_priv *priv,
struct net_device *bridge, int port,
u16 vid, bool pvid, bool vlan_aware)
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
unsigned int cpu_port = priv->hw_info->cpu_port;
int idx = -1;
int i;
int err;
/* Check if there is already a page for this bridge */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge &&
(!vlan_aware || priv->vlans[i].vid == vid)) {
idx = i;
break;
}
}
if (idx == -1) {
dev_err(priv->dev, "bridge to leave does not exists\n");
return -ENOENT;
}
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
err = gswip_pce_table_entry_read(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
return err;
}
vlan_mapping.val[1] &= ~BIT(port);
vlan_mapping.val[2] &= ~BIT(port);
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
return err;
}
/* In case all ports are removed from the bridge, remove the VLAN */
if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
err = gswip_vlan_active_remove(priv, idx);
if (err) {
dev_err(priv->dev, "failed to write active VLAN: %d\n",
err);
return err;
}
}
/* GSWIP 2.2 (GRX300) and later program the VID directly here. */
if (pvid)
gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
return 0;
}
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct gswip_priv *priv = ds->priv;
int err;
/* When the bridge uses VLAN filtering we have to configure VLAN
* specific bridges. No bridge is configured here.
*/
if (!br_vlan_enabled(bridge)) {
err = gswip_vlan_add_unaware(priv, bridge, port);
if (err)
return err;
priv->port_vlan_filter &= ~BIT(port);
} else {
priv->port_vlan_filter |= BIT(port);
}
return gswip_add_single_port_br(priv, port, false);
}
static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct gswip_priv *priv = ds->priv;
gswip_add_single_port_br(priv, port, true);
/* When the bridge uses VLAN filtering we have to configure VLAN
* specific bridges. No bridge is configured here.
*/
if (!br_vlan_enabled(bridge))
gswip_vlan_remove(priv, bridge, port, 0, true, false);
}
static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
unsigned int max_ports = priv->hw_info->max_ports;
u16 vid;
int i;
int pos = max_ports;
/* We only support VLAN filtering on bridges */
if (!dsa_is_cpu_port(ds, port) && !bridge)
return -EOPNOTSUPP;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
int idx = -1;
/* Check if there is already a page for this VLAN */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge &&
priv->vlans[i].vid == vid) {
idx = i;
break;
}
}
/* If this VLAN is not programmed yet, we have to reserve
* one entry in the VLAN table. Make sure the search continues
* at the next position in the following round so every new
* VID gets its own free slot.
*/
if (idx == -1) {
/* Look for a free slot */
for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
if (!priv->vlans[pos].bridge) {
idx = pos;
pos++;
break;
}
}
if (idx == -1)
return -ENOSPC;
}
}
return 0;
}
static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
u16 vid;
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here. This is also called with bridge
* NULL and then we do not know for which bridge to configure
* this.
*/
if (dsa_is_cpu_port(ds, port))
return;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
}
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
u16 vid;
int err;
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here. This is also called with bridge
* NULL and then we do not know for which bridge to configure
* this.
*/
if (dsa_is_cpu_port(ds, port))
return 0;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
if (err)
return err;
}
return 0;
}
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
struct gswip_pce_table_entry mac_bridge = {0,};
int i;
int err;
for (i = 0; i < 2048; i++) {
mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
mac_bridge.index = i;
err = gswip_pce_table_entry_read(priv, &mac_bridge);
if (err) {
dev_err(priv->dev, "failed to read mac brigde: %d\n",
err);
return;
}
if (!mac_bridge.valid)
continue;
if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
continue;
if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
continue;
mac_bridge.valid = false;
err = gswip_pce_table_entry_write(priv, &mac_bridge);
if (err) {
dev_err(priv->dev, "failed to write mac brigde: %d\n",
err);
return;
}
}
}
static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct gswip_priv *priv = ds->priv;
u32 stp_state;
switch (state) {
case BR_STATE_DISABLED:
gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
GSWIP_SDMA_PCTRLp(port));
return;
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
break;
case BR_STATE_LEARNING:
stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
break;
case BR_STATE_FORWARDING:
stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
break;
default:
dev_err(priv->dev, "invalid STP state: %d\n", state);
return;
}
gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
GSWIP_SDMA_PCTRLp(port));
gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
GSWIP_PCE_PCTRL_0p(port));
}
static int gswip_port_fdb(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid, bool add)
{
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
struct gswip_pce_table_entry mac_bridge = {0,};
unsigned int cpu_port = priv->hw_info->cpu_port;
int fid = -1;
int i;
int err;
if (!bridge)
return -EINVAL;
for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
fid = priv->vlans[i].fid;
break;
}
}
if (fid == -1) {
dev_err(priv->dev, "Port not part of a bridge\n");
return -EINVAL;
}
mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
mac_bridge.key_mode = true;
mac_bridge.key[0] = addr[5] | (addr[4] << 8);
mac_bridge.key[1] = addr[3] | (addr[2] << 8);
mac_bridge.key[2] = addr[1] | (addr[0] << 8);
mac_bridge.key[3] = fid;
mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
mac_bridge.valid = add;
err = gswip_pce_table_entry_write(priv, &mac_bridge);
if (err)
dev_err(priv->dev, "failed to write mac brigde: %d\n", err);
return err;
}
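
For illustration only (not something added by this patch), the MAC bridge table key packs the address bytes in reverse order, two per 16-bit word, with the flow id in the last key word. A sketch of how a concrete address such as 00:11:22:33:44:55 with fid 5 would be packed, following the layout used in gswip_port_fdb() above; the helper name is hypothetical:

/* Illustrative sketch only: key packing for 00:11:22:33:44:55, fid 5,
 * matching the layout in gswip_port_fdb() above.
 */
static void gswip_example_fdb_key(struct gswip_pce_table_entry *mac_bridge)
{
	const unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	mac_bridge->table = GSWIP_TABLE_MAC_BRIDGE;
	mac_bridge->key_mode = true;
	mac_bridge->key[0] = addr[5] | (addr[4] << 8);	/* 0x4455 */
	mac_bridge->key[1] = addr[3] | (addr[2] << 8);	/* 0x2233 */
	mac_bridge->key[2] = addr[1] | (addr[0] << 8);	/* 0x0011 */
	mac_bridge->key[3] = 5;				/* fid */
}
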
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
return gswip_port_fdb(ds, port, addr, vid, true);
}
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
return gswip_port_fdb(ds, port, addr, vid, false);
}
static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct gswip_priv *priv = ds->priv;
struct gswip_pce_table_entry mac_bridge = {0,};
unsigned char addr[6];
int i;
int err;
for (i = 0; i < 2048; i++) {
mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
mac_bridge.index = i;
err = gswip_pce_table_entry_read(priv, &mac_bridge);
if (err) {
dev_err(priv->dev, "failed to write mac brigde: %d\n",
err);
return err;
}
if (!mac_bridge.valid)
continue;
addr[5] = mac_bridge.key[0] & 0xff;
addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
addr[3] = mac_bridge.key[1] & 0xff;
addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
addr[1] = mac_bridge.key[2] & 0xff;
addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
if (mac_bridge.val[0] & BIT(port))
cb(addr, 0, true, data);
} else {
if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
cb(addr, 0, false, data);
}
}
return 0;
}
static void gswip_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -809,6 +1592,17 @@ static const struct dsa_switch_ops gswip_switch_ops = {
.setup = gswip_setup,
.port_enable = gswip_port_enable,
.port_disable = gswip_port_disable,
.port_bridge_join = gswip_port_bridge_join,
.port_bridge_leave = gswip_port_bridge_leave,
.port_fast_age = gswip_port_fast_age,
.port_vlan_filtering = gswip_port_vlan_filtering,
.port_vlan_prepare = gswip_port_vlan_prepare,
.port_vlan_add = gswip_port_vlan_add,
.port_vlan_del = gswip_port_vlan_del,
.port_stp_state_set = gswip_port_stp_state_set,
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
.phylink_validate = gswip_phylink_validate,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,