Commit f97e971d authored by David S. Miller

Merge branch 'sparx5-mrouter'

Casper Andersson says:

====================
net: sparx5: add mrouter support

This series adds support for multicast router ports to SparX5. To manage
mrouter ports, the driver must keep track of mdb entries. When adding an
mrouter port, the driver has to iterate over all mdb entries and modify
them accordingly.

v2:
- add bailout in free_mdb
- re-arrange mdb struct to avoid holes
- change devm_kzalloc -> kzalloc
- change GFP_ATOMIC -> GFP_KERNEL
- fix spelling
====================
Reviewed-by: Steen Hegelund <Steen.Hegelund@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9c5d03d3 04e551d6
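
The cover letter describes the core mechanism: the driver keeps a software list of MDB entries so that marking a port as an mrouter port can walk every entry and add the port to the hardware replication set of each IP multicast group, without recording it as a joined member. As a rough illustration of that bookkeeping (not the driver's code; the entry layout, helper names and port-mask representation below are invented for the example), a minimal self-contained C model might look like this:

/* Minimal userspace model of the MDB bookkeeping described above; not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mdb_entry {
	uint8_t addr[6];           /* group MAC address */
	uint16_t vid;
	uint64_t joined_ports[2];  /* ports that actually joined the group */
	uint64_t hw_ports[2];      /* ports the hardware replicates to */
};

static void hw_port_set(uint64_t hw[2], int port, bool enable)
{
	if (enable)
		hw[port / 64] |= 1ULL << (port % 64);
	else
		hw[port / 64] &= ~(1ULL << (port % 64));
}

static bool is_ip_mcast(const uint8_t *a)
{
	/* 01:00:5e:0x:xx:xx is IPv4 multicast, 33:33:xx:xx:xx:xx is IPv6 multicast */
	return (a[0] == 0x01 && a[1] == 0x00 && a[2] == 0x5e && !(a[3] & 0x80)) ||
	       (a[0] == 0x33 && a[1] == 0x33);
}

/* Turning a port into an mrouter port: add it to the hardware set of every
 * IP multicast group it has not explicitly joined, but leave joined_ports
 * untouched -- that mask only records real group members. */
static void set_mrouter(struct mdb_entry *entries, int n, int port, bool enable)
{
	for (int i = 0; i < n; i++) {
		bool joined = (entries[i].joined_ports[port / 64] >> (port % 64)) & 1;

		if (is_ip_mcast(entries[i].addr) && !joined)
			hw_port_set(entries[i].hw_ports, port, enable);
	}
}

int main(void)
{
	struct mdb_entry e = { .addr = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 }, .vid = 1 };

	set_mrouter(&e, 1, 7, true);
	printf("port 7 now in hardware set: %d\n", (int)((e.hw_ports[0] >> 7) & 1));
	return 0;
}

The real implementation in this series keeps the entries on sparx5->mdb_entries under mdb_lock and programs the PGID table rather than a local mask, as can be seen in the switchdev diff below.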
@@ -277,6 +277,7 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
spx5_port->is_mrouter = false;
sparx5->ports[config->portno] = spx5_port;
err = sparx5_port_init(sparx5, spx5_port, &config->conf);
@@ -661,6 +662,9 @@ static int sparx5_start(struct sparx5 *sparx5)
queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
SPX5_MACT_PULL_DELAY);
mutex_init(&sparx5->mdb_lock);
INIT_LIST_HEAD(&sparx5->mdb_entries);
err = sparx5_register_netdevs(sparx5);
if (err)
return err;
......
@@ -190,6 +190,7 @@ struct sparx5_port {
u8 ptp_cmd;
u16 ts_id;
struct sk_buff_head tx_skbs;
bool is_mrouter;
};
enum sparx5_core_clockfreq {
@@ -215,6 +216,15 @@ struct sparx5_skb_cb {
unsigned long jiffies;
};
struct sparx5_mdb_entry {
struct list_head list;
DECLARE_BITMAP(port_mask, SPX5_PORTS);
unsigned char addr[ETH_ALEN];
bool cpu_copy;
u16 vid;
u16 pgid_idx;
};
#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
#define SPARX5_SKB_CB(skb) \
((struct sparx5_skb_cb *)((skb)->cb))
@@ -256,6 +266,10 @@ struct sparx5 {
struct list_head mact_entries;
/* mac table list (mact_entries) mutex */
struct mutex mact_lock;
/* SW MDB table */
struct list_head mdb_entries;
/* mdb list mutex */
struct mutex mdb_lock;
struct delayed_work mact_work;
struct workqueue_struct *mact_queue;
/* Board specifics */
@@ -325,6 +339,7 @@ void sparx5_mact_init(struct sparx5 *sparx5);
/* sparx5_vlan.c */
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid);
void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]);
void sparx5_update_fwd(struct sparx5 *sparx5);
void sparx5_vlan_init(struct sparx5 *sparx5);
......
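
The header changes above introduce struct sparx5_mdb_entry, which keys each software entry on (group address, VID), tracks joining ports in a port bitmap and remembers whether the CPU receives a copy. A hedged, self-contained sketch of that lookup and lifetime rule is shown below; the struct, find() and can_release() names are illustrative stand-ins, not the driver's API:

/* Illustrative userspace model of the (addr, vid) keyed MDB list; not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct entry {
	struct entry *next;
	uint8_t addr[6];
	uint16_t vid;
	uint64_t port_mask[2];  /* stands in for DECLARE_BITMAP(port_mask, SPX5_PORTS) */
	bool cpu_copy;
	uint16_t pgid_idx;
};

/* Lookup mirrors the idea of sparx5_mdb_get_entry(): match on both addr and vid. */
static struct entry *find(struct entry *head, const uint8_t *addr, uint16_t vid)
{
	for (struct entry *e = head; e; e = e->next)
		if (!memcmp(e->addr, addr, 6) && e->vid == vid)
			return e;
	return NULL;
}

/* An entry can be torn down once no port has joined and the CPU no longer
 * needs a copy of the group traffic. */
static bool can_release(const struct entry *e)
{
	return !e->port_mask[0] && !e->port_mask[1] && !e->cpu_copy;
}

int main(void)
{
	struct entry e = { .addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, .vid = 1 };

	printf("found: %d, releasable: %d\n",
	       find(&e, e.addr, 1) != NULL, can_release(&e));
	return 0;
}

The reason for keeping a separate joined-port bitmap shows up in the deletion path later in this merge: mrouter ports may sit in the hardware PGID without ever appearing in port_mask, so an empty port_mask together with a cleared cpu_copy is what decides when the entry can be removed.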
@@ -29,14 +29,23 @@ static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
return 0;
}
static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
{
bool should_flood = flood_flag || port->is_mrouter;
int pgid;
for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++)
sparx5_pgid_update_mask(port, pgid, should_flood);
}
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
struct switchdev_brport_flags flags)
{
int pgid;
if (flags.mask & BR_MCAST_FLOOD) {
sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD));
sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
}
if (flags.mask & BR_MCAST_FLOOD)
for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
if (flags.mask & BR_FLOOD)
sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
if (flags.mask & BR_BCAST_FLOOD)
@@ -82,6 +91,37 @@ static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
sparx5_set_ageing(port->sparx5, ageing_time);
}
static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
struct net_device *orig_dev,
bool enable)
{
struct sparx5 *sparx5 = port->sparx5;
struct sparx5_mdb_entry *e;
bool flood_flag;
if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter))
return;
/* Add/del mrouter port on all active mdb entries in HW.
* Don't change entry port mask, since that represents
* ports that actually joined that group.
*/
mutex_lock(&sparx5->mdb_lock);
list_for_each_entry(e, &sparx5->mdb_entries, list) {
if (!test_bit(port->portno, e->port_mask) &&
ether_addr_is_ip_mcast(e->addr))
sparx5_pgid_update_mask(port, e->pgid_idx, enable);
}
mutex_unlock(&sparx5->mdb_lock);
/* Enable/disable flooding depending on if port is mrouter port
* or if mcast flood is enabled.
*/
port->is_mrouter = enable;
flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
sparx5_port_update_mcast_ip_flood(port, flood_flag);
}
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -110,6 +150,11 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
port->vlan_aware = attr->u.vlan_filtering;
sparx5_vlan_port_apply(port->sparx5, port);
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
sparx5_port_attr_mrouter_set(port,
attr->orig_dev,
attr->u.mrouter);
break;
default:
return -EOPNOTSUPP;
}
@@ -386,16 +431,95 @@ static int sparx5_handle_port_vlan_add(struct net_device *dev,
v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
const unsigned char *addr,
u16 vid,
struct sparx5_mdb_entry **entry_out)
{
struct sparx5_mdb_entry *entry;
u16 pgid_idx;
int err;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
if (err) {
kfree(entry);
return err;
}
memcpy(entry->addr, addr, ETH_ALEN);
entry->vid = vid;
entry->pgid_idx = pgid_idx;
mutex_lock(&sparx5->mdb_lock);
list_add_tail(&entry->list, &sparx5->mdb_entries);
mutex_unlock(&sparx5->mdb_lock);
*entry_out = entry;
return 0;
}
static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
const unsigned char *addr,
u16 vid)
{
struct sparx5_mdb_entry *entry, *tmp;
mutex_lock(&sparx5->mdb_lock);
list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
if ((vid == 0 || entry->vid == vid) &&
ether_addr_equal(addr, entry->addr)) {
list_del(&entry->list);
sparx5_pgid_free(sparx5, entry->pgid_idx);
kfree(entry);
goto out;
}
}
out:
mutex_unlock(&sparx5->mdb_lock);
}
static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
const unsigned char *addr,
u16 vid)
{
struct sparx5_mdb_entry *e, *found = NULL;
mutex_lock(&sparx5->mdb_lock);
list_for_each_entry(e, &sparx5->mdb_entries, list) {
if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
found = e;
goto out;
}
}
out:
mutex_unlock(&sparx5->mdb_lock);
return found;
}
static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
{
spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
ANA_AC_PGID_MISC_CFG(pgid));
}
static int sparx5_handle_port_mdb_add(struct net_device *dev,
struct notifier_block *nb,
const struct switchdev_obj_port_mdb *v)
{
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *spx5 = port->sparx5;
u16 pgid_idx, vid;
u32 mact_entry;
bool is_host;
int res, err;
struct sparx5_mdb_entry *entry;
bool is_host, is_new;
int err, i;
u16 vid;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
@@ -410,66 +534,36 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
else
vid = v->vid;
res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
if (res == 0) {
pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
/* MC_IDX starts after the port masks in the PGID table */
pgid_idx += SPX5_PORTS;
if (is_host)
spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
ANA_AC_PGID_MISC_CFG(pgid_idx));
else
sparx5_pgid_update_mask(port, pgid_idx, true);
} else {
err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
if (err) {
netdev_warn(dev, "multicast pgid table full\n");
is_new = false;
entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
if (!entry) {
err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
is_new = true;
if (err)
return err;
}
if (is_host)
spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
ANA_AC_PGID_MISC_CFG(pgid_idx));
else
sparx5_pgid_update_mask(port, pgid_idx, true);
err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
if (err) {
netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
sparx5_pgid_free(spx5, pgid_idx);
sparx5_pgid_update_mask(port, pgid_idx, false);
return err;
}
mutex_lock(&spx5->mdb_lock);
/* Add any mrouter ports to the new entry */
if (is_new && ether_addr_is_ip_mcast(v->addr))
for (i = 0; i < SPX5_PORTS; i++)
if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
sparx5_pgid_update_mask(spx5->ports[i],
entry->pgid_idx,
true);
if (is_host && !entry->cpu_copy) {
sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
entry->cpu_copy = true;
} else if (!is_host) {
sparx5_pgid_update_mask(port, entry->pgid_idx, true);
set_bit(port->portno, entry->port_mask);
}
mutex_unlock(&spx5->mdb_lock);
return 0;
}
static int sparx5_mdb_del_entry(struct net_device *dev,
struct sparx5 *spx5,
const unsigned char mac[ETH_ALEN],
const u16 vid,
u16 pgid_idx)
{
int err;
sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);
err = sparx5_mact_forget(spx5, mac, vid);
if (err) {
netdev_warn(dev, "could not forget mac address %pM", mac);
return err;
}
err = sparx5_pgid_free(spx5, pgid_idx);
if (err) {
netdev_err(dev, "attempted to free already freed pgid\n");
return err;
}
return 0;
}
@@ -479,42 +573,45 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
{
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *spx5 = port->sparx5;
u16 pgid_idx, vid;
u32 mact_entry, res, pgid_entry[3], misc_cfg;
bool host_ena;
struct sparx5_mdb_entry *entry;
bool is_host;
u16 vid;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
is_host = netif_is_bridge_master(v->obj.orig_dev);
if (!br_vlan_enabled(spx5->hw_bridge_dev))
vid = 1;
else
vid = v->vid;
res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
if (res == 0) {
pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
/* MC_IDX starts after the port masks in the PGID table */
pgid_idx += SPX5_PORTS;
if (netif_is_bridge_master(v->obj.orig_dev))
spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(0),
ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
ANA_AC_PGID_MISC_CFG(pgid_idx));
else
sparx5_pgid_update_mask(port, pgid_idx, false);
entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
if (!entry)
return 0;
misc_cfg = spx5_rd(spx5, ANA_AC_PGID_MISC_CFG(pgid_idx));
host_ena = ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(misc_cfg);
mutex_lock(&spx5->mdb_lock);
if (is_host && entry->cpu_copy) {
sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
entry->cpu_copy = false;
} else if (!is_host) {
clear_bit(port->portno, entry->port_mask);
sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS) && !host_ena)
/* No ports or CPU are in MC group. Remove entry */
return sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
/* Port not mrouter port or addr is L2 mcast, remove port from mask. */
if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
sparx5_pgid_update_mask(port, entry->pgid_idx, false);
}
mutex_unlock(&spx5->mdb_lock);
if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
/* Clear pgid in case mrouter ports exists
* that are not part of the group.
*/
sparx5_pgid_clear(spx5, entry->pgid_idx);
sparx5_mact_forget(spx5, entry->addr, entry->vid);
sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
}
return 0;
}
......
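
One behaviour worth noting in the switchdev changes above is the flooding decision: sparx5_port_update_mcast_ip_flood() ORs the bridge port's BR_MCAST_FLOOD flag with is_mrouter before programming the PGID_IPV4_MC_DATA..PGID_IPV6_MC_CTRL range, so an mrouter port keeps receiving unregistered IP multicast even when flooding is switched off on the bridge port. A minimal stand-alone model of that decision (the enum values and array stand in for the real PGID registers and are invented for the example) could look like:

/* Hedged model of the "flood or mrouter" decision; not driver code. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the range of IP multicast flood PGIDs the driver walks. */
enum { IPV4_MC_DATA, IPV4_MC_CTRL, IPV6_MC_DATA, IPV6_MC_CTRL, N_MC_PGIDS };

static void update_mcast_ip_flood(bool pgids[N_MC_PGIDS], bool mcast_flood,
				  bool is_mrouter)
{
	/* An mrouter port must see all IP multicast, so it floods even when
	 * the bridge port's BR_MCAST_FLOOD flag is off. */
	bool should_flood = mcast_flood || is_mrouter;

	for (int pgid = IPV4_MC_DATA; pgid < N_MC_PGIDS; pgid++)
		pgids[pgid] = should_flood;
}

int main(void)
{
	bool pgids[N_MC_PGIDS] = { false };

	update_mcast_ip_flood(pgids, /*mcast_flood=*/false, /*is_mrouter=*/true);
	printf("IPv6 MC data flood after enabling mrouter: %d\n", pgids[IPV6_MC_DATA]);
	return 0;
}

When the port later loses its mrouter role, the driver calls the same helper again with the port's current BR_MCAST_FLOOD value, which restores the plain flooding behaviour.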
@@ -138,6 +138,13 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
}
}
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
{
spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
}
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
......
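
The PGID helpers above operate on a destination mask that is wider than 32 ports and therefore spread across three consecutive registers (ANA_AC_PGID_CFG, CFG1 and CFG2): sparx5_pgid_clear() zeroes all three, and sparx5_pgid_read_mask() returns them as a u32[3] that callers can treat as one bitmap. A hedged userspace model of such a split mask is sketched below; the helper names are invented for the example:

/* Hedged model of a port mask split across three 32-bit words; not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_WORDS 3  /* mirrors the u32 portmask[3] in sparx5_pgid_read_mask() */

static void mask_set(uint32_t mask[MASK_WORDS], int port, bool enable)
{
	if (enable)
		mask[port / 32] |= 1u << (port % 32);
	else
		mask[port / 32] &= ~(1u << (port % 32));
}

static bool mask_empty(const uint32_t mask[MASK_WORDS])
{
	for (int i = 0; i < MASK_WORDS; i++)
		if (mask[i])
			return false;
	return true;
}

/* Rough equivalent of sparx5_pgid_clear(): zero all three words at once. */
static void mask_clear(uint32_t mask[MASK_WORDS])
{
	for (int i = 0; i < MASK_WORDS; i++)
		mask[i] = 0;
}

int main(void)
{
	uint32_t mask[MASK_WORDS] = { 0 };

	mask_set(mask, 40, true);  /* port 40 lands in the second word */
	printf("empty: %d\n", mask_empty(mask));
	mask_clear(mask);
	printf("empty after clear: %d\n", mask_empty(mask));
	return 0;
}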
@@ -428,6 +428,28 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
return true;
}
static inline bool ether_addr_is_ipv4_mcast(const u8 *addr)
{
u8 base[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
return ether_addr_equal_masked(addr, base, mask);
}
static inline bool ether_addr_is_ipv6_mcast(const u8 *addr)
{
u8 base[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
return ether_addr_equal_masked(addr, base, mask);
}
static inline bool ether_addr_is_ip_mcast(const u8 *addr)
{
return ether_addr_is_ipv4_mcast(addr) ||
ether_addr_is_ipv6_mcast(addr);
}
/**
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
......
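
Finally, the etherdevice.h additions classify a destination MAC as IPv4 multicast (prefix 01:00:5e with the top bit of the fourth byte clear) or IPv6 multicast (prefix 33:33) via ether_addr_equal_masked(), which is what lets the driver treat mrouter ports specially only for IP multicast groups. For experimentation outside the kernel, an equivalent userspace re-implementation (illustrative only; the in-kernel helpers are the authoritative ones) might be:

/* Userspace re-implementation of the masked-prefix checks above, for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static bool addr_equal_masked(const uint8_t *a, const uint8_t *b, const uint8_t *mask)
{
	for (int i = 0; i < ETH_ALEN; i++)
		if ((a[i] ^ b[i]) & mask[i])
			return false;
	return true;
}

static bool is_ipv4_mcast(const uint8_t *addr)
{
	const uint8_t base[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
	const uint8_t mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };

	return addr_equal_masked(addr, base, mask);
}

static bool is_ipv6_mcast(const uint8_t *addr)
{
	const uint8_t base[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
	const uint8_t mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

	return addr_equal_masked(addr, base, mask);
}

int main(void)
{
	const uint8_t v4[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 }; /* e.g. 224.1.2.3 */
	const uint8_t l2[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e }; /* LLDP, L2-only mcast */

	printf("IPv4 group MAC -> IP mcast: %d\n", is_ipv4_mcast(v4) || is_ipv6_mcast(v4));
	printf("LLDP MAC       -> IP mcast: %d\n", is_ipv4_mcast(l2) || is_ipv6_mcast(l2));
	return 0;
}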