Commit 5882d526 authored by David S. Miller

Merge branch 'net-dsa-bcm_sf2-Store-rules-in-lists'

Florian Fainelli says:

====================
net: dsa: bcm_sf2: Store rules in lists

This patch series changes the bcm_sf2 driver to keep a copy of the
inserted rules, as opposed to using the HW as a storage area, for a
number of reasons:

- it makes duplicate rule detection faster; before, that would have
  required reading back a full rule from the hardware

- it helps with Pablo's ongoing work to convert ethtool_rx_flow_spec
  to a more generic flow rule structure, by leaving fewer code paths
  to convert to the new structure/helpers

- we need cached copies to restore the rules during driver resumption:
  depending on the low-power mode the system entered, the switch may
  have lost all of its context
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 95772ec9 80f8dea8
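The pattern at the heart of the series: every rule programmed into the CFP
TCAM is mirrored by a kmalloc'ed copy kept on a kernel linked list, so
duplicate detection and ethtool read-back become list walks instead of
hardware reads. Below is a minimal userspace C sketch of that idea, not the
driver code itself; the two-field flow spec and the hand-rolled singly-linked
list are illustrative stand-ins for struct ethtool_rx_flow_spec and the
kernel's struct list_head.

/* Minimal sketch of the cache-rules-in-a-list idea, in plain userspace C.
 * The tiny "flow spec" and the singly-linked list are illustrative
 * assumptions; the driver uses struct ethtool_rx_flow_spec and the
 * kernel's struct list_head.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct flow_spec {              /* stand-in for ethtool_rx_flow_spec */
        unsigned int location;  /* TCAM slot the rule occupies */
        unsigned char match[8]; /* opaque match key */
};

struct cfp_rule {
        int port;
        struct flow_spec fs;
        struct cfp_rule *next;
};

static struct cfp_rule *rules;

/* Duplicate detection against the cached copies: a memcmp of the match
 * key, no hardware read needed. Returns the cached rule or NULL.
 */
static struct cfp_rule *rule_cmp(int port, const struct flow_spec *fs)
{
        struct cfp_rule *r;

        for (r = rules; r; r = r->next)
                if (r->port == port &&
                    !memcmp(r->fs.match, fs->match, sizeof(fs->match)))
                        return r;
        return NULL;
}

/* Insert path: reject duplicates first, program the (stubbed) hardware,
 * then remember a copy so a later resume can re-program it.
 */
static int rule_set(int port, const struct flow_spec *fs)
{
        struct cfp_rule *r;

        if (rule_cmp(port, fs))
                return -1;      /* -EEXIST in the driver */

        /* hw_program(port, fs) would go here */

        r = calloc(1, sizeof(*r));
        if (!r)
                return -1;      /* -ENOMEM */
        r->port = port;
        r->fs = *fs;
        r->next = rules;
        rules = r;
        return 0;
}

int main(void)
{
        struct flow_spec fs = { .location = 1, .match = "tcp80" };

        printf("first insert: %d\n", rule_set(0, &fs));  /* 0 */
        printf("duplicate:    %d\n", rule_set(0, &fs));  /* -1 */
        return 0;
}

The trade-off is a little memory for speed, plus a source of truth that
survives hardware context loss, which is exactly what the resume paths in
the diff below rely on.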
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -710,6 +710,10 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 		return ret;
 	}
 
+	ret = bcm_sf2_cfp_resume(ds);
+	if (ret)
+		return ret;
+
 	if (priv->hw_params.num_gphy == 1)
 		bcm_sf2_gphy_enable_set(ds, true);
@@ -1061,6 +1065,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 	spin_lock_init(&priv->indir_lock);
 	mutex_init(&priv->stats_mutex);
 	mutex_init(&priv->cfp.lock);
+	INIT_LIST_HEAD(&priv->cfp.rules_list);
 
 	/* CFP rule #0 cannot be used for specific classifications, flag it as
 	 * permanently used
@@ -1166,6 +1171,7 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
 	priv->wol_ports_mask = 0;
 	dsa_unregister_switch(priv->dev->ds);
+	bcm_sf2_cfp_exit(priv->dev->ds);
 
 	/* Disable all ports and interrupts */
 	bcm_sf2_sw_suspend(priv->dev->ds);
 	bcm_sf2_mdio_unregister(priv);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -56,6 +56,7 @@ struct bcm_sf2_cfp_priv {
 	DECLARE_BITMAP(used, CFP_NUM_RULES);
 	DECLARE_BITMAP(unique, CFP_NUM_RULES);
 	unsigned int rules_cnt;
+	struct list_head rules_list;
 };
 
 struct bcm_sf2_priv {
@@ -213,5 +214,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
 int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
 		      struct ethtool_rxnfc *nfc);
 int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+void bcm_sf2_cfp_exit(struct dsa_switch *ds);
+int bcm_sf2_cfp_resume(struct dsa_switch *ds);
 
 #endif /* __BCM_SF2_H */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -20,6 +20,12 @@
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"
 
+struct cfp_rule {
+	int port;
+	struct ethtool_rx_flow_spec fs;
+	struct list_head next;
+};
+
 struct cfp_udf_slice_layout {
 	u8 slices[UDFS_PER_SLICE];
 	u32 mask_value;
@@ -515,6 +521,61 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
 	core_writel(priv, reg, offset);
 }
 
+static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
+					      int port, u32 location)
+{
+	struct cfp_rule *rule;
+
+	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+		if (rule->port == port && rule->fs.location == location)
+			return rule;
+	}
+
+	return NULL;
+}
+
+static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
+				struct ethtool_rx_flow_spec *fs)
+{
+	struct cfp_rule *rule = NULL;
+	size_t fs_size = 0;
+	int ret = 1;
+
+	if (list_empty(&priv->cfp.rules_list))
+		return ret;
+
+	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+		ret = 1;
+		if (rule->port != port)
+			continue;
+
+		if (rule->fs.flow_type != fs->flow_type ||
+		    rule->fs.ring_cookie != fs->ring_cookie ||
+		    rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+			continue;
+
+		switch (fs->flow_type & ~FLOW_EXT) {
+		case TCP_V6_FLOW:
+		case UDP_V6_FLOW:
+			fs_size = sizeof(struct ethtool_tcpip6_spec);
+			break;
+		case TCP_V4_FLOW:
+		case UDP_V4_FLOW:
+			fs_size = sizeof(struct ethtool_tcpip4_spec);
+			break;
+		default:
+			continue;
+		}
+
+		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
+		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+		if (ret == 0)
+			break;
+	}
+
+	return ret;
+}
+
 static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 				     unsigned int port_num,
 				     unsigned int queue_num,
@@ -728,27 +789,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	return ret;
 }
 
-static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
-				struct ethtool_rx_flow_spec *fs)
+static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
+				   struct ethtool_rx_flow_spec *fs)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 	s8 cpu_port = ds->ports[port].cpu_dp->index;
 	__u64 ring_cookie = fs->ring_cookie;
 	unsigned int queue_num, port_num;
-	int ret = -EINVAL;
-
-	/* Check for unsupported extensions */
-	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
-	     fs->m_ext.data[1]))
-		return -EINVAL;
-
-	if (fs->location != RX_CLS_LOC_ANY &&
-	    test_bit(fs->location, priv->cfp.used))
-		return -EBUSY;
-
-	if (fs->location != RX_CLS_LOC_ANY &&
-	    fs->location > bcm_sf2_cfp_rule_size(priv))
-		return -EINVAL;
+	int ret;
 
 	/* This rule is a Wake-on-LAN filter and we must specifically
 	 * target the CPU port in order for it to be working.
@@ -787,12 +835,54 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
 					  queue_num, fs);
 		break;
 	default:
+		ret = -EINVAL;
 		break;
 	}
 
 	return ret;
 }
 
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+				struct ethtool_rx_flow_spec *fs)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	struct cfp_rule *rule = NULL;
+	int ret = -EINVAL;
+
+	/* Check for unsupported extensions */
+	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
+	     fs->m_ext.data[1]))
+		return -EINVAL;
+
+	if (fs->location != RX_CLS_LOC_ANY &&
+	    test_bit(fs->location, priv->cfp.used))
+		return -EBUSY;
+
+	if (fs->location != RX_CLS_LOC_ANY &&
+	    fs->location > bcm_sf2_cfp_rule_size(priv))
+		return -EINVAL;
+
+	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
+	if (ret == 0)
+		return -EEXIST;
+
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return -ENOMEM;
+
+	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
+	if (ret) {
+		kfree(rule);
+		return ret;
+	}
+
+	rule->port = port;
+	memcpy(&rule->fs, fs, sizeof(*fs));
+	list_add_tail(&rule->next, &priv->cfp.rules_list);
+
+	return ret;
+}
+
 static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
 				    u32 loc, u32 *next_loc)
 {
@@ -830,19 +920,12 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
 	return 0;
 }
 
-static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
-				u32 loc)
+static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
+				   u32 loc)
 {
 	u32 next_loc = 0;
 	int ret;
 
-	/* Refuse deleting unused rules, and those that are not unique since
-	 * that could leave IPv6 rules with one of the chained rule in the
-	 * table.
-	 */
-	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
-		return -EINVAL;
-
 	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
 	if (ret)
 		return ret;
@@ -854,318 +937,54 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
 	return ret;
 }
 
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
+{
+	struct cfp_rule *rule;
+	int ret;
+
+	/* Refuse deleting unused rules, and those that are not unique since
+	 * that could leave IPv6 rules with one of the chained rule in the
+	 * table.
+	 */
+	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
+		return -EINVAL;
+
+	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
+	if (!rule)
+		return -EINVAL;
+
+	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);
+
+	list_del(&rule->next);
+	kfree(rule);
+
+	return ret;
+}
+
 static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
 {
 	unsigned int i;
 
 	for (i = 0; i < sizeof(flow->m_u); i++)
 		flow->m_u.hdata[i] ^= 0xff;
 
 	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
 	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
 	flow->m_ext.data[0] ^= cpu_to_be32(~0);
 	flow->m_ext.data[1] ^= cpu_to_be32(~0);
 }
 
-static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
-				    struct ethtool_tcpip4_spec *v4_spec,
-				    bool mask)
-{
-	u32 reg, offset, ipv4;
-	u16 src_dst_port;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(3);
-	else
-		offset = CORE_CFP_DATA_PORT(3);
-
-	reg = core_readl(priv, offset);
-	/* src port [15:8] */
-	src_dst_port = reg << 8;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(2);
-	else
-		offset = CORE_CFP_DATA_PORT(2);
-
-	reg = core_readl(priv, offset);
-	/* src port [7:0] */
-	src_dst_port |= (reg >> 24);
-
-	v4_spec->pdst = cpu_to_be16(src_dst_port);
-	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
-
-	/* IPv4 dst [15:8] */
-	ipv4 = (reg & 0xff) << 8;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(1);
-	else
-		offset = CORE_CFP_DATA_PORT(1);
-
-	reg = core_readl(priv, offset);
-	/* IPv4 dst [31:16] */
-	ipv4 |= ((reg >> 8) & 0xffff) << 16;
-	/* IPv4 dst [7:0] */
-	ipv4 |= (reg >> 24) & 0xff;
-	v4_spec->ip4dst = cpu_to_be32(ipv4);
-
-	/* IPv4 src [15:8] */
-	ipv4 = (reg & 0xff) << 8;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(0);
-	else
-		offset = CORE_CFP_DATA_PORT(0);
-	reg = core_readl(priv, offset);
-
-	/* Once the TCAM is programmed, the mask reflects the slice number
-	 * being matched, don't bother checking it when reading back the
-	 * mask spec
-	 */
-	if (!mask && !(reg & SLICE_VALID))
-		return -EINVAL;
-
-	/* IPv4 src [7:0] */
-	ipv4 |= (reg >> 24) & 0xff;
-	/* IPv4 src [31:16] */
-	ipv4 |= ((reg >> 8) & 0xffff) << 16;
-	v4_spec->ip4src = cpu_to_be32(ipv4);
-
-	return 0;
-}
-
-static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
-				     struct ethtool_rx_flow_spec *fs)
-{
-	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
-	u32 reg;
-	int ret;
-
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
-	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
-	case IPPROTO_TCP:
-		fs->flow_type = TCP_V4_FLOW;
-		v4_spec = &fs->h_u.tcp_ip4_spec;
-		v4_m_spec = &fs->m_u.tcp_ip4_spec;
-		break;
-	case IPPROTO_UDP:
-		fs->flow_type = UDP_V4_FLOW;
-		v4_spec = &fs->h_u.udp_ip4_spec;
-		v4_m_spec = &fs->m_u.udp_ip4_spec;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
-	v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
-
-	ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
-	if (ret)
-		return ret;
-
-	return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
-}
-
-static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
-				    __be32 *ip6_addr, __be16 *port,
-				    bool mask)
-{
-	u32 reg, tmp, offset;
-
-	/* C-Tag		[31:24]
-	 * UDF_n_B8		[23:8]	(port)
-	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(4);
-	else
-		offset = CORE_CFP_DATA_PORT(4);
-	reg = core_readl(priv, offset);
-	*port = cpu_to_be32(reg) >> 8;
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
-	 * UDF_n_B6		[23:8]	(addr[31:16])
-	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(3);
-	else
-		offset = CORE_CFP_DATA_PORT(3);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[3] = cpu_to_be32(tmp);
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
-	 * UDF_n_B4		[23:8]	(addr[63:48])
-	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(2);
-	else
-		offset = CORE_CFP_DATA_PORT(2);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[2] = cpu_to_be32(tmp);
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
-	 * UDF_n_B2		[23:8]	(addr[95:80])
-	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(1);
-	else
-		offset = CORE_CFP_DATA_PORT(1);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[1] = cpu_to_be32(tmp);
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
-	 * UDF_n_B0		[23:8]	(addr[127:112])
-	 * Reserved		[7:4]
-	 * Slice ID		[3:2]
-	 * Slice valid		[1:0]
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(0);
-	else
-		offset = CORE_CFP_DATA_PORT(0);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[0] = cpu_to_be32(tmp);
-
-	if (!mask && !(reg & SLICE_VALID))
-		return -EINVAL;
-
-	return 0;
-}
-
-static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
-				     struct ethtool_rx_flow_spec *fs,
-				     u32 next_loc)
-{
-	struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
-	u32 reg;
-	int ret;
-
-	/* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine
-	 * assuming tcp_ip6_spec here being an union.
-	 */
-	v6_spec = &fs->h_u.tcp_ip6_spec;
-	v6_m_spec = &fs->m_u.tcp_ip6_spec;
-
-	/* Read the second half first */
-	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
-				       false);
-	if (ret)
-		return ret;
-
-	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
-				       &v6_m_spec->pdst, true);
-	if (ret)
-		return ret;
-
-	/* Read last to avoid next entry clobbering the results during search
-	 * operations. We would not have the port enabled for this rule, so
-	 * don't bother checking it.
-	 */
-	(void)core_readl(priv, CORE_CFP_DATA_PORT(7));
-
-	/* The slice number is valid, so read the rule we are chained from now
-	 * which is our first half.
-	 */
-	bcm_sf2_cfp_rule_addr_set(priv, next_loc);
-	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
-	if (ret)
-		return ret;
-
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
-	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
-	case IPPROTO_TCP:
-		fs->flow_type = TCP_V6_FLOW;
-		break;
-	case IPPROTO_UDP:
-		fs->flow_type = UDP_V6_FLOW;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
-				       false);
-	if (ret)
-		return ret;
-
-	return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
-					&v6_m_spec->psrc, true);
-}
-
 static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
 				struct ethtool_rxnfc *nfc)
 {
-	u32 reg, ipv4_or_chain_id;
-	unsigned int queue_num;
-	int ret;
-
-	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
-
-	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
-	if (ret)
-		return ret;
-
-	reg = core_readl(priv, CORE_ACT_POL_DATA0);
-
-	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
-	if (ret)
-		return ret;
-
-	/* Extract the destination port */
-	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
-				  DST_MAP_IB_MASK) - 1;
-
-	/* There is no Port 6, so we compensate for that here */
-	if (nfc->fs.ring_cookie >= 6)
-		nfc->fs.ring_cookie++;
-	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;
-
-	/* Extract the destination queue */
-	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
-	nfc->fs.ring_cookie += queue_num;
-
-	/* Extract the L3_FRAMING or CHAIN_ID */
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
-	/* With IPv6 rules this would contain a non-zero chain ID since
-	 * we reserve entry 0 and it cannot be used. So if we read 0 here
-	 * this means an IPv4 rule.
-	 */
-	ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
-	if (ipv4_or_chain_id == 0)
-		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
-	else
-		ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
-						ipv4_or_chain_id);
-	if (ret)
-		return ret;
-
-	/* Read last to avoid next entry clobbering the results during search
-	 * operations
-	 */
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
-	if (!(reg & 1 << port))
-		return -EINVAL;
+	struct cfp_rule *rule;
+
+	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
+	if (!rule)
+		return -EINVAL;
+
+	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));
 
 	bcm_sf2_invert_masks(&nfc->fs);
 
 	/* Put the TCAM size here */
@@ -1302,3 +1121,51 @@ int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
 
 	return 0;
 }
+
+void bcm_sf2_cfp_exit(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	struct cfp_rule *rule, *n;
+
+	if (list_empty(&priv->cfp.rules_list))
+		return;
+
+	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
+		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
+}
+
+int bcm_sf2_cfp_resume(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	struct cfp_rule *rule;
+	int ret = 0;
+	u32 reg;
+
+	if (list_empty(&priv->cfp.rules_list))
+		return ret;
+
+	reg = core_readl(priv, CORE_CFP_CTL_REG);
+	reg &= ~CFP_EN_MAP_MASK;
+	core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+	ret = bcm_sf2_cfp_rst(priv);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
+					      rule->fs.location);
+		if (ret) {
+			dev_err(ds->dev, "failed to remove rule\n");
+			return ret;
+		}
+
+		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
+		if (ret) {
+			dev_err(ds->dev, "failed to restore rule\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1068,6 +1068,7 @@ static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
 
 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
+	unsigned int index;
 	u32 reg;
 
 	/* Disable RXCHK, active filters and Broadcom tag matching */
@@ -1076,6 +1077,15 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 		  RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
 	rxchk_writel(priv, reg, RXCHK_CONTROL);
 
+	/* Make sure we restore correct CID index in case HW lost
+	 * its context during deep idle state
+	 */
+	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+		rxchk_writel(priv, priv->filters_loc[index] <<
+			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
+		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+	}
+
 	/* Clear the MagicPacket detection logic */
 	mpd_enable_set(priv, false);
@@ -2189,6 +2199,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
 	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
 	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+	priv->filters_loc[index] = nfc->fs.location;
 	set_bit(index, priv->filters);
 
 	return 0;
@@ -2208,6 +2219,7 @@ static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
 	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
 	 */
 	clear_bit(index, priv->filters);
+	priv->filters_loc[index] = 0;
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -786,6 +786,7 @@ struct bcm_sysport_priv {
 	/* Ethtool */
 	u32 msg_enable;
 	DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
+	u32 filters_loc[RXCHK_BRCM_TAG_MAX];
 
 	struct bcm_sysport_stats64 stats64;
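The bcm_sf2_cfp_resume() path above is the payoff of the cached list: after
a deep low-power state the TCAM contents cannot be trusted, so the driver
resets the CFP and replays every cached rule into the hardware. Below is a
self-contained sketch of that replay, again in plain userspace C with the
hardware reduced to an array; every name in it is illustrative, not driver
API.

/* Sketch of the resume flow: the "TCAM" is an array that deep sleep may
 * have wiped; the driver-owned cache is the source of truth used to
 * re-program it. All names are illustrative stand-ins.
 */
#include <stdio.h>
#include <string.h>

#define TCAM_SLOTS 8

struct rule { int valid; int port; char match[8]; };

static struct rule tcam[TCAM_SLOTS];    /* volatile hardware state */
static struct rule cache[TCAM_SLOTS];   /* driver-owned copies */

static void hw_program(int slot, const struct rule *r)
{
        tcam[slot] = *r;
        tcam[slot].valid = 1;
}

/* On resume, assume nothing survived: clear every slot, then replay the
 * cached rules, mirroring the remove-then-insert loop in the driver.
 */
static void cfp_resume(void)
{
        int slot;

        memset(tcam, 0, sizeof(tcam)); /* rst: wipe stale entries */
        for (slot = 0; slot < TCAM_SLOTS; slot++)
                if (cache[slot].valid)
                        hw_program(slot, &cache[slot]);
}

int main(void)
{
        cache[1] = (struct rule){ .valid = 1, .port = 0, .match = "udp53" };
        hw_program(1, &cache[1]);

        memset(tcam, 0, sizeof(tcam));  /* deep sleep wipes the TCAM */
        cfp_resume();                   /* restore from cached copies */

        printf("slot 1 valid after resume: %d\n", tcam[1].valid); /* 1 */
        return 0;
}

The bcmsysport filters_loc[] addition above plays the same role in
miniature: it remembers just enough state (each filter's location) for
bcm_sysport_resume_from_wol() to re-program the RXCHK filters after the
hardware lost its context.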