Commit 1c60c7f9 authored by Florian Fainelli, committed by David S. Miller

net: dsa: bcm_sf2: Get rid of unmarshalling functions

Now that we have migrated the CFP rule handling to a list with a
software copy, the delete/get operations just return what is on the
list; there is no need to read back from the hardware, which is both
slow and more error-prone.
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1c0130f0
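
For context, the sketch below illustrates the kind of list-based bookkeeping the parent change introduces, which is what makes the readback helpers removed here redundant: with a software copy of each rule cached on a list, "get" is a lookup plus memcpy instead of a series of TCAM register reads. This is a hedged sketch, not the actual driver code; the names struct cfp_rule, bcm_sf2_cfp_rule_find and priv->cfp.rules_list are illustrative assumptions, not necessarily the driver's exact identifiers.

/* Hedged sketch: assumes the parent commit keeps a software copy of
 * every programmed CFP rule on a list in the driver private structure.
 * All identifiers below are illustrative, not the driver's exact ones.
 */
#include <linux/list.h>
#include <linux/string.h>
#include <linux/ethtool.h>

struct cfp_rule {
	int port;				/* ingress port owning the rule */
	struct ethtool_rx_flow_spec fs;		/* cached copy of the user's spec */
	struct list_head next;			/* linkage on the rules list */
};

/* Walk the cached rules; hypothetical helper name and list location */
static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next)
		if (rule->port == port && rule->fs.location == location)
			return rule;

	return NULL;
}

/* "get" becomes a memcpy from the cached copy, no TCAM readback */
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	return 0;
}
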
@@ -974,316 +974,6 @@ static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int __maybe_unused bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
						   struct ethtool_tcpip4_spec *v4_spec,
						   bool mask)
{
	u32 reg, offset, ipv4;
	u16 src_dst_port;

	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);

	reg = core_readl(priv, offset);
	/* src port [15:8] */
	src_dst_port = reg << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);

	reg = core_readl(priv, offset);
	/* src port [7:0] */
	src_dst_port |= (reg >> 24);

	v4_spec->pdst = cpu_to_be16(src_dst_port);
	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));

	/* IPv4 dst [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);

	reg = core_readl(priv, offset);
	/* IPv4 dst [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	/* IPv4 dst [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	v4_spec->ip4dst = cpu_to_be32(ipv4);

	/* IPv4 src [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);

	/* Once the TCAM is programmed, the mask reflects the slice number
	 * being matched, don't bother checking it when reading back the
	 * mask spec
	 */
	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	/* IPv4 src [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	/* IPv4 src [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	v4_spec->ip4src = cpu_to_be32(ipv4);

	return 0;
}

static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
	u32 reg;
	int ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V4_FLOW;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V4_FLOW;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
	v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;

	ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
}

static int __maybe_unused bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
						   __be32 *ip6_addr,
						   __be16 *port,
						   bool mask)
{
	u32 reg, tmp, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	reg = core_readl(priv, offset);
	*port = cpu_to_be32(reg) >> 8;
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[3] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[2] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[1] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[0] = cpu_to_be32(tmp);

	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	return 0;
}

static int __maybe_unused bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv,
						    int port,
						    struct ethtool_rx_flow_spec *fs,
						    u32 next_loc)
{
	struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
	u32 reg;
	int ret;

	/* UDPv6 and TCPv6 both use ethtool_tcpip6_spec, so we are fine
	 * assuming tcp_ip6_spec here, these being part of a union.
	 */
	v6_spec = &fs->h_u.tcp_ip6_spec;
	v6_m_spec = &fs->m_u.tcp_ip6_spec;

	/* Read the second half first */
	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
				       false);
	if (ret)
		return ret;

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
				       &v6_m_spec->pdst, true);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations. We would not have the port enabled for this rule, so
	 * don't bother checking it.
	 */
	(void)core_readl(priv, CORE_CFP_DATA_PORT(7));

	/* The slice number is valid, so read the rule we are chained from now
	 * which is our first half.
	 */
	bcm_sf2_cfp_rule_addr_set(priv, next_loc);
	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V6_FLOW;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V6_FLOW;
		break;
	default:
		return -EINVAL;
	}

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
				       false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
					&v6_m_spec->psrc, true);
}

static int __maybe_unused bcm_sf2_cfp_rule_get_hw(struct bcm_sf2_priv *priv,
						  int port,
						  struct ethtool_rxnfc *nfc)
{
	u32 reg, ipv4_or_chain_id;
	unsigned int queue_num;
	int ret;

	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_ACT_POL_DATA0);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Extract the destination port */
	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
				  DST_MAP_IB_MASK) - 1;

	/* There is no Port 6, so we compensate for that here */
	if (nfc->fs.ring_cookie >= 6)
		nfc->fs.ring_cookie++;
	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;

	/* Extract the destination queue */
	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
	nfc->fs.ring_cookie += queue_num;

	/* Extract the L3_FRAMING or CHAIN_ID */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	/* With IPv6 rules this would contain a non-zero chain ID since
	 * we reserve entry 0 and it cannot be used. So if we read 0 here
	 * this means an IPv4 rule.
	 */
	ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
	if (ipv4_or_chain_id == 0)
		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
	else
		ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
						ipv4_or_chain_id);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
	if (!(reg & 1 << port))
		return -EINVAL;

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
......