Commit 09bef832 authored by David S. Miller

Merge branch 'stmmac-vlan-priority-rx-steering'

Ong Boon Leong says:

====================
stmmac: add VLAN priority based RX steering

The current tc flower implementation in stmmac supports offloading of both
L3 and L4 filters. This series adds support for VLAN priority based steering
of RX frames into different RX queues.
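For reference, the core of the usage model is the combination of an mqprio
root qdisc (mapping priorities to traffic classes, each backed by one queue)
and an offloaded flower filter matching vlan_prio with an hw_tc action. A
minimal sketch, using the same commands exercised in the tests below (eth0
and the identity priority-to-traffic-class mapping are just the example
configuration used here):

> tc qdisc add dev eth0 root mqprio num_tc 4 \
  map 0 1 2 3 0 0 0 0 0 0 0 0 0 0 0 0 \
  queues 1@0 1@1 1@2 1@3 hw 0
> tc qdisc add dev eth0 ingress
> tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 1 hw_tc 1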

The patches have been tested with both a configuration test (including L3/L4
filters) and a traffic test (multiple VLAN ping streams with RX frame
steering), as shown below:

> tc qdisc delete dev eth0 ingress

> tc qdisc del dev eth0 parent root 2> /dev/null
> tc qdisc del dev eth0 parent ffff: 2> /dev/null

> tc qdisc add dev eth0 ingress

> tc filter add dev eth0 parent ffff: protocol ip flower dst_ip 192.168.0.1 \
  src_ip 192.168.1.1 ip_proto tcp dst_port 5201 src_port 6201 action drop

> tc filter add dev eth0 parent ffff: protocol ip flower dst_ip 192.168.0.2 \
  src_ip 192.168.1.2 ip_proto tcp dst_port 5202 src_port 6202 action drop

> tc filter show dev eth0 ingress
filter parent ffff: protocol ip pref 49151 flower chain 0
filter parent ffff: protocol ip pref 49151 flower chain 0 handle 0x1
  eth_type ipv4
  ip_proto tcp
  dst_ip 192.168.0.2
  src_ip 192.168.1.2
  dst_port 5202
  src_port 6202
  in_hw in_hw_count 1
        action order 1: gact action drop
         random type none pass val 0
         index 2 ref 1 bind 1

filter parent ffff: protocol ip pref 49152 flower chain 0
filter parent ffff: protocol ip pref 49152 flower chain 0 handle 0x1
  eth_type ipv4
  ip_proto tcp
  dst_ip 192.168.0.1
  src_ip 192.168.1.1
  dst_port 5201
  src_port 6201
  in_hw in_hw_count 1
        action order 1: gact action drop
         random type none pass val 0
         index 1 ref 1 bind 1

> tc qdisc delete dev eth0 ingress

> tc qdisc del dev eth0 parent root 2> /dev/null
> tc qdisc del dev eth0 parent ffff: 2> /dev/null

> tc qdisc add dev eth0 ingress

> tc qdisc add dev eth0 root mqprio num_tc 4 \
  map 0 1 2 3 0 0 0 0 0 0 0 0 0 0 0 0 \
  queues 1@0 1@1 1@2 1@3 hw 0

> tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 0 hw_tc 3

> tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 1 hw_tc 2

> tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 2 hw_tc 1

> tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 3 hw_tc 0

> tc filter show dev eth0 ingress
filter parent ffff: protocol 802.1Q pref 49149 flower chain 0
filter parent ffff: protocol 802.1Q pref 49149 flower chain 0 handle 0x1 hw_tc 0
  vlan_prio 3
  in_hw in_hw_count 1
filter parent ffff: protocol 802.1Q pref 49150 flower chain 0
filter parent ffff: protocol 802.1Q pref 49150 flower chain 0 handle 0x1 hw_tc 1
  vlan_prio 2
  in_hw in_hw_count 1
filter parent ffff: protocol 802.1Q pref 49151 flower chain 0
filter parent ffff: protocol 802.1Q pref 49151 flower chain 0 handle 0x1 hw_tc 2
  vlan_prio 1
  in_hw in_hw_count 1
filter parent ffff: protocol 802.1Q pref 49152 flower chain 0
filter parent ffff: protocol 802.1Q pref 49152 flower chain 0 handle 0x1 hw_tc 3
  vlan_prio 0
  in_hw in_hw_count 1

> tc qdisc delete dev eth0 ingress

> ip address flush dev eth0
> ip address add 169.254.1.11/24 dev eth0

> ip link delete dev eth0.vlan1 2> /dev/null
> ip link add link eth0 name eth0.vlan1 type vlan id 1
> ip address flush dev eth0.vlan1 2> /dev/null
> ip address add 169.254.11.11/24 dev eth0.vlan1

> ip link delete dev eth0.vlan2 2> /dev/null
> ip link add link eth0 name eth0.vlan2 type vlan id 2
> ip address flush dev eth0.vlan2 2> /dev/null
> ip address add 169.254.12.11/24 dev eth0.vlan2

> ip link delete dev eth0.vlan3 2> /dev/null
> ip link add link eth0 name eth0.vlan3 type vlan id 3
> ip address flush dev eth0.vlan3 2> /dev/null
> ip address add 169.254.13.11/24 dev eth0.vlan3

> ip link delete dev eth0.vlan4 2> /dev/null
> ip link add link eth0 name eth0.vlan4 type vlan id 4
> ip address flush dev eth0.vlan4 2> /dev/null
> ip address add 169.254.14.11/24 dev eth0.vlan4

> ip address flush dev eth0
> ip address add 169.254.1.22/24 dev eth0

> ip link delete dev eth0.vlan1 2> /dev/null
> ip link add link eth0 name eth0.vlan1 type vlan id 1
> ip address flush dev eth0.vlan1 2> /dev/null
> ip address add 169.254.11.22/24 dev eth0.vlan1

> ip link delete dev eth0.vlan2 2> /dev/null
> ip link add link eth0 name eth0.vlan2 type vlan id 2
> ip address flush dev eth0.vlan2 2> /dev/null
> ip address add 169.254.12.22/24 dev eth0.vlan2

> ip link delete dev eth0.vlan3 2> /dev/null
> ip link add link eth0 name eth0.vlan3 type vlan id 3
> ip address flush dev eth0.vlan3 2> /dev/null
> ip address add 169.254.13.22/24 dev eth0.vlan3

> ip link delete dev eth0.vlan4 2> /dev/null
> ip link add link eth0 name eth0.vlan4 type vlan id 4
> ip address flush dev eth0.vlan4 2> /dev/null
> ip address add 169.254.14.22/24 dev eth0.vlan4

> mkdir -p /sys/fs/cgroup/net_prio/grp0
> echo eth0 0 > /sys/fs/cgroup/net_prio/grp0/net_prio.ifpriomap
> echo eth0.vlan1 0 >  /sys/fs/cgroup/net_prio/grp0/net_prio.ifpriomap
> mkdir -p /sys/fs/cgroup/net_prio/grp1
> echo eth0 0 > /sys/fs/cgroup/net_prio/grp1/net_prio.ifpriomap
> echo eth0.vlan2 1 >  /sys/fs/cgroup/net_prio/grp1/net_prio.ifpriomap
> mkdir -p /sys/fs/cgroup/net_prio/grp2
> echo eth0 0 > /sys/fs/cgroup/net_prio/grp2/net_prio.ifpriomap
> echo eth0.vlan3 2 >  /sys/fs/cgroup/net_prio/grp2/net_prio.ifpriomap
> mkdir -p /sys/fs/cgroup/net_prio/grp3
> echo eth0 0 > /sys/fs/cgroup/net_prio/grp3/net_prio.ifpriomap
> echo eth0.vlan4 3 >  /sys/fs/cgroup/net_prio/grp3/net_prio.ifpriomap

> tc qdisc del dev eth0 parent root 2> /dev/null
> tc qdisc del dev eth0 parent ffff: 2> /dev/null

> tc qdisc add dev eth0 ingress
> tc qdisc add dev eth0 root mqprio num_tc 4 map 0 1 2 3 0 0 0 0 0 0 0 0 0 0 0 0 queues 1@0 1@1 1@2 1@3 hw 0

> tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 0 hw_tc 0

> tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 1 hw_tc 1

> tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 2 hw_tc 2

> tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 3 hw_tc 3

> ip link set eth0.vlan1 type vlan egress-qos-map 0:0
> ip link set eth0.vlan2 type vlan egress-qos-map 1:1
> ip link set eth0.vlan3 type vlan egress-qos-map 2:2
> ip link set eth0.vlan4 type vlan egress-qos-map 3:3

> tc filter show dev eth0 ingress
filter parent ffff: protocol 802.1Q pref 49149 flower chain 0
filter parent ffff: protocol 802.1Q pref 49149 flower chain 0 handle 0x1 hw_tc 3
  vlan_prio 3
  in_hw in_hw_count 1
filter parent ffff: protocol 802.1Q pref 49150 flower chain 0
filter parent ffff: protocol 802.1Q pref 49150 flower chain 0 handle 0x1 hw_tc 2
  vlan_prio 2
  in_hw in_hw_count 1
filter parent ffff: protocol 802.1Q pref 49151 flower chain 0
filter parent ffff: protocol 802.1Q pref 49151 flower chain 0 handle 0x1 hw_tc 1
  vlan_prio 1
  in_hw in_hw_count 1
filter parent ffff: protocol 802.1Q pref 49152 flower chain 0
filter parent ffff: protocol 802.1Q pref 49152 flower chain 0 handle 0x1 hw_tc 0
  vlan_prio 0
  in_hw in_hw_count 1

> echo 1 > /proc/irq/131/smp_affinity
> echo 1 > /proc/irq/132/smp_affinity

> echo 4 > /proc/irq/133/smp_affinity
> echo 4 > /proc/irq/134/smp_affinity

> echo 4 > /proc/irq/135/smp_affinity
> echo 4 > /proc/irq/136/smp_affinity

> echo 2 > /proc/irq/137/smp_affinity
> echo 2 > /proc/irq/138/smp_affinity

> ping -i 0.001 169.254.11.22 &> /dev/null &
> PID1="$!"
> echo $PID1 >  /sys/fs/cgroup/net_prio/grp0/cgroup.procs

> ping -i 0.001 169.254.12.22 &> /dev/null &
> PID2="$!"
> echo $PID2 >  /sys/fs/cgroup/net_prio/grp1/cgroup.procs

> ping -i 0.001 169.254.13.22 &> /dev/null &
> PID3="$!"
> echo $PID3 >  /sys/fs/cgroup/net_prio/grp2/cgroup.procs

> ping -i 0.001 169.254.14.22 &> /dev/null &
> PID4="$!"
> echo $PID4 >  /sys/fs/cgroup/net_prio/grp3/cgroup.procs

> ping -i 0.001 169.254.11.11 &> /dev/null &
> PID1="$!"
> echo $PID1 >  /sys/fs/cgroup/net_prio/grp0/cgroup.procs

> ping -i 0.001 169.254.12.11 &> /dev/null &
> PID2="$!"
> echo $PID2 >  /sys/fs/cgroup/net_prio/grp1/cgroup.procs

> ping -i 0.001 169.254.13.11 &> /dev/null &
> PID3="$!"
> echo $PID3 >  /sys/fs/cgroup/net_prio/grp2/cgroup.procs

> ping -i 0.001 169.254.14.11 &> /dev/null &
> PID4="$!"
> echo $PID4 >  /sys/fs/cgroup/net_prio/grp3/cgroup.procs

> watch -n 0.5 -d "cat /proc/interrupts | grep eth0"
 131:     251918         41          0          0  IR-PCI-MSI 477184-edge      eth0:rx-0
 132:      18969          1          0          0  IR-PCI-MSI 477185-edge      eth0:tx-0
 133:          0          0     295872          0  IR-PCI-MSI 477186-edge      eth0:rx-1
 134:          0          0      16136          0  IR-PCI-MSI 477187-edge      eth0:tx-1
 135:          0          0     288042          0  IR-PCI-MSI 477188-edge      eth0:rx-2
 136:          0          0      16135          0  IR-PCI-MSI 477189-edge      eth0:tx-2
 137:          0     211177          0          0  IR-PCI-MSI 477190-edge      eth0:rx-3
 138:          2      16144          0          0  IR-PCI-MSI 477191-edge      eth0:tx-3
 139:          0          0          0          0  IR-PCI-MSI 477192-edge      eth0:rx-4
 140:          0          0          0          0  IR-PCI-MSI 477193-edge      eth0:tx-4
 141:          0          0          0          0  IR-PCI-MSI 477194-edge      eth0:rx-5
 142:          0          0          0          0  IR-PCI-MSI 477195-edge      eth0:tx-5
 143:          0          0          0          0  IR-PCI-MSI 477196-edge      eth0:rx-6
 144:          0          0          0          0  IR-PCI-MSI 477197-edge      eth0:tx-6
 145:          0          0          0          0  IR-PCI-MSI 477198-edge      eth0:rx-7
 146:          0          0          0          0  IR-PCI-MSI 477199-edge      eth0:tx-7
 157:          0          0          0          0  IR-PCI-MSI 477210-edge      eth0:safety-ue

====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 31222162 0e039f5c
@@ -598,6 +598,87 @@ static int tc_del_flow(struct stmmac_priv *priv,
 	return ret;
 }
 
+#define VLAN_PRIO_FULL_MASK (0x07)
+
+static int tc_add_vlan_flow(struct stmmac_priv *priv,
+			    struct flow_cls_offload *cls)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+	struct flow_dissector *dissector = rule->match.dissector;
+	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+	struct flow_match_vlan match;
+
+	/* Nothing to do here */
+	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
+		return -EINVAL;
+
+	if (tc < 0) {
+		netdev_err(priv->dev, "Invalid traffic class\n");
+		return -EINVAL;
+	}
+
+	flow_rule_match_vlan(rule, &match);
+
+	if (match.mask->vlan_priority) {
+		u32 prio;
+
+		if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
+			netdev_err(priv->dev, "Only full mask is supported for VLAN priority");
+			return -EINVAL;
+		}
+
+		prio = BIT(match.key->vlan_priority);
+		stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
+	}
+
+	return 0;
+}
+
+static int tc_del_vlan_flow(struct stmmac_priv *priv,
+			    struct flow_cls_offload *cls)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+	struct flow_dissector *dissector = rule->match.dissector;
+	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+
+	/* Nothing to do here */
+	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
+		return -EINVAL;
+
+	if (tc < 0) {
+		netdev_err(priv->dev, "Invalid traffic class\n");
+		return -EINVAL;
+	}
+
+	stmmac_rx_queue_prio(priv, priv->hw, 0, tc);
+
+	return 0;
+}
+
+static int tc_add_flow_cls(struct stmmac_priv *priv,
+			   struct flow_cls_offload *cls)
+{
+	int ret;
+
+	ret = tc_add_flow(priv, cls);
+	if (!ret)
+		return ret;
+
+	return tc_add_vlan_flow(priv, cls);
+}
+
+static int tc_del_flow_cls(struct stmmac_priv *priv,
+			   struct flow_cls_offload *cls)
+{
+	int ret;
+
+	ret = tc_del_flow(priv, cls);
+	if (!ret)
+		return ret;
+
+	return tc_del_vlan_flow(priv, cls);
+}
+
 static int tc_setup_cls(struct stmmac_priv *priv,
 			struct flow_cls_offload *cls)
 {
@@ -609,10 +690,10 @@ static int tc_setup_cls(struct stmmac_priv *priv,
 	switch (cls->command) {
 	case FLOW_CLS_REPLACE:
-		ret = tc_add_flow(priv, cls);
+		ret = tc_add_flow_cls(priv, cls);
 		break;
 	case FLOW_CLS_DESTROY:
-		ret = tc_del_flow(priv, cls);
+		ret = tc_del_flow_cls(priv, cls);
 		break;
 	default:
 		return -EOPNOTSUPP;