Commit a1b7c5fd authored by John Fastabend, committed by David S. Miller

net: sched: add cls_u32 offload hooks for netdevs

This patch allows netdev drivers to consume cls_u32 offloads via
the ndo_setup_tc ndo op.

This aligns with how network drivers have been doing qdisc
offloads for mqprio.
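
For illustration only (not part of this patch), below is a minimal sketch of the
driver side, assuming the ndo_setup_tc() signature introduced by the parent
commit (dev, handle, proto, struct tc_to_netdev *). The foo_* names are
hypothetical placeholders standing in for a driver's own hardware programming
routines.

#include <linux/netdevice.h>
#include <net/pkt_cls.h>

/* Stand-ins for the driver's real hardware programming routines. */
static int foo_program_filter(struct net_device *dev,
			      struct tc_cls_u32_knode *knode)
{
	/* would translate knode->sel, knode->val/mask and knode->exts into HW */
	return 0;
}

static int foo_remove_filter(struct net_device *dev, u32 knode_handle)
{
	/* would free the HW entry previously keyed by knode_handle */
	return 0;
}

/* Hypothetical ndo_setup_tc handler consuming the new TC_SETUP_CLSU32 type. */
static int foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_CLSU32)
		return -EINVAL;

	switch (tc->cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		/* program the match described by the knode into hardware */
		return foo_program_filter(dev, &tc->cls_u32->knode);
	case TC_CLSU32_DELETE_KNODE:
		/* tear down the filter keyed by knode.handle */
		return foo_remove_filter(dev, tc->cls_u32->knode.handle);
	case TC_CLSU32_NEW_HNODE:
	case TC_CLSU32_REPLACE_HNODE:
	case TC_CLSU32_DELETE_HNODE:
		/* hash table (hnode) bookkeeping; optional for simple devices */
		return 0;
	default:
		return -EINVAL;
	}
}

A driver opts in by pointing .ndo_setup_tc at such a handler in its
net_device_ops and returning an error for anything it cannot offload.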
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 16e5cc64
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -779,17 +779,21 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
 				       struct sk_buff *skb);
 
-/* This structure holds attributes of qdisc and classifiers
+/* These structures hold the attributes of qdisc and classifiers
  * that are being passed to the netdevice through the setup_tc op.
  */
 enum {
 	TC_SETUP_MQPRIO,
+	TC_SETUP_CLSU32,
 };
 
+struct tc_cls_u32_offload;
+
 struct tc_to_netdev {
 	unsigned int type;
 	union {
 		u8 tc;
+		struct tc_cls_u32_offload *cls_u32;
 	};
 };
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -358,4 +358,38 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
 }
 #endif /* CONFIG_NET_CLS_IND */
 
+struct tc_cls_u32_knode {
+	struct tcf_exts *exts;
+	u8 fshift;
+	u32 handle;
+	u32 val;
+	u32 mask;
+	u32 link_handle;
+	struct tc_u32_sel *sel;
+};
+
+struct tc_cls_u32_hnode {
+	u32 handle;
+	u32 prio;
+	unsigned int divisor;
+};
+
+enum tc_clsu32_command {
+	TC_CLSU32_NEW_KNODE,
+	TC_CLSU32_REPLACE_KNODE,
+	TC_CLSU32_DELETE_KNODE,
+	TC_CLSU32_NEW_HNODE,
+	TC_CLSU32_REPLACE_HNODE,
+	TC_CLSU32_DELETE_HNODE,
+};
+
+struct tc_cls_u32_offload {
+	/* knode values */
+	enum tc_clsu32_command command;
+	union {
+		struct tc_cls_u32_knode knode;
+		struct tc_cls_u32_hnode hnode;
+	};
+};
+
 #endif
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -43,6 +43,7 @@
 #include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
+#include <linux/netdevice.h>
 
 struct tc_u_knode {
 	struct tc_u_knode __rcu *next;
@@ -424,6 +425,93 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
 	return 0;
 }
 
+static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_cls_u32_offload u32_offload = {0};
+	struct tc_to_netdev offload;
+
+	offload.type = TC_SETUP_CLSU32;
+	offload.cls_u32 = &u32_offload;
+
+	if (dev->netdev_ops->ndo_setup_tc) {
+		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
+		offload.cls_u32->knode.handle = handle;
+		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					      tp->protocol, &offload);
+	}
+}
+
+static void u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_cls_u32_offload u32_offload = {0};
+	struct tc_to_netdev offload;
+
+	offload.type = TC_SETUP_CLSU32;
+	offload.cls_u32 = &u32_offload;
+
+	if (dev->netdev_ops->ndo_setup_tc) {
+		offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+		offload.cls_u32->hnode.divisor = h->divisor;
+		offload.cls_u32->hnode.handle = h->handle;
+		offload.cls_u32->hnode.prio = h->prio;
+
+		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					      tp->protocol, &offload);
+	}
+}
+
+static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_cls_u32_offload u32_offload = {0};
+	struct tc_to_netdev offload;
+
+	offload.type = TC_SETUP_CLSU32;
+	offload.cls_u32 = &u32_offload;
+
+	if (dev->netdev_ops->ndo_setup_tc) {
+		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
+		offload.cls_u32->hnode.divisor = h->divisor;
+		offload.cls_u32->hnode.handle = h->handle;
+		offload.cls_u32->hnode.prio = h->prio;
+
+		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					      tp->protocol, &offload);
+	}
+}
+
+static void u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_cls_u32_offload u32_offload = {0};
+	struct tc_to_netdev offload;
+
+	offload.type = TC_SETUP_CLSU32;
+	offload.cls_u32 = &u32_offload;
+
+	if (dev->netdev_ops->ndo_setup_tc) {
+		offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+		offload.cls_u32->knode.handle = n->handle;
+		offload.cls_u32->knode.fshift = n->fshift;
+#ifdef CONFIG_CLS_U32_MARK
+		offload.cls_u32->knode.val = n->val;
+		offload.cls_u32->knode.mask = n->mask;
+#else
+		offload.cls_u32->knode.val = 0;
+		offload.cls_u32->knode.mask = 0;
+#endif
+		offload.cls_u32->knode.sel = &n->sel;
+		offload.cls_u32->knode.exts = &n->exts;
+		if (n->ht_down)
+			offload.cls_u32->knode.link_handle = n->ht_down->handle;
+		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					      tp->protocol, &offload);
+	}
+}
+
 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 {
 	struct tc_u_knode *n;
@@ -434,6 +522,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 			RCU_INIT_POINTER(ht->ht[h],
 					 rtnl_dereference(n->next));
 			tcf_unbind_filter(tp, &n->res);
+			u32_remove_hw_knode(tp, n->handle);
 			call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
 		}
 	}
@@ -454,6 +543,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 	     phn;
 	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
 		if (phn == ht) {
+			u32_clear_hw_hnode(tp, ht);
 			RCU_INIT_POINTER(*hn, ht->next);
 			kfree_rcu(ht, rcu);
 			return 0;
@@ -540,8 +630,10 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
 	if (ht == NULL)
 		return 0;
 
-	if (TC_U32_KEY(ht->handle))
+	if (TC_U32_KEY(ht->handle)) {
+		u32_remove_hw_knode(tp, ht->handle);
 		return u32_delete_key(tp, (struct tc_u_knode *)ht);
+	}
 
 	if (root_ht == ht)
 		return -EINVAL;
@@ -769,6 +861,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		u32_replace_knode(tp, tp_c, new);
 		tcf_unbind_filter(tp, &n->res);
 		call_rcu(&n->rcu, u32_delete_key_rcu);
+		u32_replace_hw_knode(tp, new);
 		return 0;
 	}
 
@@ -795,6 +888,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		RCU_INIT_POINTER(ht->next, tp_c->hlist);
 		rcu_assign_pointer(tp_c->hlist, ht);
 		*arg = (unsigned long)ht;
+
+		u32_replace_hw_hnode(tp, ht);
 		return 0;
 	}
 
@@ -877,7 +972,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		RCU_INIT_POINTER(n->next, pins);
 		rcu_assign_pointer(*ins, n);
-
+		u32_replace_hw_knode(tp, n);
 		*arg = (unsigned long)n;
 		return 0;
 	}
 