Commit b87a173e authored by Daniel Borkmann's avatar Daniel Borkmann Committed by David S. Miller

cls_cgroup: factor out classid retrieval

Split out the cgroup net_cls classid retrieval into its
own function, so that it can be reused later on from other parts of
the traffic control subsystem. If there's no skb->sk, then the small
helper returns 0 as well, which in cls_cgroup terms means 'could not
classify'.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d9382bda
...@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk) ...@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
if (classid != sk->sk_classid) if (classid != sk->sk_classid)
sk->sk_classid = classid; sk->sk_classid = classid;
} }
static inline u32 task_get_classid(const struct sk_buff *skb)
{
u32 classid = task_cls_state(current)->classid;
/* Due to the nature of the classifier it is required to ignore all
* packets originating from softirq context as accessing `current'
* would lead to false results.
*
* This test assumes that all callers of dev_queue_xmit() explicitly
* disable bh. Knowing this, it is possible to detect softirq based
* calls by looking at the number of nested bh disable calls because
* softirqs always disables bh.
*/
if (in_serving_softirq()) {
/* If there is an sk_classid we'll use that. */
if (!skb->sk)
return 0;
classid = skb->sk->sk_classid;
}
return classid;
}
#else /* !CONFIG_CGROUP_NET_CLASSID */ #else /* !CONFIG_CGROUP_NET_CLASSID */
static inline void sock_update_classid(struct sock *sk) static inline void sock_update_classid(struct sock *sk)
{ {
} }
/* Stub for kernels built without CONFIG_CGROUP_NET_CLASSID:
 * always returns 0, which callers interpret as 'could not classify'.
 */
static inline u32 task_get_classid(const struct sk_buff *skb)
{
	return 0;
}
#endif /* CONFIG_CGROUP_NET_CLASSID */ #endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */ #endif /* _NET_CLS_CGROUP_H */
...@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, ...@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res) struct tcf_result *res)
{ {
struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
u32 classid; u32 classid = task_get_classid(skb);
classid = task_cls_state(current)->classid;
/*
* Due to the nature of the classifier it is required to ignore all
* packets originating from softirq context as accessing `current'
* would lead to false results.
*
* This test assumes that all callers of dev_queue_xmit() explicitely
* disable bh. Knowing this, it is possible to detect softirq based
* calls by looking at the number of nested bh disable calls because
* softirqs always disables bh.
*/
if (in_serving_softirq()) {
/* If there is an sk_classid we'll use that. */
if (!skb->sk)
return -1;
classid = skb->sk->sk_classid;
}
if (!classid) if (!classid)
return -1; return -1;
if (!tcf_em_tree_match(skb, &head->ematches, NULL)) if (!tcf_em_tree_match(skb, &head->ematches, NULL))
return -1; return -1;
res->classid = classid; res->classid = classid;
res->class = 0; res->class = 0;
return tcf_exts_exec(skb, &head->exts, res); return tcf_exts_exec(skb, &head->exts, res);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment