Commit cc7ec456 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

net_sched: cleanups

Cleanup net/sched code to current CodingStyle and practices.

Reduce inline abuse
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7180a031
...@@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, ...@@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
struct tc_action *a, struct tcf_hashinfo *hinfo) struct tc_action *a, struct tcf_hashinfo *hinfo)
{ {
struct tcf_common *p; struct tcf_common *p;
int err = 0, index = -1,i = 0, s_i = 0, n_i = 0; int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct nlattr *nest; struct nlattr *nest;
read_lock_bh(hinfo->lock); read_lock_bh(hinfo->lock);
...@@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, ...@@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
{ {
struct tcf_common *p, *s_p; struct tcf_common *p, *s_p;
struct nlattr *nest; struct nlattr *nest;
int i= 0, n_i = 0; int i = 0, n_i = 0;
nest = nla_nest_start(skb, a->order); nest = nla_nest_start(skb, a->order);
if (nest == NULL) if (nest == NULL)
...@@ -138,7 +138,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, ...@@ -138,7 +138,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
while (p != NULL) { while (p != NULL) {
s_p = p->tcfc_next; s_p = p->tcfc_next;
if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
module_put(a->ops->owner); module_put(a->ops->owner);
n_i++; n_i++;
p = s_p; p = s_p;
} }
...@@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) ...@@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
nest = nla_nest_start(skb, TCA_OPTIONS); nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL) if (nest == NULL)
goto nla_put_failure; goto nla_put_failure;
if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) { err = tcf_action_dump_old(skb, a, bind, ref);
if (err > 0) {
nla_nest_end(skb, nest); nla_nest_end(skb, nest);
return err; return err;
} }
...@@ -491,7 +492,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, ...@@ -491,7 +492,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
struct tc_action *a; struct tc_action *a;
struct tc_action_ops *a_o; struct tc_action_ops *a_o;
char act_name[IFNAMSIZ]; char act_name[IFNAMSIZ];
struct nlattr *tb[TCA_ACT_MAX+1]; struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind; struct nlattr *kind;
int err; int err;
...@@ -549,9 +550,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, ...@@ -549,9 +550,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
goto err_free; goto err_free;
/* module count goes up only when brand new policy is created /* module count goes up only when brand new policy is created
if it exists and is only bound to in a_o->init() then * if it exists and is only bound to in a_o->init() then
ACT_P_CREATED is not returned (a zero is). * ACT_P_CREATED is not returned (a zero is).
*/ */
if (err != ACT_P_CREATED) if (err != ACT_P_CREATED)
module_put(a_o->owner); module_put(a_o->owner);
a->ops = a_o; a->ops = a_o;
...@@ -569,7 +570,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, ...@@ -569,7 +570,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind) char *name, int ovr, int bind)
{ {
struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL; struct tc_action *head = NULL, *act, *act_prev = NULL;
int err; int err;
int i; int i;
...@@ -697,7 +698,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n, ...@@ -697,7 +698,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
static struct tc_action * static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid) tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{ {
struct nlattr *tb[TCA_ACT_MAX+1]; struct nlattr *tb[TCA_ACT_MAX + 1];
struct tc_action *a; struct tc_action *a;
int index; int index;
int err; int err;
...@@ -770,7 +771,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, ...@@ -770,7 +771,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
struct tcamsg *t; struct tcamsg *t;
struct netlink_callback dcb; struct netlink_callback dcb;
struct nlattr *nest; struct nlattr *nest;
struct nlattr *tb[TCA_ACT_MAX+1]; struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind; struct nlattr *kind;
struct tc_action *a = create_a(0); struct tc_action *a = create_a(0);
int err = -ENOMEM; int err = -ENOMEM;
...@@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, ...@@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
nlh->nlmsg_flags |= NLM_F_ROOT; nlh->nlmsg_flags |= NLM_F_ROOT;
module_put(a->ops->owner); module_put(a->ops->owner);
kfree(a); kfree(a);
err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
if (err > 0) if (err > 0)
return 0; return 0;
...@@ -842,14 +844,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, ...@@ -842,14 +844,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
u32 pid, int event) u32 pid, int event)
{ {
int i, ret; int i, ret;
struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL; struct tc_action *head = NULL, *act, *act_prev = NULL;
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
if (ret < 0) if (ret < 0)
return ret; return ret;
if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
if (tb[1] != NULL) if (tb[1] != NULL)
return tca_action_flush(net, tb[1], n, pid); return tca_action_flush(net, tb[1], n, pid);
else else
...@@ -892,7 +894,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, ...@@ -892,7 +894,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* now do the delete */ /* now do the delete */
tcf_action_destroy(head, 0); tcf_action_destroy(head, 0);
ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags&NLM_F_ECHO); n->nlmsg_flags & NLM_F_ECHO);
if (ret > 0) if (ret > 0)
return 0; return 0;
return ret; return ret;
...@@ -936,7 +938,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a, ...@@ -936,7 +938,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
nlh->nlmsg_len = skb_tail_pointer(skb) - b; nlh->nlmsg_len = skb_tail_pointer(skb) - b;
NETLINK_CB(skb).dst_group = RTNLGRP_TC; NETLINK_CB(skb).dst_group = RTNLGRP_TC;
err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
if (err > 0) if (err > 0)
err = 0; err = 0;
return err; return err;
...@@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, ...@@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* dump then free all the actions after update; inserted policy /* dump then free all the actions after update; inserted policy
* stays intact * stays intact
* */ */
ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
for (a = act; a; a = act) { for (a = act; a; a = act) {
act = a->next; act = a->next;
...@@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) ...@@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL; return -EINVAL;
} }
/* n->nlmsg_flags&NLM_F_CREATE /* n->nlmsg_flags & NLM_F_CREATE */
* */
switch (n->nlmsg_type) { switch (n->nlmsg_type) {
case RTM_NEWACTION: case RTM_NEWACTION:
/* we are going to assume all other flags /* we are going to assume all other flags
...@@ -1003,7 +1004,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) ...@@ -1003,7 +1004,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
* but since we want avoid ambiguity (eg when flags * but since we want avoid ambiguity (eg when flags
* is zero) then just set this * is zero) then just set this
*/ */
if (n->nlmsg_flags&NLM_F_REPLACE) if (n->nlmsg_flags & NLM_F_REPLACE)
ovr = 1; ovr = 1;
replay: replay:
ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
...@@ -1028,7 +1029,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) ...@@ -1028,7 +1029,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
static struct nlattr * static struct nlattr *
find_dump_kind(const struct nlmsghdr *n) find_dump_kind(const struct nlmsghdr *n)
{ {
struct nlattr *tb1, *tb2[TCA_ACT_MAX+1]; struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct nlattr *nla[TCAA_MAX + 1]; struct nlattr *nla[TCAA_MAX + 1];
struct nlattr *kind; struct nlattr *kind;
...@@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
} }
a_o = tc_lookup_action(kind); a_o = tc_lookup_action(kind);
if (a_o == NULL) { if (a_o == NULL)
return 0; return 0;
}
memset(&a, 0, sizeof(struct tc_action)); memset(&a, 0, sizeof(struct tc_action));
a.ops = a_o; a.ops = a_o;
......
...@@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est, ...@@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
if (nla == NULL) if (nla == NULL)
return -EINVAL; return -EINVAL;
err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy); err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
if (err < 0) if (err < 0)
return err; return err;
......
...@@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact) ...@@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact)
} }
typedef int (*g_rand)(struct tcf_gact *gact); typedef int (*g_rand)(struct tcf_gact *gact);
static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ }; static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
#endif /* CONFIG_GACT_PROB */ #endif /* CONFIG_GACT_PROB */
static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = { static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
...@@ -89,7 +89,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, ...@@ -89,7 +89,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*gact), pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
bind, &gact_idx_gen, &gact_hash_info); bind, &gact_idx_gen, &gact_hash_info);
if (IS_ERR(pc)) if (IS_ERR(pc))
return PTR_ERR(pc); return PTR_ERR(pc);
ret = ACT_P_CREATED; ret = ACT_P_CREATED;
} else { } else {
if (!ovr) { if (!ovr) {
...@@ -205,9 +205,9 @@ MODULE_LICENSE("GPL"); ...@@ -205,9 +205,9 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void) static int __init gact_init_module(void)
{ {
#ifdef CONFIG_GACT_PROB #ifdef CONFIG_GACT_PROB
printk(KERN_INFO "GACT probability on\n"); pr_info("GACT probability on\n");
#else #else
printk(KERN_INFO "GACT probability NOT on\n"); pr_info("GACT probability NOT on\n");
#endif #endif
return tcf_register_action(&act_gact_ops); return tcf_register_action(&act_gact_ops);
} }
......
...@@ -138,7 +138,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est, ...@@ -138,7 +138,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind, pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
&ipt_idx_gen, &ipt_hash_info); &ipt_idx_gen, &ipt_hash_info);
if (IS_ERR(pc)) if (IS_ERR(pc))
return PTR_ERR(pc); return PTR_ERR(pc);
ret = ACT_P_CREATED; ret = ACT_P_CREATED;
} else { } else {
if (!ovr) { if (!ovr) {
...@@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est, ...@@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
if (unlikely(!t)) if (unlikely(!t))
goto err2; goto err2;
if ((err = ipt_init_target(t, tname, hook)) < 0) err = ipt_init_target(t, tname, hook);
if (err < 0)
goto err3; goto err3;
spin_lock_bh(&ipt->tcf_lock); spin_lock_bh(&ipt->tcf_lock);
...@@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, ...@@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
bstats_update(&ipt->tcf_bstats, skb); bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev /* yes, we have to worry about both in and out dev
worry later - danger - this API seems to have changed * worry later - danger - this API seems to have changed
from earlier kernels */ * from earlier kernels
*/
par.in = skb->dev; par.in = skb->dev;
par.out = NULL; par.out = NULL;
par.hooknum = ipt->tcfi_hook; par.hooknum = ipt->tcfi_hook;
...@@ -253,9 +255,9 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ...@@ -253,9 +255,9 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
struct tc_cnt c; struct tc_cnt c;
/* for simple targets kernel size == user size /* for simple targets kernel size == user size
** user name = target name * user name = target name
** for foolproof you need to not assume this * for foolproof you need to not assume this
*/ */
t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC); t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
if (unlikely(!t)) if (unlikely(!t))
......
...@@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = { ...@@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = {
.lock = &mirred_lock, .lock = &mirred_lock,
}; };
static inline int tcf_mirred_release(struct tcf_mirred *m, int bind) static int tcf_mirred_release(struct tcf_mirred *m, int bind)
{ {
if (m) { if (m) {
if (bind) if (bind)
m->tcf_bindcnt--; m->tcf_bindcnt--;
m->tcf_refcnt--; m->tcf_refcnt--;
if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) { if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
list_del(&m->tcfm_list); list_del(&m->tcfm_list);
if (m->tcfm_dev) if (m->tcfm_dev)
dev_put(m->tcfm_dev); dev_put(m->tcfm_dev);
......
...@@ -69,7 +69,7 @@ static int tcf_nat_init(struct nlattr *nla, struct nlattr *est, ...@@ -69,7 +69,7 @@ static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
&nat_idx_gen, &nat_hash_info); &nat_idx_gen, &nat_hash_info);
if (IS_ERR(pc)) if (IS_ERR(pc))
return PTR_ERR(pc); return PTR_ERR(pc);
p = to_tcf_nat(pc); p = to_tcf_nat(pc);
ret = ACT_P_CREATED; ret = ACT_P_CREATED;
} else { } else {
......
...@@ -70,7 +70,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, ...@@ -70,7 +70,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
&pedit_idx_gen, &pedit_hash_info); &pedit_idx_gen, &pedit_hash_info);
if (IS_ERR(pc)) if (IS_ERR(pc))
return PTR_ERR(pc); return PTR_ERR(pc);
p = to_pedit(pc); p = to_pedit(pc);
keys = kmalloc(ksize, GFP_KERNEL); keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) { if (keys == NULL) {
...@@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, ...@@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
int i, munged = 0; int i, munged = 0;
unsigned int off; unsigned int off;
if (skb_cloned(skb)) { if (skb_cloned(skb) &&
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return p->tcf_action; return p->tcf_action;
}
}
off = skb_network_offset(skb); off = skb_network_offset(skb);
......
...@@ -22,8 +22,8 @@ ...@@ -22,8 +22,8 @@
#include <net/act_api.h> #include <net/act_api.h>
#include <net/netlink.h> #include <net/netlink.h>
#define L2T(p,L) qdisc_l2t((p)->tcfp_R_tab, L) #define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L)
#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L) #define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
#define POL_TAB_MASK 15 #define POL_TAB_MASK 15
static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1]; static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
...@@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = { ...@@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = {
}; };
/* old policer structure from before tc actions */ /* old policer structure from before tc actions */
struct tc_police_compat struct tc_police_compat {
{
u32 index; u32 index;
int action; int action;
u32 limit; u32 limit;
...@@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { ...@@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est, static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
struct tc_action *a, int ovr, int bind) struct tc_action *a, int ovr, int bind)
{ {
unsigned h; unsigned int h;
int ret = 0, err; int ret = 0, err;
struct nlattr *tb[TCA_POLICE_MAX + 1]; struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm; struct tc_police *parm;
......
...@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result ...@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
/* print policy string followed by _ then packet count /* print policy string followed by _ then packet count
* Example if this was the 3rd packet and the string was "hello" * Example if this was the 3rd packet and the string was "hello"
* then it would look like "hello_3" (without quotes) * then it would look like "hello_3" (without quotes)
**/ */
pr_info("simple: %s_%d\n", pr_info("simple: %s_%d\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets); (char *)d->tcfd_defdata, d->tcf_bstats.packets);
spin_unlock(&d->tcf_lock); spin_unlock(&d->tcf_lock);
...@@ -125,7 +125,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, ...@@ -125,7 +125,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
&simp_idx_gen, &simp_hash_info); &simp_idx_gen, &simp_hash_info);
if (IS_ERR(pc)) if (IS_ERR(pc))
return PTR_ERR(pc); return PTR_ERR(pc);
d = to_defact(pc); d = to_defact(pc);
ret = alloc_defdata(d, defdata); ret = alloc_defdata(d, defdata);
...@@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, ...@@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
return ret; return ret;
} }
static inline int tcf_simp_cleanup(struct tc_action *a, int bind) static int tcf_simp_cleanup(struct tc_action *a, int bind)
{ {
struct tcf_defact *d = a->priv; struct tcf_defact *d = a->priv;
...@@ -158,8 +158,8 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind) ...@@ -158,8 +158,8 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
return 0; return 0;
} }
static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref) int bind, int ref)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct tcf_defact *d = a->priv; struct tcf_defact *d = a->priv;
......
...@@ -113,7 +113,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, ...@@ -113,7 +113,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
&skbedit_idx_gen, &skbedit_hash_info); &skbedit_idx_gen, &skbedit_hash_info);
if (IS_ERR(pc)) if (IS_ERR(pc))
return PTR_ERR(pc); return PTR_ERR(pc);
d = to_skbedit(pc); d = to_skbedit(pc);
ret = ACT_P_CREATED; ret = ACT_P_CREATED;
...@@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, ...@@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
return ret; return ret;
} }
static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind) static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
{ {
struct tcf_skbedit *d = a->priv; struct tcf_skbedit *d = a->priv;
...@@ -153,8 +153,8 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind) ...@@ -153,8 +153,8 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
return 0; return 0;
} }
static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref) int bind, int ref)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbedit *d = a->priv; struct tcf_skbedit *d = a->priv;
......
...@@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) ...@@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
int rc = -ENOENT; int rc = -ENOENT;
write_lock(&cls_mod_lock); write_lock(&cls_mod_lock);
for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next) for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
if (t == ops) if (t == ops)
break; break;
...@@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp) ...@@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
u32 first = TC_H_MAKE(0xC0000000U, 0U); u32 first = TC_H_MAKE(0xC0000000U, 0U);
if (tp) if (tp)
first = tp->prio-1; first = tp->prio - 1;
return first; return first;
} }
...@@ -149,7 +149,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) ...@@ -149,7 +149,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (prio == 0) { if (prio == 0) {
/* If no priority is given, user wants we allocated it. */ /* If no priority is given, user wants we allocated it. */
if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) if (n->nlmsg_type != RTM_NEWTFILTER ||
!(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT; return -ENOENT;
prio = TC_H_MAKE(0x80000000U, 0U); prio = TC_H_MAKE(0x80000000U, 0U);
} }
...@@ -176,7 +177,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) ...@@ -176,7 +177,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
} }
/* Is it classful? */ /* Is it classful? */
if ((cops = q->ops->cl_ops) == NULL) cops = q->ops->cl_ops;
if (!cops)
return -EINVAL; return -EINVAL;
if (cops->tcf_chain == NULL) if (cops->tcf_chain == NULL)
...@@ -196,10 +198,11 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) ...@@ -196,10 +198,11 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
goto errout; goto errout;
/* Check the chain for existence of proto-tcf with this priority */ /* Check the chain for existence of proto-tcf with this priority */
for (back = chain; (tp=*back) != NULL; back = &tp->next) { for (back = chain; (tp = *back) != NULL; back = &tp->next) {
if (tp->prio >= prio) { if (tp->prio >= prio) {
if (tp->prio == prio) { if (tp->prio == prio) {
if (!nprio || (tp->protocol != protocol && protocol)) if (!nprio ||
(tp->protocol != protocol && protocol))
goto errout; goto errout;
} else } else
tp = NULL; tp = NULL;
...@@ -216,7 +219,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) ...@@ -216,7 +219,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
goto errout; goto errout;
err = -ENOENT; err = -ENOENT;
if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) if (n->nlmsg_type != RTM_NEWTFILTER ||
!(n->nlmsg_flags & NLM_F_CREATE))
goto errout; goto errout;
...@@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len; return skb->len;
if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return skb->len; return skb->len;
if (!tcm->tcm_parent) if (!tcm->tcm_parent)
...@@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q) if (!q)
goto out; goto out;
if ((cops = q->ops->cl_ops) == NULL) cops = q->ops->cl_ops;
if (!cops)
goto errout; goto errout;
if (cops->tcf_chain == NULL) if (cops->tcf_chain == NULL)
goto errout; goto errout;
...@@ -444,8 +450,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -444,8 +450,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0]; s_t = cb->args[0];
for (tp=*chain, t=0; tp; tp = tp->next, t++) { for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
if (t < s_t) continue; if (t < s_t)
continue;
if (TC_H_MAJ(tcm->tcm_info) && if (TC_H_MAJ(tcm->tcm_info) &&
TC_H_MAJ(tcm->tcm_info) != tp->prio) TC_H_MAJ(tcm->tcm_info) != tp->prio)
continue; continue;
...@@ -468,10 +475,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -468,10 +475,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
arg.skb = skb; arg.skb = skb;
arg.cb = cb; arg.cb = cb;
arg.w.stop = 0; arg.w.stop = 0;
arg.w.skip = cb->args[1]-1; arg.w.skip = cb->args[1] - 1;
arg.w.count = 0; arg.w.count = 0;
tp->ops->walk(tp, &arg.w); tp->ops->walk(tp, &arg.w);
cb->args[1] = arg.w.count+1; cb->args[1] = arg.w.count + 1;
if (arg.w.stop) if (arg.w.stop)
break; break;
} }
......
...@@ -21,14 +21,12 @@ ...@@ -21,14 +21,12 @@
#include <net/act_api.h> #include <net/act_api.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
struct basic_head struct basic_head {
{
u32 hgenerator; u32 hgenerator;
struct list_head flist; struct list_head flist;
}; };
struct basic_filter struct basic_filter {
{
u32 handle; u32 handle;
struct tcf_exts exts; struct tcf_exts exts;
struct tcf_ematch_tree ematches; struct tcf_ematch_tree ematches;
...@@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp) ...@@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp)
return 0; return 0;
} }
static inline void basic_delete_filter(struct tcf_proto *tp, static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
struct basic_filter *f)
{ {
tcf_unbind_filter(tp, &f->res); tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts); tcf_exts_destroy(tp, &f->exts);
...@@ -135,9 +132,9 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = { ...@@ -135,9 +132,9 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
[TCA_BASIC_EMATCHES] = { .type = NLA_NESTED }, [TCA_BASIC_EMATCHES] = { .type = NLA_NESTED },
}; };
static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f, static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
unsigned long base, struct nlattr **tb, unsigned long base, struct nlattr **tb,
struct nlattr *est) struct nlattr *est)
{ {
int err = -EINVAL; int err = -EINVAL;
struct tcf_exts e; struct tcf_exts e;
...@@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle, ...@@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
} while (--i > 0 && basic_get(tp, head->hgenerator)); } while (--i > 0 && basic_get(tp, head->hgenerator));
if (i <= 0) { if (i <= 0) {
printk(KERN_ERR "Insufficient number of handles\n"); pr_err("Insufficient number of handles\n");
goto errout; goto errout;
} }
......
...@@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, ...@@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
{ {
struct cgroup_cls_state *cs; struct cgroup_cls_state *cs;
if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL))) cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
if (cgrp->parent) if (cgrp->parent)
...@@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) ...@@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
} }
struct cls_cgroup_head struct cls_cgroup_head {
{
u32 handle; u32 handle;
struct tcf_exts exts; struct tcf_exts exts;
struct tcf_ematch_tree ematches; struct tcf_ematch_tree ematches;
...@@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base, ...@@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca, u32 handle, struct nlattr **tca,
unsigned long *arg) unsigned long *arg)
{ {
struct nlattr *tb[TCA_CGROUP_MAX+1]; struct nlattr *tb[TCA_CGROUP_MAX + 1];
struct cls_cgroup_head *head = tp->root; struct cls_cgroup_head *head = tp->root;
struct tcf_ematch_tree t; struct tcf_ematch_tree t;
struct tcf_exts e; struct tcf_exts e;
......
...@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb) ...@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph))) if (!pskb_network_may_pull(skb, sizeof(*iph)))
break; break;
iph = ip_hdr(skb); iph = ip_hdr(skb);
if (iph->frag_off & htons(IP_MF|IP_OFFSET)) if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break; break;
poff = proto_ports_offset(iph->protocol); poff = proto_ports_offset(iph->protocol);
if (poff >= 0 && if (poff >= 0 &&
...@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb) ...@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph))) if (!pskb_network_may_pull(skb, sizeof(*iph)))
break; break;
iph = ip_hdr(skb); iph = ip_hdr(skb);
if (iph->frag_off & htons(IP_MF|IP_OFFSET)) if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break; break;
poff = proto_ports_offset(iph->protocol); poff = proto_ports_offset(iph->protocol);
if (poff >= 0 && if (poff >= 0 &&
......
...@@ -31,14 +31,12 @@ ...@@ -31,14 +31,12 @@
#define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *)) #define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
struct fw_head struct fw_head {
{
struct fw_filter *ht[HTSIZE]; struct fw_filter *ht[HTSIZE];
u32 mask; u32 mask;
}; };
struct fw_filter struct fw_filter {
{
struct fw_filter *next; struct fw_filter *next;
u32 id; u32 id;
struct tcf_result res; struct tcf_result res;
...@@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = { ...@@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = {
.police = TCA_FW_POLICE .police = TCA_FW_POLICE
}; };
static __inline__ int fw_hash(u32 handle) static inline int fw_hash(u32 handle)
{ {
if (HTSIZE == 4096) if (HTSIZE == 4096)
return ((handle >> 24) & 0xFFF) ^ return ((handle >> 24) & 0xFFF) ^
...@@ -82,14 +80,14 @@ static __inline__ int fw_hash(u32 handle) ...@@ -82,14 +80,14 @@ static __inline__ int fw_hash(u32 handle)
static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res) struct tcf_result *res)
{ {
struct fw_head *head = (struct fw_head*)tp->root; struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f; struct fw_filter *f;
int r; int r;
u32 id = skb->mark; u32 id = skb->mark;
if (head != NULL) { if (head != NULL) {
id &= head->mask; id &= head->mask;
for (f=head->ht[fw_hash(id)]; f; f=f->next) { for (f = head->ht[fw_hash(id)]; f; f = f->next) {
if (f->id == id) { if (f->id == id) {
*res = f->res; *res = f->res;
#ifdef CONFIG_NET_CLS_IND #ifdef CONFIG_NET_CLS_IND
...@@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, ...@@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
} }
} else { } else {
/* old method */ /* old method */
if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) { if (id && (TC_H_MAJ(id) == 0 ||
!(TC_H_MAJ(id ^ tp->q->handle)))) {
res->classid = id; res->classid = id;
res->class = 0; res->class = 0;
return 0; return 0;
...@@ -117,13 +116,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, ...@@ -117,13 +116,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
static unsigned long fw_get(struct tcf_proto *tp, u32 handle) static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{ {
struct fw_head *head = (struct fw_head*)tp->root; struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f; struct fw_filter *f;
if (head == NULL) if (head == NULL)
return 0; return 0;
for (f=head->ht[fw_hash(handle)]; f; f=f->next) { for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
if (f->id == handle) if (f->id == handle)
return (unsigned long)f; return (unsigned long)f;
} }
...@@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp) ...@@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp)
return 0; return 0;
} }
static inline void static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
{ {
tcf_unbind_filter(tp, &f->res); tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts); tcf_exts_destroy(tp, &f->exts);
...@@ -156,8 +154,8 @@ static void fw_destroy(struct tcf_proto *tp) ...@@ -156,8 +154,8 @@ static void fw_destroy(struct tcf_proto *tp)
if (head == NULL) if (head == NULL)
return; return;
for (h=0; h<HTSIZE; h++) { for (h = 0; h < HTSIZE; h++) {
while ((f=head->ht[h]) != NULL) { while ((f = head->ht[h]) != NULL) {
head->ht[h] = f->next; head->ht[h] = f->next;
fw_delete_filter(tp, f); fw_delete_filter(tp, f);
} }
...@@ -167,14 +165,14 @@ static void fw_destroy(struct tcf_proto *tp) ...@@ -167,14 +165,14 @@ static void fw_destroy(struct tcf_proto *tp)
static int fw_delete(struct tcf_proto *tp, unsigned long arg) static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{ {
struct fw_head *head = (struct fw_head*)tp->root; struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter*)arg; struct fw_filter *f = (struct fw_filter *)arg;
struct fw_filter **fp; struct fw_filter **fp;
if (head == NULL || f == NULL) if (head == NULL || f == NULL)
goto out; goto out;
for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) { for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
if (*fp == f) { if (*fp == f) {
tcf_tree_lock(tp); tcf_tree_lock(tp);
*fp = f->next; *fp = f->next;
...@@ -240,7 +238,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base, ...@@ -240,7 +238,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
struct nlattr **tca, struct nlattr **tca,
unsigned long *arg) unsigned long *arg)
{ {
struct fw_head *head = (struct fw_head*)tp->root; struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter *) *arg; struct fw_filter *f = (struct fw_filter *) *arg;
struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_FW_MAX + 1]; struct nlattr *tb[TCA_FW_MAX + 1];
...@@ -302,7 +300,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base, ...@@ -302,7 +300,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{ {
struct fw_head *head = (struct fw_head*)tp->root; struct fw_head *head = (struct fw_head *)tp->root;
int h; int h;
if (head == NULL) if (head == NULL)
...@@ -332,7 +330,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh, ...@@ -332,7 +330,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t) struct sk_buff *skb, struct tcmsg *t)
{ {
struct fw_head *head = (struct fw_head *)tp->root; struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter*)fh; struct fw_filter *f = (struct fw_filter *)fh;
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest; struct nlattr *nest;
......
...@@ -23,34 +23,30 @@ ...@@ -23,34 +23,30 @@
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
/* /*
1. For now we assume that route tags < 256. * 1. For now we assume that route tags < 256.
It allows to use direct table lookups, instead of hash tables. * It allows to use direct table lookups, instead of hash tables.
2. For now we assume that "from TAG" and "fromdev DEV" statements * 2. For now we assume that "from TAG" and "fromdev DEV" statements
are mutually exclusive. * are mutually exclusive.
3. "to TAG from ANY" has higher priority, than "to ANY from XXX" * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
*/ */
struct route4_fastmap struct route4_fastmap {
{
struct route4_filter *filter; struct route4_filter *filter;
u32 id; u32 id;
int iif; int iif;
}; };
struct route4_head struct route4_head {
{
struct route4_fastmap fastmap[16]; struct route4_fastmap fastmap[16];
struct route4_bucket *table[256+1]; struct route4_bucket *table[256 + 1];
}; };
struct route4_bucket struct route4_bucket {
{
/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */ /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
struct route4_filter *ht[16+16+1]; struct route4_filter *ht[16 + 16 + 1];
}; };
struct route4_filter struct route4_filter {
{
struct route4_filter *next; struct route4_filter *next;
u32 id; u32 id;
int iif; int iif;
...@@ -61,20 +57,20 @@ struct route4_filter ...@@ -61,20 +57,20 @@ struct route4_filter
struct route4_bucket *bkt; struct route4_bucket *bkt;
}; };
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L)) #define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
static const struct tcf_ext_map route_ext_map = { static const struct tcf_ext_map route_ext_map = {
.police = TCA_ROUTE4_POLICE, .police = TCA_ROUTE4_POLICE,
.action = TCA_ROUTE4_ACT .action = TCA_ROUTE4_ACT
}; };
static __inline__ int route4_fastmap_hash(u32 id, int iif) static inline int route4_fastmap_hash(u32 id, int iif)
{ {
return id&0xF; return id & 0xF;
} }
static inline static void
void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{ {
spinlock_t *root_lock = qdisc_root_sleeping_lock(q); spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
...@@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) ...@@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
spin_unlock_bh(root_lock); spin_unlock_bh(root_lock);
} }
static inline void static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif, route4_set_fastmap(struct route4_head *head, u32 id, int iif,
struct route4_filter *f) struct route4_filter *f)
{ {
int h = route4_fastmap_hash(id, iif); int h = route4_fastmap_hash(id, iif);
head->fastmap[h].id = id; head->fastmap[h].id = id;
head->fastmap[h].iif = iif; head->fastmap[h].iif = iif;
head->fastmap[h].filter = f; head->fastmap[h].filter = f;
} }
static __inline__ int route4_hash_to(u32 id) static inline int route4_hash_to(u32 id)
{ {
return id&0xFF; return id & 0xFF;
} }
static __inline__ int route4_hash_from(u32 id) static inline int route4_hash_from(u32 id)
{ {
return (id>>16)&0xF; return (id >> 16) & 0xF;
} }
static __inline__ int route4_hash_iif(int iif) static inline int route4_hash_iif(int iif)
{ {
return 16 + ((iif>>16)&0xF); return 16 + ((iif >> 16) & 0xF);
} }
static __inline__ int route4_hash_wild(void) static inline int route4_hash_wild(void)
{ {
return 32; return 32;
} }
...@@ -131,21 +128,22 @@ static __inline__ int route4_hash_wild(void) ...@@ -131,21 +128,22 @@ static __inline__ int route4_hash_wild(void)
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res) struct tcf_result *res)
{ {
struct route4_head *head = (struct route4_head*)tp->root; struct route4_head *head = (struct route4_head *)tp->root;
struct dst_entry *dst; struct dst_entry *dst;
struct route4_bucket *b; struct route4_bucket *b;
struct route4_filter *f; struct route4_filter *f;
u32 id, h; u32 id, h;
int iif, dont_cache = 0; int iif, dont_cache = 0;
if ((dst = skb_dst(skb)) == NULL) dst = skb_dst(skb);
if (!dst)
goto failure; goto failure;
id = dst->tclassid; id = dst->tclassid;
if (head == NULL) if (head == NULL)
goto old_method; goto old_method;
iif = ((struct rtable*)dst)->fl.iif; iif = ((struct rtable *)dst)->fl.iif;
h = route4_fastmap_hash(id, iif); h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id && if (id == head->fastmap[h].id &&
...@@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, ...@@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
h = route4_hash_to(id); h = route4_hash_to(id);
restart: restart:
if ((b = head->table[h]) != NULL) { b = head->table[h];
if (b) {
for (f = b->ht[route4_hash_from(id)]; f; f = f->next) for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
if (f->id == id) if (f->id == id)
ROUTE4_APPLY_RESULT(); ROUTE4_APPLY_RESULT();
...@@ -197,8 +196,9 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, ...@@ -197,8 +196,9 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
static inline u32 to_hash(u32 id) static inline u32 to_hash(u32 id)
{ {
u32 h = id&0xFF; u32 h = id & 0xFF;
if (id&0x8000)
if (id & 0x8000)
h += 256; h += 256;
return h; return h;
} }
...@@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id) ...@@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id)
if (!(id & 0x8000)) { if (!(id & 0x8000)) {
if (id > 255) if (id > 255)
return 256; return 256;
return id&0xF; return id & 0xF;
} }
return 16 + (id&0xF); return 16 + (id & 0xF);
} }
static unsigned long route4_get(struct tcf_proto *tp, u32 handle) static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{ {
struct route4_head *head = (struct route4_head*)tp->root; struct route4_head *head = (struct route4_head *)tp->root;
struct route4_bucket *b; struct route4_bucket *b;
struct route4_filter *f; struct route4_filter *f;
unsigned h1, h2; unsigned int h1, h2;
if (!head) if (!head)
return 0; return 0;
...@@ -230,11 +230,12 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) ...@@ -230,11 +230,12 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
if (h1 > 256) if (h1 > 256)
return 0; return 0;
h2 = from_hash(handle>>16); h2 = from_hash(handle >> 16);
if (h2 > 32) if (h2 > 32)
return 0; return 0;
if ((b = head->table[h1]) != NULL) { b = head->table[h1];
if (b) {
for (f = b->ht[h2]; f; f = f->next) for (f = b->ht[h2]; f; f = f->next)
if (f->handle == handle) if (f->handle == handle)
return (unsigned long)f; return (unsigned long)f;
...@@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp) ...@@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp)
return 0; return 0;
} }
static inline void static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f) route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{ {
tcf_unbind_filter(tp, &f->res); tcf_unbind_filter(tp, &f->res);
...@@ -267,11 +268,12 @@ static void route4_destroy(struct tcf_proto *tp) ...@@ -267,11 +268,12 @@ static void route4_destroy(struct tcf_proto *tp)
if (head == NULL) if (head == NULL)
return; return;
for (h1=0; h1<=256; h1++) { for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b; struct route4_bucket *b;
if ((b = head->table[h1]) != NULL) { b = head->table[h1];
for (h2=0; h2<=32; h2++) { if (b) {
for (h2 = 0; h2 <= 32; h2++) {
struct route4_filter *f; struct route4_filter *f;
while ((f = b->ht[h2]) != NULL) { while ((f = b->ht[h2]) != NULL) {
...@@ -287,9 +289,9 @@ static void route4_destroy(struct tcf_proto *tp) ...@@ -287,9 +289,9 @@ static void route4_destroy(struct tcf_proto *tp)
static int route4_delete(struct tcf_proto *tp, unsigned long arg) static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{ {
struct route4_head *head = (struct route4_head*)tp->root; struct route4_head *head = (struct route4_head *)tp->root;
struct route4_filter **fp, *f = (struct route4_filter*)arg; struct route4_filter **fp, *f = (struct route4_filter *)arg;
unsigned h = 0; unsigned int h = 0;
struct route4_bucket *b; struct route4_bucket *b;
int i; int i;
...@@ -299,7 +301,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) ...@@ -299,7 +301,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
h = f->handle; h = f->handle;
b = f->bkt; b = f->bkt;
for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) { for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
if (*fp == f) { if (*fp == f) {
tcf_tree_lock(tp); tcf_tree_lock(tp);
*fp = f->next; *fp = f->next;
...@@ -310,7 +312,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) ...@@ -310,7 +312,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */ /* Strip tree */
for (i=0; i<=32; i++) for (i = 0; i <= 32; i++)
if (b->ht[i]) if (b->ht[i])
return 0; return 0;
...@@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base, ...@@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
} }
h1 = to_hash(nhandle); h1 = to_hash(nhandle);
if ((b = head->table[h1]) == NULL) { b = head->table[h1];
if (!b) {
err = -ENOBUFS; err = -ENOBUFS;
b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL) if (b == NULL)
...@@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base, ...@@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
tcf_tree_unlock(tp); tcf_tree_unlock(tp);
} else { } else {
unsigned int h2 = from_hash(nhandle >> 16); unsigned int h2 = from_hash(nhandle >> 16);
err = -EEXIST; err = -EEXIST;
for (fp = b->ht[h2]; fp; fp = fp->next) for (fp = b->ht[h2]; fp; fp = fp->next)
if (fp->handle == f->handle) if (fp->handle == f->handle)
...@@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, ...@@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
if (err < 0) if (err < 0)
return err; return err;
if ((f = (struct route4_filter*)*arg) != NULL) { f = (struct route4_filter *)*arg;
if (f) {
if (f->handle != handle && handle) if (f->handle != handle && handle)
return -EINVAL; return -EINVAL;
...@@ -481,7 +486,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, ...@@ -481,7 +486,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
reinsert: reinsert:
h = from_hash(f->handle >> 16); h = from_hash(f->handle >> 16);
for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next) for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
if (f->handle < f1->handle) if (f->handle < f1->handle)
break; break;
...@@ -492,7 +497,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, ...@@ -492,7 +497,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
if (old_handle && f->handle != old_handle) { if (old_handle && f->handle != old_handle) {
th = to_hash(old_handle); th = to_hash(old_handle);
h = from_hash(old_handle >> 16); h = from_hash(old_handle >> 16);
if ((b = head->table[th]) != NULL) { b = head->table[th];
if (b) {
for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) { for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
if (*fp == f) { if (*fp == f) {
*fp = f->next; *fp = f->next;
...@@ -515,7 +521,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, ...@@ -515,7 +521,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{ {
struct route4_head *head = tp->root; struct route4_head *head = tp->root;
unsigned h, h1; unsigned int h, h1;
if (head == NULL) if (head == NULL)
arg->stop = 1; arg->stop = 1;
...@@ -549,7 +555,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) ...@@ -549,7 +555,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int route4_dump(struct tcf_proto *tp, unsigned long fh, static int route4_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t) struct sk_buff *skb, struct tcmsg *t)
{ {
struct route4_filter *f = (struct route4_filter*)fh; struct route4_filter *f = (struct route4_filter *)fh;
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest; struct nlattr *nest;
u32 id; u32 id;
...@@ -563,15 +569,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh, ...@@ -563,15 +569,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL) if (nest == NULL)
goto nla_put_failure; goto nla_put_failure;
if (!(f->handle&0x8000)) { if (!(f->handle & 0x8000)) {
id = f->id&0xFF; id = f->id & 0xFF;
NLA_PUT_U32(skb, TCA_ROUTE4_TO, id); NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
} }
if (f->handle&0x80000000) { if (f->handle & 0x80000000) {
if ((f->handle>>16) != 0xFFFF) if ((f->handle >> 16) != 0xFFFF)
NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif); NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
} else { } else {
id = f->id>>16; id = f->id >> 16;
NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id); NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
} }
if (f->res.classid) if (f->res.classid)
......
...@@ -66,28 +66,25 @@ ...@@ -66,28 +66,25 @@
powerful classification engine. */ powerful classification engine. */
struct rsvp_head struct rsvp_head {
{
u32 tmap[256/32]; u32 tmap[256/32];
u32 hgenerator; u32 hgenerator;
u8 tgenerator; u8 tgenerator;
struct rsvp_session *ht[256]; struct rsvp_session *ht[256];
}; };
struct rsvp_session struct rsvp_session {
{
struct rsvp_session *next; struct rsvp_session *next;
__be32 dst[RSVP_DST_LEN]; __be32 dst[RSVP_DST_LEN];
struct tc_rsvp_gpi dpi; struct tc_rsvp_gpi dpi;
u8 protocol; u8 protocol;
u8 tunnelid; u8 tunnelid;
/* 16 (src,sport) hash slots, and one wildcard source slot */ /* 16 (src,sport) hash slots, and one wildcard source slot */
struct rsvp_filter *ht[16+1]; struct rsvp_filter *ht[16 + 1];
}; };
struct rsvp_filter struct rsvp_filter {
{
struct rsvp_filter *next; struct rsvp_filter *next;
__be32 src[RSVP_DST_LEN]; __be32 src[RSVP_DST_LEN];
struct tc_rsvp_gpi spi; struct tc_rsvp_gpi spi;
...@@ -100,17 +97,19 @@ struct rsvp_filter ...@@ -100,17 +97,19 @@ struct rsvp_filter
struct rsvp_session *sess; struct rsvp_session *sess;
}; };
static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{ {
unsigned h = (__force __u32)dst[RSVP_DST_LEN-1]; unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
h ^= h>>16; h ^= h>>16;
h ^= h>>8; h ^= h>>8;
return (h ^ protocol ^ tunnelid) & 0xFF; return (h ^ protocol ^ tunnelid) & 0xFF;
} }
static __inline__ unsigned hash_src(__be32 *src) static inline unsigned int hash_src(__be32 *src)
{ {
unsigned h = (__force __u32)src[RSVP_DST_LEN-1]; unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
h ^= h>>16; h ^= h>>16;
h ^= h>>8; h ^= h>>8;
h ^= h>>4; h ^= h>>4;
...@@ -134,10 +133,10 @@ static struct tcf_ext_map rsvp_ext_map = { ...@@ -134,10 +133,10 @@ static struct tcf_ext_map rsvp_ext_map = {
static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res) struct tcf_result *res)
{ {
struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht; struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s; struct rsvp_session *s;
struct rsvp_filter *f; struct rsvp_filter *f;
unsigned h1, h2; unsigned int h1, h2;
__be32 *dst, *src; __be32 *dst, *src;
u8 protocol; u8 protocol;
u8 tunnelid = 0; u8 tunnelid = 0;
...@@ -162,13 +161,13 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, ...@@ -162,13 +161,13 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
src = &nhptr->saddr.s6_addr32[0]; src = &nhptr->saddr.s6_addr32[0];
dst = &nhptr->daddr.s6_addr32[0]; dst = &nhptr->daddr.s6_addr32[0];
protocol = nhptr->nexthdr; protocol = nhptr->nexthdr;
xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr); xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else #else
src = &nhptr->saddr; src = &nhptr->saddr;
dst = &nhptr->daddr; dst = &nhptr->daddr;
protocol = nhptr->protocol; protocol = nhptr->protocol;
xprt = ((u8*)nhptr) + (nhptr->ihl<<2); xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
if (nhptr->frag_off & htons(IP_MF|IP_OFFSET)) if (nhptr->frag_off & htons(IP_MF | IP_OFFSET))
return -1; return -1;
#endif #endif
...@@ -176,10 +175,10 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, ...@@ -176,10 +175,10 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
h2 = hash_src(src); h2 = hash_src(src);
for (s = sht[h1]; s; s = s->next) { for (s = sht[h1]; s; s = s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
protocol == s->protocol && protocol == s->protocol &&
!(s->dpi.mask & !(s->dpi.mask &
(*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) && (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4 #if RSVP_DST_LEN == 4
dst[0] == s->dst[0] && dst[0] == s->dst[0] &&
dst[1] == s->dst[1] && dst[1] == s->dst[1] &&
...@@ -188,8 +187,8 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, ...@@ -188,8 +187,8 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
tunnelid == s->tunnelid) { tunnelid == s->tunnelid) {
for (f = s->ht[h2]; f; f = f->next) { for (f = s->ht[h2]; f; f = f->next) {
if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] && if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
!(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key)) !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4 #if RSVP_DST_LEN == 4
&& &&
src[0] == f->src[0] && src[0] == f->src[0] &&
...@@ -205,7 +204,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, ...@@ -205,7 +204,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
return 0; return 0;
tunnelid = f->res.classid; tunnelid = f->res.classid;
nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr)); nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
goto restart; goto restart;
} }
} }
...@@ -224,11 +223,11 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, ...@@ -224,11 +223,11 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
{ {
struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht; struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s; struct rsvp_session *s;
struct rsvp_filter *f; struct rsvp_filter *f;
unsigned h1 = handle&0xFF; unsigned int h1 = handle & 0xFF;
unsigned h2 = (handle>>8)&0xFF; unsigned int h2 = (handle >> 8) & 0xFF;
if (h2 > 16) if (h2 > 16)
return 0; return 0;
...@@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp) ...@@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS; return -ENOBUFS;
} }
static inline void static void
rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{ {
tcf_unbind_filter(tp, &f->res); tcf_unbind_filter(tp, &f->res);
...@@ -277,13 +276,13 @@ static void rsvp_destroy(struct tcf_proto *tp) ...@@ -277,13 +276,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
sht = data->ht; sht = data->ht;
for (h1=0; h1<256; h1++) { for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s; struct rsvp_session *s;
while ((s = sht[h1]) != NULL) { while ((s = sht[h1]) != NULL) {
sht[h1] = s->next; sht[h1] = s->next;
for (h2=0; h2<=16; h2++) { for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f; struct rsvp_filter *f;
while ((f = s->ht[h2]) != NULL) { while ((f = s->ht[h2]) != NULL) {
...@@ -299,13 +298,13 @@ static void rsvp_destroy(struct tcf_proto *tp) ...@@ -299,13 +298,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
{ {
struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg; struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
unsigned h = f->handle; unsigned int h = f->handle;
struct rsvp_session **sp; struct rsvp_session **sp;
struct rsvp_session *s = f->sess; struct rsvp_session *s = f->sess;
int i; int i;
for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) { for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
if (*fp == f) { if (*fp == f) {
tcf_tree_lock(tp); tcf_tree_lock(tp);
*fp = f->next; *fp = f->next;
...@@ -314,12 +313,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) ...@@ -314,12 +313,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */ /* Strip tree */
for (i=0; i<=16; i++) for (i = 0; i <= 16; i++)
if (s->ht[i]) if (s->ht[i])
return 0; return 0;
/* OK, session has no flows */ /* OK, session has no flows */
for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF]; for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
*sp; sp = &(*sp)->next) { *sp; sp = &(*sp)->next) {
if (*sp == s) { if (*sp == s) {
tcf_tree_lock(tp); tcf_tree_lock(tp);
...@@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) ...@@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
return 0; return 0;
} }
static unsigned gen_handle(struct tcf_proto *tp, unsigned salt) static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{ {
struct rsvp_head *data = tp->root; struct rsvp_head *data = tp->root;
int i = 0xFFFF; int i = 0xFFFF;
while (i-- > 0) { while (i-- > 0) {
u32 h; u32 h;
if ((data->hgenerator += 0x10000) == 0) if ((data->hgenerator += 0x10000) == 0)
data->hgenerator = 0x10000; data->hgenerator = 0x10000;
h = data->hgenerator|salt; h = data->hgenerator|salt;
...@@ -355,10 +355,10 @@ static unsigned gen_handle(struct tcf_proto *tp, unsigned salt) ...@@ -355,10 +355,10 @@ static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
static int tunnel_bts(struct rsvp_head *data) static int tunnel_bts(struct rsvp_head *data)
{ {
int n = data->tgenerator>>5; int n = data->tgenerator >> 5;
u32 b = 1<<(data->tgenerator&0x1F); u32 b = 1 << (data->tgenerator & 0x1F);
if (data->tmap[n]&b) if (data->tmap[n] & b)
return 0; return 0;
data->tmap[n] |= b; data->tmap[n] |= b;
return 1; return 1;
...@@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data) ...@@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data)
memset(tmap, 0, sizeof(tmap)); memset(tmap, 0, sizeof(tmap));
for (h1=0; h1<256; h1++) { for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s; struct rsvp_session *s;
for (s = sht[h1]; s; s = s->next) { for (s = sht[h1]; s; s = s->next) {
for (h2=0; h2<=16; h2++) { for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f; struct rsvp_filter *f;
for (f = s->ht[h2]; f; f = f->next) { for (f = s->ht[h2]; f; f = f->next) {
...@@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data) ...@@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
{ {
int i, k; int i, k;
for (k=0; k<2; k++) { for (k = 0; k < 2; k++) {
for (i=255; i>0; i--) { for (i = 255; i > 0; i--) {
if (++data->tgenerator == 0) if (++data->tgenerator == 0)
data->tgenerator = 1; data->tgenerator = 1;
if (tunnel_bts(data)) if (tunnel_bts(data))
...@@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, ...@@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
struct nlattr *opt = tca[TCA_OPTIONS-1]; struct nlattr *opt = tca[TCA_OPTIONS-1];
struct nlattr *tb[TCA_RSVP_MAX + 1]; struct nlattr *tb[TCA_RSVP_MAX + 1];
struct tcf_exts e; struct tcf_exts e;
unsigned h1, h2; unsigned int h1, h2;
__be32 *dst; __be32 *dst;
int err; int err;
...@@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, ...@@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
if (err < 0) if (err < 0)
return err; return err;
if ((f = (struct rsvp_filter*)*arg) != NULL) { f = (struct rsvp_filter *)*arg;
if (f) {
/* Node exists: adjust only classid */ /* Node exists: adjust only classid */
if (f->handle != handle && handle) if (f->handle != handle && handle)
...@@ -500,7 +501,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, ...@@ -500,7 +501,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
goto errout; goto errout;
} }
for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) { for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
pinfo && pinfo->protocol == s->protocol && pinfo && pinfo->protocol == s->protocol &&
memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 && memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
...@@ -523,7 +524,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, ...@@ -523,7 +524,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
tcf_exts_change(tp, &f->exts, &e); tcf_exts_change(tp, &f->exts, &e);
for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next) for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask) if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
break; break;
f->next = *fp; f->next = *fp;
wmb(); wmb();
...@@ -567,7 +568,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, ...@@ -567,7 +568,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{ {
struct rsvp_head *head = tp->root; struct rsvp_head *head = tp->root;
unsigned h, h1; unsigned int h, h1;
if (arg->stop) if (arg->stop)
return; return;
...@@ -598,7 +599,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) ...@@ -598,7 +599,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t) struct sk_buff *skb, struct tcmsg *t)
{ {
struct rsvp_filter *f = (struct rsvp_filter*)fh; struct rsvp_filter *f = (struct rsvp_filter *)fh;
struct rsvp_session *s; struct rsvp_session *s;
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest; struct nlattr *nest;
...@@ -624,7 +625,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, ...@@ -624,7 +625,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo); NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
if (f->res.classid) if (f->res.classid)
NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid); NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
if (((f->handle>>8)&0xFF) != 16) if (((f->handle >> 8) & 0xFF) != 16)
NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src); NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
......
...@@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle, ...@@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
* of the hashing index is below the threshold. * of the hashing index is below the threshold.
*/ */
if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD) if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
cp.hash = (cp.mask >> cp.shift)+1; cp.hash = (cp.mask >> cp.shift) + 1;
else else
cp.hash = DEFAULT_HASH_SIZE; cp.hash = DEFAULT_HASH_SIZE;
} }
......
...@@ -42,8 +42,7 @@ ...@@ -42,8 +42,7 @@
#include <net/act_api.h> #include <net/act_api.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
struct tc_u_knode struct tc_u_knode {
{
struct tc_u_knode *next; struct tc_u_knode *next;
u32 handle; u32 handle;
struct tc_u_hnode *ht_up; struct tc_u_hnode *ht_up;
...@@ -63,19 +62,17 @@ struct tc_u_knode ...@@ -63,19 +62,17 @@ struct tc_u_knode
struct tc_u32_sel sel; struct tc_u32_sel sel;
}; };
struct tc_u_hnode struct tc_u_hnode {
{
struct tc_u_hnode *next; struct tc_u_hnode *next;
u32 handle; u32 handle;
u32 prio; u32 prio;
struct tc_u_common *tp_c; struct tc_u_common *tp_c;
int refcnt; int refcnt;
unsigned divisor; unsigned int divisor;
struct tc_u_knode *ht[1]; struct tc_u_knode *ht[1];
}; };
struct tc_u_common struct tc_u_common {
{
struct tc_u_hnode *hlist; struct tc_u_hnode *hlist;
struct Qdisc *q; struct Qdisc *q;
int refcnt; int refcnt;
...@@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = { ...@@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = {
.police = TCA_U32_POLICE .police = TCA_U32_POLICE
}; };
static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift) static inline unsigned int u32_hash_fold(__be32 key,
const struct tc_u32_sel *sel,
u8 fshift)
{ {
unsigned h = ntohl(key & sel->hmask)>>fshift; unsigned int h = ntohl(key & sel->hmask) >> fshift;
return h; return h;
} }
...@@ -101,7 +100,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re ...@@ -101,7 +100,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
unsigned int off; unsigned int off;
} stack[TC_U32_MAXDEPTH]; } stack[TC_U32_MAXDEPTH];
struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
unsigned int off = skb_network_offset(skb); unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n; struct tc_u_knode *n;
int sdepth = 0; int sdepth = 0;
...@@ -120,7 +119,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re ...@@ -120,7 +119,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
struct tc_u32_key *key = n->sel.keys; struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
n->pf->rcnt +=1; n->pf->rcnt += 1;
j = 0; j = 0;
#endif #endif
...@@ -133,7 +132,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re ...@@ -133,7 +132,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
} }
#endif #endif
for (i = n->sel.nkeys; i>0; i--, key++) { for (i = n->sel.nkeys; i > 0; i--, key++) {
int toff = off + key->off + (off2 & key->offmask); int toff = off + key->off + (off2 & key->offmask);
__be32 *data, _data; __be32 *data, _data;
...@@ -148,13 +147,13 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re ...@@ -148,13 +147,13 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
goto next_knode; goto next_knode;
} }
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
n->pf->kcnts[j] +=1; n->pf->kcnts[j] += 1;
j++; j++;
#endif #endif
} }
if (n->ht_down == NULL) { if (n->ht_down == NULL) {
check_terminal: check_terminal:
if (n->sel.flags&TC_U32_TERMINAL) { if (n->sel.flags & TC_U32_TERMINAL) {
*res = n->res; *res = n->res;
#ifdef CONFIG_NET_CLS_IND #ifdef CONFIG_NET_CLS_IND
...@@ -164,7 +163,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re ...@@ -164,7 +163,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
} }
#endif #endif
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
n->pf->rhit +=1; n->pf->rhit += 1;
#endif #endif
r = tcf_exts_exec(skb, &n->exts, res); r = tcf_exts_exec(skb, &n->exts, res);
if (r < 0) { if (r < 0) {
...@@ -197,10 +196,10 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re ...@@ -197,10 +196,10 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
sel = ht->divisor & u32_hash_fold(*data, &n->sel, sel = ht->divisor & u32_hash_fold(*data, &n->sel,
n->fshift); n->fshift);
} }
if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
goto next_ht; goto next_ht;
if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3; off2 = n->sel.off + 3;
if (n->sel.flags & TC_U32_VAROFFSET) { if (n->sel.flags & TC_U32_VAROFFSET) {
__be16 *data, _data; __be16 *data, _data;
...@@ -215,7 +214,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re ...@@ -215,7 +214,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
} }
off2 &= ~3; off2 &= ~3;
} }
if (n->sel.flags&TC_U32_EAT) { if (n->sel.flags & TC_U32_EAT) {
off += off2; off += off2;
off2 = 0; off2 = 0;
} }
...@@ -236,11 +235,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re ...@@ -236,11 +235,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
deadloop: deadloop:
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "cls_u32: dead loop\n"); pr_warning("cls_u32: dead loop\n");
return -1; return -1;
} }
static __inline__ struct tc_u_hnode * static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{ {
struct tc_u_hnode *ht; struct tc_u_hnode *ht;
...@@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) ...@@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
return ht; return ht;
} }
static __inline__ struct tc_u_knode * static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle) u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{ {
unsigned sel; unsigned int sel;
struct tc_u_knode *n = NULL; struct tc_u_knode *n = NULL;
sel = TC_U32_HASH(handle); sel = TC_U32_HASH(handle);
...@@ -300,7 +299,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c) ...@@ -300,7 +299,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c)
do { do {
if (++tp_c->hgenerator == 0x7FF) if (++tp_c->hgenerator == 0x7FF)
tp_c->hgenerator = 1; tp_c->hgenerator = 1;
} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20)); } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0; return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
} }
...@@ -378,9 +377,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key) ...@@ -378,9 +377,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{ {
struct tc_u_knode *n; struct tc_u_knode *n;
unsigned h; unsigned int h;
for (h=0; h<=ht->divisor; h++) { for (h = 0; h <= ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) { while ((n = ht->ht[h]) != NULL) {
ht->ht[h] = n->next; ht->ht[h] = n->next;
...@@ -446,13 +445,13 @@ static void u32_destroy(struct tcf_proto *tp) ...@@ -446,13 +445,13 @@ static void u32_destroy(struct tcf_proto *tp)
static int u32_delete(struct tcf_proto *tp, unsigned long arg) static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{ {
struct tc_u_hnode *ht = (struct tc_u_hnode*)arg; struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
if (ht == NULL) if (ht == NULL)
return 0; return 0;
if (TC_U32_KEY(ht->handle)) if (TC_U32_KEY(ht->handle))
return u32_delete_key(tp, (struct tc_u_knode*)ht); return u32_delete_key(tp, (struct tc_u_knode *)ht);
if (tp->root == ht) if (tp->root == ht)
return -EINVAL; return -EINVAL;
...@@ -470,14 +469,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg) ...@@ -470,14 +469,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{ {
struct tc_u_knode *n; struct tc_u_knode *n;
unsigned i = 0x7FF; unsigned int i = 0x7FF;
for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next) for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
if (i < TC_U32_NODE(n->handle)) if (i < TC_U32_NODE(n->handle))
i = TC_U32_NODE(n->handle); i = TC_U32_NODE(n->handle);
i++; i++;
return handle|(i>0xFFF ? 0xFFF : i); return handle | (i > 0xFFF ? 0xFFF : i);
} }
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
...@@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, ...@@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (err < 0) if (err < 0)
return err; return err;
if ((n = (struct tc_u_knode*)*arg) != NULL) { n = (struct tc_u_knode *)*arg;
if (n) {
if (TC_U32_KEY(n->handle) == 0) if (TC_U32_KEY(n->handle) == 0)
return -EINVAL; return -EINVAL;
...@@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, ...@@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
} }
if (tb[TCA_U32_DIVISOR]) { if (tb[TCA_U32_DIVISOR]) {
unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
if (--divisor > 0x100) if (--divisor > 0x100)
return -EINVAL; return -EINVAL;
...@@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, ...@@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (handle == 0) if (handle == 0)
return -ENOMEM; return -ENOMEM;
} }
ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
if (ht == NULL) if (ht == NULL)
return -ENOBUFS; return -ENOBUFS;
ht->tp_c = tp_c; ht->tp_c = tp_c;
...@@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) ...@@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
struct tc_u_common *tp_c = tp->data; struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht; struct tc_u_hnode *ht;
struct tc_u_knode *n; struct tc_u_knode *n;
unsigned h; unsigned int h;
if (arg->stop) if (arg->stop)
return; return;
...@@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) ...@@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int u32_dump(struct tcf_proto *tp, unsigned long fh, static int u32_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t) struct sk_buff *skb, struct tcmsg *t)
{ {
struct tc_u_knode *n = (struct tc_u_knode*)fh; struct tc_u_knode *n = (struct tc_u_knode *)fh;
struct nlattr *nest; struct nlattr *nest;
if (n == NULL) if (n == NULL)
...@@ -730,8 +730,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, ...@@ -730,8 +730,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure; goto nla_put_failure;
if (TC_U32_KEY(n->handle) == 0) { if (TC_U32_KEY(n->handle) == 0) {
struct tc_u_hnode *ht = (struct tc_u_hnode*)fh; struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
u32 divisor = ht->divisor+1; u32 divisor = ht->divisor + 1;
NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
} else { } else {
NLA_PUT(skb, TCA_U32_SEL, NLA_PUT(skb, TCA_U32_SEL,
...@@ -755,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, ...@@ -755,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure; goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND #ifdef CONFIG_NET_CLS_IND
if(strlen(n->indev)) if (strlen(n->indev))
NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
#endif #endif
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
......
...@@ -33,40 +33,41 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em, ...@@ -33,40 +33,41 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
return 0; return 0;
switch (cmp->align) { switch (cmp->align) {
case TCF_EM_ALIGN_U8: case TCF_EM_ALIGN_U8:
val = *ptr; val = *ptr;
break; break;
case TCF_EM_ALIGN_U16: case TCF_EM_ALIGN_U16:
val = get_unaligned_be16(ptr); val = get_unaligned_be16(ptr);
if (cmp_needs_transformation(cmp)) if (cmp_needs_transformation(cmp))
val = be16_to_cpu(val); val = be16_to_cpu(val);
break; break;
case TCF_EM_ALIGN_U32: case TCF_EM_ALIGN_U32:
/* Worth checking boundries? The branching seems /* Worth checking boundries? The branching seems
* to get worse. Visit again. */ * to get worse. Visit again.
val = get_unaligned_be32(ptr); */
val = get_unaligned_be32(ptr);
if (cmp_needs_transformation(cmp)) if (cmp_needs_transformation(cmp))
val = be32_to_cpu(val); val = be32_to_cpu(val);
break; break;
default: default:
return 0; return 0;
} }
if (cmp->mask) if (cmp->mask)
val &= cmp->mask; val &= cmp->mask;
switch (cmp->opnd) { switch (cmp->opnd) {
case TCF_EM_OPND_EQ: case TCF_EM_OPND_EQ:
return val == cmp->val; return val == cmp->val;
case TCF_EM_OPND_LT: case TCF_EM_OPND_LT:
return val < cmp->val; return val < cmp->val;
case TCF_EM_OPND_GT: case TCF_EM_OPND_GT:
return val > cmp->val; return val > cmp->val;
} }
return 0; return 0;
......
...@@ -73,21 +73,18 @@ ...@@ -73,21 +73,18 @@
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
#include <net/sock.h> #include <net/sock.h>
struct meta_obj struct meta_obj {
{
unsigned long value; unsigned long value;
unsigned int len; unsigned int len;
}; };
struct meta_value struct meta_value {
{
struct tcf_meta_val hdr; struct tcf_meta_val hdr;
unsigned long val; unsigned long val;
unsigned int len; unsigned int len;
}; };
struct meta_match struct meta_match {
{
struct meta_value lvalue; struct meta_value lvalue;
struct meta_value rvalue; struct meta_value rvalue;
}; };
...@@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend) ...@@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend)
* Meta value collectors assignment table * Meta value collectors assignment table
**************************************************************************/ **************************************************************************/
struct meta_ops struct meta_ops {
{
void (*get)(struct sk_buff *, struct tcf_pkt_info *, void (*get)(struct sk_buff *, struct tcf_pkt_info *,
struct meta_value *, struct meta_obj *, int *); struct meta_value *, struct meta_obj *, int *);
}; };
...@@ -494,7 +490,7 @@ struct meta_ops ...@@ -494,7 +490,7 @@ struct meta_ops
/* Meta value operations table listing all meta value collectors and /* Meta value operations table listing all meta value collectors and
* assigns them to a type and meta id. */ * assigns them to a type and meta id. */
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
[TCF_META_TYPE_VAR] = { [TCF_META_TYPE_VAR] = {
[META_ID(DEV)] = META_FUNC(var_dev), [META_ID(DEV)] = META_FUNC(var_dev),
[META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if), [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
...@@ -550,7 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { ...@@ -550,7 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
} }
}; };
static inline struct meta_ops * meta_ops(struct meta_value *val) static inline struct meta_ops *meta_ops(struct meta_value *val)
{ {
return &__meta_ops[meta_type(val)][meta_id(val)]; return &__meta_ops[meta_type(val)][meta_id(val)];
} }
...@@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) ...@@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{ {
if (v->len == sizeof(unsigned long)) if (v->len == sizeof(unsigned long))
NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val); NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
else if (v->len == sizeof(u32)) { else if (v->len == sizeof(u32))
NLA_PUT_U32(skb, tlv, v->val); NLA_PUT_U32(skb, tlv, v->val);
}
return 0; return 0;
...@@ -663,8 +658,7 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) ...@@ -663,8 +658,7 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
* Type specific operations table * Type specific operations table
**************************************************************************/ **************************************************************************/
struct meta_type_ops struct meta_type_ops {
{
void (*destroy)(struct meta_value *); void (*destroy)(struct meta_value *);
int (*compare)(struct meta_obj *, struct meta_obj *); int (*compare)(struct meta_obj *, struct meta_obj *);
int (*change)(struct meta_value *, struct nlattr *); int (*change)(struct meta_value *, struct nlattr *);
...@@ -672,7 +666,7 @@ struct meta_type_ops ...@@ -672,7 +666,7 @@ struct meta_type_ops
int (*dump)(struct sk_buff *, struct meta_value *, int); int (*dump)(struct sk_buff *, struct meta_value *, int);
}; };
static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = { static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
[TCF_META_TYPE_VAR] = { [TCF_META_TYPE_VAR] = {
.destroy = meta_var_destroy, .destroy = meta_var_destroy,
.compare = meta_var_compare, .compare = meta_var_compare,
...@@ -688,7 +682,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = { ...@@ -688,7 +682,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
} }
}; };
static inline struct meta_type_ops * meta_type_ops(struct meta_value *v) static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
{ {
return &__meta_type_ops[meta_type(v)]; return &__meta_type_ops[meta_type(v)];
} }
...@@ -713,7 +707,7 @@ static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info, ...@@ -713,7 +707,7 @@ static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
return err; return err;
if (meta_type_ops(v)->apply_extras) if (meta_type_ops(v)->apply_extras)
meta_type_ops(v)->apply_extras(v, dst); meta_type_ops(v)->apply_extras(v, dst);
return 0; return 0;
} }
...@@ -732,12 +726,12 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m, ...@@ -732,12 +726,12 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value); r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
switch (meta->lvalue.hdr.op) { switch (meta->lvalue.hdr.op) {
case TCF_EM_OPND_EQ: case TCF_EM_OPND_EQ:
return !r; return !r;
case TCF_EM_OPND_LT: case TCF_EM_OPND_LT:
return r < 0; return r < 0;
case TCF_EM_OPND_GT: case TCF_EM_OPND_GT:
return r > 0; return r > 0;
} }
return 0; return 0;
...@@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla) ...@@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
static inline int meta_is_supported(struct meta_value *val) static inline int meta_is_supported(struct meta_value *val)
{ {
return (!meta_id(val) || meta_ops(val)->get); return !meta_id(val) || meta_ops(val)->get;
} }
static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = { static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
......
...@@ -18,8 +18,7 @@ ...@@ -18,8 +18,7 @@
#include <linux/tc_ematch/tc_em_nbyte.h> #include <linux/tc_ematch/tc_em_nbyte.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
struct nbyte_data struct nbyte_data {
{
struct tcf_em_nbyte hdr; struct tcf_em_nbyte hdr;
char pattern[0]; char pattern[0];
}; };
......
...@@ -19,8 +19,7 @@ ...@@ -19,8 +19,7 @@
#include <linux/tc_ematch/tc_em_text.h> #include <linux/tc_ematch/tc_em_text.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
struct text_match struct text_match {
{
u16 from_offset; u16 from_offset;
u16 to_offset; u16 to_offset;
u8 from_layer; u8 from_layer;
......
...@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em, ...@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
if (!tcf_valid_offset(skb, ptr, sizeof(u32))) if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
return 0; return 0;
return !(((*(__be32*) ptr) ^ key->val) & key->mask); return !(((*(__be32 *) ptr) ^ key->val) & key->mask);
} }
static struct tcf_ematch_ops em_u32_ops = { static struct tcf_ematch_ops em_u32_ops = {
......
...@@ -93,7 +93,7 @@ ...@@ -93,7 +93,7 @@
static LIST_HEAD(ematch_ops); static LIST_HEAD(ematch_ops);
static DEFINE_RWLOCK(ematch_mod_lock); static DEFINE_RWLOCK(ematch_mod_lock);
static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind) static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
{ {
struct tcf_ematch_ops *e = NULL; struct tcf_ematch_ops *e = NULL;
...@@ -163,8 +163,8 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops) ...@@ -163,8 +163,8 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops)
} }
EXPORT_SYMBOL(tcf_em_unregister); EXPORT_SYMBOL(tcf_em_unregister);
static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree, static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
int index) int index)
{ {
return &tree->matches[index]; return &tree->matches[index];
} }
...@@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp, ...@@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em_hdr->kind == TCF_EM_CONTAINER) { if (em_hdr->kind == TCF_EM_CONTAINER) {
/* Special ematch called "container", carries an index /* Special ematch called "container", carries an index
* referencing an external ematch sequence. */ * referencing an external ematch sequence.
*/
u32 ref; u32 ref;
if (data_len < sizeof(ref)) if (data_len < sizeof(ref))
...@@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp, ...@@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
goto errout; goto errout;
/* We do not allow backward jumps to avoid loops and jumps /* We do not allow backward jumps to avoid loops and jumps
* to our own position are of course illegal. */ * to our own position are of course illegal.
*/
if (ref <= idx) if (ref <= idx)
goto errout; goto errout;
...@@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp, ...@@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* which automatically releases the reference again, therefore * which automatically releases the reference again, therefore
* the module MUST not be given back under any circumstances * the module MUST not be given back under any circumstances
* here. Be aware, the destroy function assumes that the * here. Be aware, the destroy function assumes that the
* module is held if the ops field is non zero. */ * module is held if the ops field is non zero.
*/
em->ops = tcf_em_lookup(em_hdr->kind); em->ops = tcf_em_lookup(em_hdr->kind);
if (em->ops == NULL) { if (em->ops == NULL) {
...@@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp, ...@@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em->ops) { if (em->ops) {
/* We dropped the RTNL mutex in order to /* We dropped the RTNL mutex in order to
* perform the module load. Tell the caller * perform the module load. Tell the caller
* to replay the request. */ * to replay the request.
*/
module_put(em->ops->owner); module_put(em->ops->owner);
err = -EAGAIN; err = -EAGAIN;
} }
...@@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp, ...@@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
} }
/* ematch module provides expected length of data, so we /* ematch module provides expected length of data, so we
* can do a basic sanity check. */ * can do a basic sanity check.
*/
if (em->ops->datalen && data_len < em->ops->datalen) if (em->ops->datalen && data_len < em->ops->datalen)
goto errout; goto errout;
...@@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp, ...@@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* TCF_EM_SIMPLE may be specified stating that the * TCF_EM_SIMPLE may be specified stating that the
* data only consists of a u32 integer and the module * data only consists of a u32 integer and the module
* does not expected a memory reference but rather * does not expected a memory reference but rather
* the value carried. */ * the value carried.
*/
if (em_hdr->flags & TCF_EM_SIMPLE) { if (em_hdr->flags & TCF_EM_SIMPLE) {
if (data_len < sizeof(u32)) if (data_len < sizeof(u32))
goto errout; goto errout;
...@@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, ...@@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
* The array of rt attributes is parsed in the order as they are * The array of rt attributes is parsed in the order as they are
* provided, their type must be incremental from 1 to n. Even * provided, their type must be incremental from 1 to n. Even
* if it does not serve any real purpose, a failure of sticking * if it does not serve any real purpose, a failure of sticking
* to this policy will result in parsing failure. */ * to this policy will result in parsing failure.
*/
for (idx = 0; nla_ok(rt_match, list_len); idx++) { for (idx = 0; nla_ok(rt_match, list_len); idx++) {
err = -EINVAL; err = -EINVAL;
...@@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, ...@@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
/* Check if the number of matches provided by userspace actually /* Check if the number of matches provided by userspace actually
* complies with the array of matches. The number was used for * complies with the array of matches. The number was used for
* the validation of references and a mismatch could lead to * the validation of references and a mismatch could lead to
* undefined references during the matching process. */ * undefined references during the matching process.
*/
if (idx != tree_hdr->nmatches) { if (idx != tree_hdr->nmatches) {
err = -EINVAL; err = -EINVAL;
goto errout_abort; goto errout_abort;
...@@ -449,7 +457,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) ...@@ -449,7 +457,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
.flags = em->flags .flags = em->flags
}; };
NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr); NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
if (em->ops && em->ops->dump) { if (em->ops && em->ops->dump) {
if (em->ops->dump(skb, em) < 0) if (em->ops->dump(skb, em) < 0)
...@@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em, ...@@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info) struct tcf_pkt_info *info)
{ {
int r = em->ops->match(skb, em, info); int r = em->ops->match(skb, em, info);
return tcf_em_is_inverted(em) ? !r : r; return tcf_em_is_inverted(em) ? !r : r;
} }
...@@ -527,8 +536,8 @@ int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree, ...@@ -527,8 +536,8 @@ int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree,
stack_overflow: stack_overflow:
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "tc ematch: local stack overflow," pr_warning("tc ematch: local stack overflow,"
" increase NET_EMATCH_STACK\n"); " increase NET_EMATCH_STACK\n");
return -1; return -1;
} }
EXPORT_SYMBOL(__tcf_em_tree_match); EXPORT_SYMBOL(__tcf_em_tree_match);
This diff is collapsed.
...@@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg) ...@@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
* creation), and one for the reference held when calling delete. * creation), and one for the reference held when calling delete.
*/ */
if (flow->ref < 2) { if (flow->ref < 2) {
printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref); pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
return -EINVAL; return -EINVAL;
} }
if (flow->ref > 2) if (flow->ref > 2)
...@@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} }
} }
flow = NULL; flow = NULL;
done: done:
; ;
} }
if (!flow) if (!flow) {
flow = &p->link; flow = &p->link;
else { } else {
if (flow->vcc) if (flow->vcc)
ATM_SKB(skb)->atm_options = flow->vcc->atm_options; ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
/*@@@ looks good ... but it's not supposed to work :-) */ /*@@@ looks good ... but it's not supposed to work :-) */
...@@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch) ...@@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
list_for_each_entry_safe(flow, tmp, &p->flows, list) { list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1) if (flow->ref > 1)
printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow, pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
flow->ref);
atm_tc_put(sch, (unsigned long)flow); atm_tc_put(sch, (unsigned long)flow);
} }
tasklet_kill(&p->task); tasklet_kill(&p->task);
...@@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, ...@@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
} }
if (flow->excess) if (flow->excess)
NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid); NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
else { else
NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0); NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
}
nla_nest_end(skb, nest); nla_nest_end(skb, nest);
return skb->len; return skb->len;
......
This diff is collapsed.
...@@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, ...@@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
mask = nla_get_u8(tb[TCA_DSMARK_MASK]); mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
if (tb[TCA_DSMARK_VALUE]) if (tb[TCA_DSMARK_VALUE])
p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
if (tb[TCA_DSMARK_MASK]) if (tb[TCA_DSMARK_MASK])
p->mask[*arg-1] = mask; p->mask[*arg - 1] = mask;
err = 0; err = 0;
...@@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg) ...@@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
if (!dsmark_valid_index(p, arg)) if (!dsmark_valid_index(p, arg))
return -EINVAL; return -EINVAL;
p->mask[arg-1] = 0xff; p->mask[arg - 1] = 0xff;
p->value[arg-1] = 0; p->value[arg - 1] = 0;
return 0; return 0;
} }
...@@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker) ...@@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
if (p->mask[i] == 0xff && !p->value[i]) if (p->mask[i] == 0xff && !p->value[i])
goto ignore; goto ignore;
if (walker->count >= walker->skip) { if (walker->count >= walker->skip) {
if (walker->fn(sch, i+1, walker) < 0) { if (walker->fn(sch, i + 1, walker) < 0) {
walker->stop = 1; walker->stop = 1;
break; break;
} }
...@@ -304,9 +304,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) ...@@ -304,9 +304,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
* and don't need yet another qdisc as a bypass. * and don't need yet another qdisc as a bypass.
*/ */
if (p->mask[index] != 0xff || p->value[index]) if (p->mask[index] != 0xff || p->value[index])
printk(KERN_WARNING pr_warning("dsmark_dequeue: unsupported protocol %d\n",
"dsmark_dequeue: unsupported protocol %d\n", ntohs(skb->protocol));
ntohs(skb->protocol));
break; break;
} }
...@@ -424,14 +423,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl, ...@@ -424,14 +423,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
if (!dsmark_valid_index(p, cl)) if (!dsmark_valid_index(p, cl))
return -EINVAL; return -EINVAL;
tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1); tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
tcm->tcm_info = p->q->handle; tcm->tcm_info = p->q->handle;
opts = nla_nest_start(skb, TCA_OPTIONS); opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL) if (opts == NULL)
goto nla_put_failure; goto nla_put_failure;
NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]); NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]); NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
return nla_nest_end(skb, opts); return nla_nest_end(skb, opts);
......
...@@ -19,12 +19,11 @@ ...@@ -19,12 +19,11 @@
/* 1 band FIFO pseudo-"scheduler" */ /* 1 band FIFO pseudo-"scheduler" */
struct fifo_sched_data struct fifo_sched_data {
{
u32 limit; u32 limit;
}; };
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct fifo_sched_data *q = qdisc_priv(sch); struct fifo_sched_data *q = qdisc_priv(sch);
...@@ -34,7 +33,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -34,7 +33,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch); return qdisc_reshape_fail(skb, sch);
} }
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct fifo_sched_data *q = qdisc_priv(sch); struct fifo_sched_data *q = qdisc_priv(sch);
...@@ -44,7 +43,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -44,7 +43,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch); return qdisc_reshape_fail(skb, sch);
} }
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch) static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct sk_buff *skb_head; struct sk_buff *skb_head;
struct fifo_sched_data *q = qdisc_priv(sch); struct fifo_sched_data *q = qdisc_priv(sch);
......
...@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, ...@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
*/ */
kfree_skb(skb); kfree_skb(skb);
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "Dead loop on netdevice %s, " pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
"fix it urgently!\n", dev_queue->dev->name); dev_queue->dev->name);
ret = qdisc_qlen(q); ret = qdisc_qlen(q);
} else { } else {
/* /*
...@@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, ...@@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
} else { } else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */ /* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
printk(KERN_WARNING "BUG %s code %d qlen %d\n", pr_warning("BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen); dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q); ret = dev_requeue_skb(skb, q);
} }
...@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = { ...@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = {
}; };
static const u8 prio2band[TC_PRIO_MAX+1] = static const u8 prio2band[TC_PRIO_MAX + 1] = {
{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 }; 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
};
/* 3-band FIFO queue: old style, but should be a bit faster than /* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination. generic prio+fifo combination.
...@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv, ...@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
return priv->q + band; return priv->q + band;
} }
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{ {
if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
int band = prio2band[skb->priority & TC_PRIO_MAX]; int band = prio2band[skb->priority & TC_PRIO_MAX];
...@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) ...@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
return qdisc_drop(skb, qdisc); return qdisc_drop(skb, qdisc);
} }
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{ {
struct pfifo_fast_priv *priv = qdisc_priv(qdisc); struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap]; int band = bitmap2band[priv->bitmap];
...@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) ...@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
return NULL; return NULL;
} }
static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{ {
struct pfifo_fast_priv *priv = qdisc_priv(qdisc); struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap]; int band = bitmap2band[priv->bitmap];
...@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) ...@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
return NULL; return NULL;
} }
static void pfifo_fast_reset(struct Qdisc* qdisc) static void pfifo_fast_reset(struct Qdisc *qdisc)
{ {
int prio; int prio;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc); struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
...@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) ...@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{ {
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
return skb->len; return skb->len;
...@@ -681,20 +682,18 @@ static void attach_one_default_qdisc(struct net_device *dev, ...@@ -681,20 +682,18 @@ static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue, struct netdev_queue *dev_queue,
void *_unused) void *_unused)
{ {
struct Qdisc *qdisc; struct Qdisc *qdisc = &noqueue_qdisc;
if (dev->tx_queue_len) { if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev_queue, qdisc = qdisc_create_dflt(dev_queue,
&pfifo_fast_ops, TC_H_ROOT); &pfifo_fast_ops, TC_H_ROOT);
if (!qdisc) { if (!qdisc) {
printk(KERN_INFO "%s: activation failed\n", dev->name); netdev_info(dev, "activation failed\n");
return; return;
} }
/* Can by-pass the queue discipline for default qdisc */ /* Can by-pass the queue discipline for default qdisc */
qdisc->flags |= TCQ_F_CAN_BYPASS; qdisc->flags |= TCQ_F_CAN_BYPASS;
} else {
qdisc = &noqueue_qdisc;
} }
dev_queue->qdisc_sleeping = qdisc; dev_queue->qdisc_sleeping = qdisc;
} }
......
...@@ -32,8 +32,7 @@ ...@@ -32,8 +32,7 @@
struct gred_sched_data; struct gred_sched_data;
struct gred_sched; struct gred_sched;
struct gred_sched_data struct gred_sched_data {
{
u32 limit; /* HARD maximal queue length */ u32 limit; /* HARD maximal queue length */
u32 DP; /* the drop pramaters */ u32 DP; /* the drop pramaters */
u32 bytesin; /* bytes seen on virtualQ so far*/ u32 bytesin; /* bytes seen on virtualQ so far*/
...@@ -50,8 +49,7 @@ enum { ...@@ -50,8 +49,7 @@ enum {
GRED_RIO_MODE, GRED_RIO_MODE,
}; };
struct gred_sched struct gred_sched {
{
struct gred_sched_data *tab[MAX_DPs]; struct gred_sched_data *tab[MAX_DPs];
unsigned long flags; unsigned long flags;
u32 red_flags; u32 red_flags;
...@@ -150,17 +148,18 @@ static inline int gred_use_harddrop(struct gred_sched *t) ...@@ -150,17 +148,18 @@ static inline int gred_use_harddrop(struct gred_sched *t)
return t->red_flags & TC_RED_HARDDROP; return t->red_flags & TC_RED_HARDDROP;
} }
static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct gred_sched_data *q=NULL; struct gred_sched_data *q = NULL;
struct gred_sched *t= qdisc_priv(sch); struct gred_sched *t = qdisc_priv(sch);
unsigned long qavg = 0; unsigned long qavg = 0;
u16 dp = tc_index_to_dp(skb); u16 dp = tc_index_to_dp(skb);
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
dp = t->def; dp = t->def;
if ((q = t->tab[dp]) == NULL) { q = t->tab[dp];
if (!q) {
/* Pass through packets not assigned to a DP /* Pass through packets not assigned to a DP
* if no default DP has been configured. This * if no default DP has been configured. This
* allows for DP flows to be left untouched. * allows for DP flows to be left untouched.
...@@ -183,7 +182,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -183,7 +182,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
for (i = 0; i < t->DPs; i++) { for (i = 0; i < t->DPs; i++) {
if (t->tab[i] && t->tab[i]->prio < q->prio && if (t->tab[i] && t->tab[i]->prio < q->prio &&
!red_is_idling(&t->tab[i]->parms)) !red_is_idling(&t->tab[i]->parms))
qavg +=t->tab[i]->parms.qavg; qavg += t->tab[i]->parms.qavg;
} }
} }
...@@ -203,28 +202,28 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -203,28 +202,28 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
gred_store_wred_set(t, q); gred_store_wred_set(t, q);
switch (red_action(&q->parms, q->parms.qavg + qavg)) { switch (red_action(&q->parms, q->parms.qavg + qavg)) {
case RED_DONT_MARK: case RED_DONT_MARK:
break; break;
case RED_PROB_MARK: case RED_PROB_MARK:
sch->qstats.overlimits++; sch->qstats.overlimits++;
if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++; q->stats.prob_drop++;
goto congestion_drop; goto congestion_drop;
} }
q->stats.prob_mark++; q->stats.prob_mark++;
break; break;
case RED_HARD_MARK: case RED_HARD_MARK:
sch->qstats.overlimits++; sch->qstats.overlimits++;
if (gred_use_harddrop(t) || !gred_use_ecn(t) || if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
!INET_ECN_set_ce(skb)) { !INET_ECN_set_ce(skb)) {
q->stats.forced_drop++; q->stats.forced_drop++;
goto congestion_drop; goto congestion_drop;
} }
q->stats.forced_mark++; q->stats.forced_mark++;
break; break;
} }
if (q->backlog + qdisc_pkt_len(skb) <= q->limit) { if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
...@@ -241,7 +240,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -241,7 +240,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return NET_XMIT_CN; return NET_XMIT_CN;
} }
static struct sk_buff *gred_dequeue(struct Qdisc* sch) static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch); struct gred_sched *t = qdisc_priv(sch);
...@@ -254,9 +253,9 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch) ...@@ -254,9 +253,9 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "GRED: Unable to relocate " pr_warning("GRED: Unable to relocate VQ 0x%x "
"VQ 0x%x after dequeue, screwing up " "after dequeue, screwing up "
"backlog.\n", tc_index_to_dp(skb)); "backlog.\n", tc_index_to_dp(skb));
} else { } else {
q->backlog -= qdisc_pkt_len(skb); q->backlog -= qdisc_pkt_len(skb);
...@@ -273,7 +272,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch) ...@@ -273,7 +272,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
return NULL; return NULL;
} }
static unsigned int gred_drop(struct Qdisc* sch) static unsigned int gred_drop(struct Qdisc *sch)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch); struct gred_sched *t = qdisc_priv(sch);
...@@ -286,9 +285,9 @@ static unsigned int gred_drop(struct Qdisc* sch) ...@@ -286,9 +285,9 @@ static unsigned int gred_drop(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "GRED: Unable to relocate " pr_warning("GRED: Unable to relocate VQ 0x%x "
"VQ 0x%x while dropping, screwing up " "while dropping, screwing up "
"backlog.\n", tc_index_to_dp(skb)); "backlog.\n", tc_index_to_dp(skb));
} else { } else {
q->backlog -= len; q->backlog -= len;
q->stats.other++; q->stats.other++;
...@@ -308,7 +307,7 @@ static unsigned int gred_drop(struct Qdisc* sch) ...@@ -308,7 +307,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
} }
static void gred_reset(struct Qdisc* sch) static void gred_reset(struct Qdisc *sch)
{ {
int i; int i;
struct gred_sched *t = qdisc_priv(sch); struct gred_sched *t = qdisc_priv(sch);
...@@ -369,8 +368,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps) ...@@ -369,8 +368,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
for (i = table->DPs; i < MAX_DPs; i++) { for (i = table->DPs; i < MAX_DPs; i++) {
if (table->tab[i]) { if (table->tab[i]) {
printk(KERN_WARNING "GRED: Warning: Destroying " pr_warning("GRED: Warning: Destroying "
"shadowed VQ 0x%x\n", i); "shadowed VQ 0x%x\n", i);
gred_destroy_vq(table->tab[i]); gred_destroy_vq(table->tab[i]);
table->tab[i] = NULL; table->tab[i] = NULL;
} }
......
...@@ -81,8 +81,7 @@ ...@@ -81,8 +81,7 @@
* that are expensive on 32-bit architectures. * that are expensive on 32-bit architectures.
*/ */
struct internal_sc struct internal_sc {
{
u64 sm1; /* scaled slope of the 1st segment */ u64 sm1; /* scaled slope of the 1st segment */
u64 ism1; /* scaled inverse-slope of the 1st segment */ u64 ism1; /* scaled inverse-slope of the 1st segment */
u64 dx; /* the x-projection of the 1st segment */ u64 dx; /* the x-projection of the 1st segment */
...@@ -92,8 +91,7 @@ struct internal_sc ...@@ -92,8 +91,7 @@ struct internal_sc
}; };
/* runtime service curve */ /* runtime service curve */
struct runtime_sc struct runtime_sc {
{
u64 x; /* current starting position on x-axis */ u64 x; /* current starting position on x-axis */
u64 y; /* current starting position on y-axis */ u64 y; /* current starting position on y-axis */
u64 sm1; /* scaled slope of the 1st segment */ u64 sm1; /* scaled slope of the 1st segment */
...@@ -104,15 +102,13 @@ struct runtime_sc ...@@ -104,15 +102,13 @@ struct runtime_sc
u64 ism2; /* scaled inverse-slope of the 2nd segment */ u64 ism2; /* scaled inverse-slope of the 2nd segment */
}; };
enum hfsc_class_flags enum hfsc_class_flags {
{
HFSC_RSC = 0x1, HFSC_RSC = 0x1,
HFSC_FSC = 0x2, HFSC_FSC = 0x2,
HFSC_USC = 0x4 HFSC_USC = 0x4
}; };
struct hfsc_class struct hfsc_class {
{
struct Qdisc_class_common cl_common; struct Qdisc_class_common cl_common;
unsigned int refcnt; /* usage count */ unsigned int refcnt; /* usage count */
...@@ -140,8 +136,8 @@ struct hfsc_class ...@@ -140,8 +136,8 @@ struct hfsc_class
u64 cl_cumul; /* cumulative work in bytes done by u64 cl_cumul; /* cumulative work in bytes done by
real-time criteria */ real-time criteria */
u64 cl_d; /* deadline*/ u64 cl_d; /* deadline*/
u64 cl_e; /* eligible time */ u64 cl_e; /* eligible time */
u64 cl_vt; /* virtual time */ u64 cl_vt; /* virtual time */
u64 cl_f; /* time when this class will fit for u64 cl_f; /* time when this class will fit for
link-sharing, max(myf, cfmin) */ link-sharing, max(myf, cfmin) */
...@@ -176,8 +172,7 @@ struct hfsc_class ...@@ -176,8 +172,7 @@ struct hfsc_class
unsigned long cl_nactive; /* number of active children */ unsigned long cl_nactive; /* number of active children */
}; };
struct hfsc_sched struct hfsc_sched {
{
u16 defcls; /* default class id */ u16 defcls; /* default class id */
struct hfsc_class root; /* root class */ struct hfsc_class root; /* root class */
struct Qdisc_class_hash clhash; /* class hash */ struct Qdisc_class_hash clhash; /* class hash */
...@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len) ...@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
if (go_active) { if (go_active) {
n = rb_last(&cl->cl_parent->vt_tree); n = rb_last(&cl->cl_parent->vt_tree);
if (n != NULL) { if (n != NULL) {
max_cl = rb_entry(n, struct hfsc_class,vt_node); max_cl = rb_entry(n, struct hfsc_class, vt_node);
/* /*
* set vt to the average of the min and max * set vt to the average of the min and max
* classes. if the parent's period didn't * classes. if the parent's period didn't
...@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) ...@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
return NULL; return NULL;
} }
#endif #endif
if ((cl = (struct hfsc_class *)res.class) == NULL) { cl = (struct hfsc_class *)res.class;
if ((cl = hfsc_find_class(res.classid, sch)) == NULL) if (!cl) {
cl = hfsc_find_class(res.classid, sch);
if (!cl)
break; /* filter selected invalid classid */ break; /* filter selected invalid classid */
if (cl->level >= head->level) if (cl->level >= head->level)
break; /* filter may only point downwards */ break; /* filter may only point downwards */
...@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc) ...@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
return -1; return -1;
} }
static inline int static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl) hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{ {
if ((cl->cl_flags & HFSC_RSC) && if ((cl->cl_flags & HFSC_RSC) &&
...@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch) ...@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
struct hfsc_class *cl; struct hfsc_class *cl;
u64 next_time = 0; u64 next_time = 0;
if ((cl = eltree_get_minel(q)) != NULL) cl = eltree_get_minel(q);
if (cl)
next_time = cl->cl_e; next_time = cl->cl_e;
if (q->root.cl_cfmin != 0) { if (q->root.cl_cfmin != 0) {
if (next_time == 0 || next_time > q->root.cl_cfmin) if (next_time == 0 || next_time > q->root.cl_cfmin)
...@@ -1626,7 +1624,8 @@ hfsc_dequeue(struct Qdisc *sch) ...@@ -1626,7 +1624,8 @@ hfsc_dequeue(struct Qdisc *sch)
* find the class with the minimum deadline among * find the class with the minimum deadline among
* the eligible classes. * the eligible classes.
*/ */
if ((cl = eltree_get_mindl(q, cur_time)) != NULL) { cl = eltree_get_mindl(q, cur_time);
if (cl) {
realtime = 1; realtime = 1;
} else { } else {
/* /*
......
This diff is collapsed.
...@@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch) ...@@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch)
unsigned int len; unsigned int len;
struct Qdisc *qdisc; struct Qdisc *qdisc;
for (band = q->bands-1; band >= 0; band--) { for (band = q->bands - 1; band >= 0; band--) {
qdisc = q->queues[band]; qdisc = q->queues[band];
if (qdisc->ops->drop) { if (qdisc->ops->drop) {
len = qdisc->ops->drop(qdisc); len = qdisc->ops->drop(qdisc);
...@@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt) ...@@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < q->max_bands; i++) for (i = 0; i < q->max_bands; i++)
q->queues[i] = &noop_qdisc; q->queues[i] = &noop_qdisc;
err = multiq_tune(sch,opt); err = multiq_tune(sch, opt);
if (err) if (err)
kfree(q->queues); kfree(q->queues);
...@@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, ...@@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
struct multiq_sched_data *q = qdisc_priv(sch); struct multiq_sched_data *q = qdisc_priv(sch);
tcm->tcm_handle |= TC_H_MIN(cl); tcm->tcm_handle |= TC_H_MIN(cl);
tcm->tcm_info = q->queues[cl-1]->handle; tcm->tcm_info = q->queues[cl - 1]->handle;
return 0; return 0;
} }
...@@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) ...@@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++; arg->count++;
continue; continue;
} }
if (arg->fn(sch, band+1, arg) < 0) { if (arg->fn(sch, band + 1, arg) < 0) {
arg->stop = 1; arg->stop = 1;
break; break;
} }
......
...@@ -211,8 +211,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -211,8 +211,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} }
cb = netem_skb_cb(skb); cb = netem_skb_cb(skb);
if (q->gap == 0 || /* not doing reordering */ if (q->gap == 0 || /* not doing reordering */
q->counter < q->gap || /* inside last reordering gap */ q->counter < q->gap || /* inside last reordering gap */
q->reorder < get_crandom(&q->reorder_cor)) { q->reorder < get_crandom(&q->reorder_cor)) {
psched_time_t now; psched_time_t now;
psched_tdiff_t delay; psched_tdiff_t delay;
...@@ -249,7 +249,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -249,7 +249,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret; return ret;
} }
static unsigned int netem_drop(struct Qdisc* sch) static unsigned int netem_drop(struct Qdisc *sch)
{ {
struct netem_sched_data *q = qdisc_priv(sch); struct netem_sched_data *q = qdisc_priv(sch);
unsigned int len = 0; unsigned int len = 0;
......
...@@ -22,8 +22,7 @@ ...@@ -22,8 +22,7 @@
#include <net/pkt_sched.h> #include <net/pkt_sched.h>
struct prio_sched_data struct prio_sched_data {
{
int bands; int bands;
struct tcf_proto *filter_list; struct tcf_proto *filter_list;
u8 prio2band[TC_PRIO_MAX+1]; u8 prio2band[TC_PRIO_MAX+1];
...@@ -54,7 +53,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) ...@@ -54,7 +53,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (!q->filter_list || err < 0) { if (!q->filter_list || err < 0) {
if (TC_H_MAJ(band)) if (TC_H_MAJ(band))
band = 0; band = 0;
return q->queues[q->prio2band[band&TC_PRIO_MAX]]; return q->queues[q->prio2band[band & TC_PRIO_MAX]];
} }
band = res.classid; band = res.classid;
} }
...@@ -107,7 +106,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch) ...@@ -107,7 +106,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch)
return NULL; return NULL;
} }
static struct sk_buff *prio_dequeue(struct Qdisc* sch) static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{ {
struct prio_sched_data *q = qdisc_priv(sch); struct prio_sched_data *q = qdisc_priv(sch);
int prio; int prio;
...@@ -124,7 +123,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch) ...@@ -124,7 +123,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
} }
static unsigned int prio_drop(struct Qdisc* sch) static unsigned int prio_drop(struct Qdisc *sch)
{ {
struct prio_sched_data *q = qdisc_priv(sch); struct prio_sched_data *q = qdisc_priv(sch);
int prio; int prio;
...@@ -143,24 +142,24 @@ static unsigned int prio_drop(struct Qdisc* sch) ...@@ -143,24 +142,24 @@ static unsigned int prio_drop(struct Qdisc* sch)
static void static void
prio_reset(struct Qdisc* sch) prio_reset(struct Qdisc *sch)
{ {
int prio; int prio;
struct prio_sched_data *q = qdisc_priv(sch); struct prio_sched_data *q = qdisc_priv(sch);
for (prio=0; prio<q->bands; prio++) for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]); qdisc_reset(q->queues[prio]);
sch->q.qlen = 0; sch->q.qlen = 0;
} }
static void static void
prio_destroy(struct Qdisc* sch) prio_destroy(struct Qdisc *sch)
{ {
int prio; int prio;
struct prio_sched_data *q = qdisc_priv(sch); struct prio_sched_data *q = qdisc_priv(sch);
tcf_destroy_chain(&q->filter_list); tcf_destroy_chain(&q->filter_list);
for (prio=0; prio<q->bands; prio++) for (prio = 0; prio < q->bands; prio++)
qdisc_destroy(q->queues[prio]); qdisc_destroy(q->queues[prio]);
} }
...@@ -177,7 +176,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) ...@@ -177,7 +176,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
return -EINVAL; return -EINVAL;
for (i=0; i<=TC_PRIO_MAX; i++) { for (i = 0; i <= TC_PRIO_MAX; i++) {
if (qopt->priomap[i] >= qopt->bands) if (qopt->priomap[i] >= qopt->bands)
return -EINVAL; return -EINVAL;
} }
...@@ -186,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) ...@@ -186,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
q->bands = qopt->bands; q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
struct Qdisc *child = q->queues[i]; struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc; q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) { if (child != &noop_qdisc) {
...@@ -196,9 +195,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) ...@@ -196,9 +195,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
} }
sch_tree_unlock(sch); sch_tree_unlock(sch);
for (i=0; i<q->bands; i++) { for (i = 0; i < q->bands; i++) {
if (q->queues[i] == &noop_qdisc) { if (q->queues[i] == &noop_qdisc) {
struct Qdisc *child, *old; struct Qdisc *child, *old;
child = qdisc_create_dflt(sch->dev_queue, child = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, &pfifo_qdisc_ops,
TC_H_MAKE(sch->handle, i + 1)); TC_H_MAKE(sch->handle, i + 1));
...@@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt) ...@@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
struct prio_sched_data *q = qdisc_priv(sch); struct prio_sched_data *q = qdisc_priv(sch);
int i; int i;
for (i=0; i<TCQ_PRIO_BANDS; i++) for (i = 0; i < TCQ_PRIO_BANDS; i++)
q->queues[i] = &noop_qdisc; q->queues[i] = &noop_qdisc;
if (opt == NULL) { if (opt == NULL) {
...@@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt) ...@@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
} else { } else {
int err; int err;
if ((err= prio_tune(sch, opt)) != 0) if ((err = prio_tune(sch, opt)) != 0)
return err; return err;
} }
return 0; return 0;
...@@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_prio_qopt opt; struct tc_prio_qopt opt;
opt.bands = q->bands; opt.bands = q->bands;
memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
...@@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) ...@@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++; arg->count++;
continue; continue;
} }
if (arg->fn(sch, prio+1, arg) < 0) { if (arg->fn(sch, prio + 1, arg) < 0) {
arg->stop = 1; arg->stop = 1;
break; break;
} }
...@@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) ...@@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
} }
} }
static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl) static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{ {
struct prio_sched_data *q = qdisc_priv(sch); struct prio_sched_data *q = qdisc_priv(sch);
......
...@@ -36,8 +36,7 @@ ...@@ -36,8 +36,7 @@
if RED works correctly. if RED works correctly.
*/ */
struct red_sched_data struct red_sched_data {
{
u32 limit; /* HARD maximal queue length */ u32 limit; /* HARD maximal queue length */
unsigned char flags; unsigned char flags;
struct red_parms parms; struct red_parms parms;
...@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q) ...@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q)
return q->flags & TC_RED_HARDDROP; return q->flags & TC_RED_HARDDROP;
} }
static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct red_sched_data *q = qdisc_priv(sch); struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc; struct Qdisc *child = q->qdisc;
...@@ -67,29 +66,29 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -67,29 +66,29 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
red_end_of_idle_period(&q->parms); red_end_of_idle_period(&q->parms);
switch (red_action(&q->parms, q->parms.qavg)) { switch (red_action(&q->parms, q->parms.qavg)) {
case RED_DONT_MARK: case RED_DONT_MARK:
break; break;
case RED_PROB_MARK: case RED_PROB_MARK:
sch->qstats.overlimits++; sch->qstats.overlimits++;
if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++; q->stats.prob_drop++;
goto congestion_drop; goto congestion_drop;
} }
q->stats.prob_mark++; q->stats.prob_mark++;
break; break;
case RED_HARD_MARK: case RED_HARD_MARK:
sch->qstats.overlimits++; sch->qstats.overlimits++;
if (red_use_harddrop(q) || !red_use_ecn(q) || if (red_use_harddrop(q) || !red_use_ecn(q) ||
!INET_ECN_set_ce(skb)) { !INET_ECN_set_ce(skb)) {
q->stats.forced_drop++; q->stats.forced_drop++;
goto congestion_drop; goto congestion_drop;
} }
q->stats.forced_mark++; q->stats.forced_mark++;
break; break;
} }
ret = qdisc_enqueue(skb, child); ret = qdisc_enqueue(skb, child);
...@@ -107,7 +106,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -107,7 +106,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return NET_XMIT_CN; return NET_XMIT_CN;
} }
static struct sk_buff * red_dequeue(struct Qdisc* sch) static struct sk_buff *red_dequeue(struct Qdisc *sch)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct red_sched_data *q = qdisc_priv(sch); struct red_sched_data *q = qdisc_priv(sch);
...@@ -122,7 +121,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch) ...@@ -122,7 +121,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
return skb; return skb;
} }
static struct sk_buff * red_peek(struct Qdisc* sch) static struct sk_buff *red_peek(struct Qdisc *sch)
{ {
struct red_sched_data *q = qdisc_priv(sch); struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc; struct Qdisc *child = q->qdisc;
...@@ -130,7 +129,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch) ...@@ -130,7 +129,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch)
return child->ops->peek(child); return child->ops->peek(child);
} }
static unsigned int red_drop(struct Qdisc* sch) static unsigned int red_drop(struct Qdisc *sch)
{ {
struct red_sched_data *q = qdisc_priv(sch); struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc; struct Qdisc *child = q->qdisc;
...@@ -149,7 +148,7 @@ static unsigned int red_drop(struct Qdisc* sch) ...@@ -149,7 +148,7 @@ static unsigned int red_drop(struct Qdisc* sch)
return 0; return 0;
} }
static void red_reset(struct Qdisc* sch) static void red_reset(struct Qdisc *sch)
{ {
struct red_sched_data *q = qdisc_priv(sch); struct red_sched_data *q = qdisc_priv(sch);
...@@ -216,7 +215,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -216,7 +215,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
return 0; return 0;
} }
static int red_init(struct Qdisc* sch, struct nlattr *opt) static int red_init(struct Qdisc *sch, struct nlattr *opt)
{ {
struct red_sched_data *q = qdisc_priv(sch); struct red_sched_data *q = qdisc_priv(sch);
......
...@@ -92,8 +92,7 @@ typedef unsigned char sfq_index; ...@@ -92,8 +92,7 @@ typedef unsigned char sfq_index;
* while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1] * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
* are 'pointers' to dep[] array * are 'pointers' to dep[] array
*/ */
struct sfq_head struct sfq_head {
{
sfq_index next; sfq_index next;
sfq_index prev; sfq_index prev;
}; };
...@@ -108,11 +107,10 @@ struct sfq_slot { ...@@ -108,11 +107,10 @@ struct sfq_slot {
short allot; /* credit for this slot */ short allot; /* credit for this slot */
}; };
struct sfq_sched_data struct sfq_sched_data {
{
/* Parameters */ /* Parameters */
int perturb_period; int perturb_period;
unsigned quantum; /* Allotment per round: MUST BE >= MTU */ unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
int limit; int limit;
/* Variables */ /* Variables */
...@@ -137,12 +135,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index ...@@ -137,12 +135,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
return &q->dep[val - SFQ_SLOTS]; return &q->dep[val - SFQ_SLOTS];
} }
static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{ {
return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1); return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
} }
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{ {
u32 h, h2; u32 h, h2;
...@@ -157,13 +155,13 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) ...@@ -157,13 +155,13 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
iph = ip_hdr(skb); iph = ip_hdr(skb);
h = (__force u32)iph->daddr; h = (__force u32)iph->daddr;
h2 = (__force u32)iph->saddr ^ iph->protocol; h2 = (__force u32)iph->saddr ^ iph->protocol;
if (iph->frag_off & htons(IP_MF|IP_OFFSET)) if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break; break;
poff = proto_ports_offset(iph->protocol); poff = proto_ports_offset(iph->protocol);
if (poff >= 0 && if (poff >= 0 &&
pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) { pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
iph = ip_hdr(skb); iph = ip_hdr(skb);
h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff); h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
} }
break; break;
} }
...@@ -181,7 +179,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) ...@@ -181,7 +179,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
if (poff >= 0 && if (poff >= 0 &&
pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) { pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
iph = ipv6_hdr(skb); iph = ipv6_hdr(skb);
h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff); h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
} }
break; break;
} }
......
...@@ -97,8 +97,7 @@ ...@@ -97,8 +97,7 @@
changed the limit is not effective anymore. changed the limit is not effective anymore.
*/ */
struct tbf_sched_data struct tbf_sched_data {
{
/* Parameters */ /* Parameters */
u32 limit; /* Maximal length of backlog: bytes */ u32 limit; /* Maximal length of backlog: bytes */
u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
...@@ -115,10 +114,10 @@ struct tbf_sched_data ...@@ -115,10 +114,10 @@ struct tbf_sched_data
struct qdisc_watchdog watchdog; /* Watchdog timer */ struct qdisc_watchdog watchdog; /* Watchdog timer */
}; };
#define L2T(q,L) qdisc_l2t((q)->R_tab,L) #define L2T(q, L) qdisc_l2t((q)->R_tab, L)
#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L) #define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct tbf_sched_data *q = qdisc_priv(sch); struct tbf_sched_data *q = qdisc_priv(sch);
int ret; int ret;
...@@ -138,7 +137,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -138,7 +137,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
static unsigned int tbf_drop(struct Qdisc* sch) static unsigned int tbf_drop(struct Qdisc *sch)
{ {
struct tbf_sched_data *q = qdisc_priv(sch); struct tbf_sched_data *q = qdisc_priv(sch);
unsigned int len = 0; unsigned int len = 0;
...@@ -150,7 +149,7 @@ static unsigned int tbf_drop(struct Qdisc* sch) ...@@ -150,7 +149,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
return len; return len;
} }
static struct sk_buff *tbf_dequeue(struct Qdisc* sch) static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{ {
struct tbf_sched_data *q = qdisc_priv(sch); struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb; struct sk_buff *skb;
...@@ -209,7 +208,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch) ...@@ -209,7 +208,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
return NULL; return NULL;
} }
static void tbf_reset(struct Qdisc* sch) static void tbf_reset(struct Qdisc *sch)
{ {
struct tbf_sched_data *q = qdisc_priv(sch); struct tbf_sched_data *q = qdisc_priv(sch);
...@@ -227,7 +226,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = { ...@@ -227,7 +226,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
[TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, [TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
}; };
static int tbf_change(struct Qdisc* sch, struct nlattr *opt) static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{ {
int err; int err;
struct tbf_sched_data *q = qdisc_priv(sch); struct tbf_sched_data *q = qdisc_priv(sch);
...@@ -236,7 +235,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt) ...@@ -236,7 +235,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
struct qdisc_rate_table *rtab = NULL; struct qdisc_rate_table *rtab = NULL;
struct qdisc_rate_table *ptab = NULL; struct qdisc_rate_table *ptab = NULL;
struct Qdisc *child = NULL; struct Qdisc *child = NULL;
int max_size,n; int max_size, n;
err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy); err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
if (err < 0) if (err < 0)
...@@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt) ...@@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
} }
for (n = 0; n < 256; n++) for (n = 0; n < 256; n++)
if (rtab->data[n] > qopt->buffer) break; if (rtab->data[n] > qopt->buffer)
max_size = (n << qopt->rate.cell_log)-1; break;
max_size = (n << qopt->rate.cell_log) - 1;
if (ptab) { if (ptab) {
int size; int size;
for (n = 0; n < 256; n++) for (n = 0; n < 256; n++)
if (ptab->data[n] > qopt->mtu) break; if (ptab->data[n] > qopt->mtu)
size = (n << qopt->peakrate.cell_log)-1; break;
if (size < max_size) max_size = size; size = (n << qopt->peakrate.cell_log) - 1;
if (size < max_size)
max_size = size;
} }
if (max_size < 0) if (max_size < 0)
goto done; goto done;
...@@ -310,7 +312,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt) ...@@ -310,7 +312,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
return err; return err;
} }
static int tbf_init(struct Qdisc* sch, struct nlattr *opt) static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{ {
struct tbf_sched_data *q = qdisc_priv(sch); struct tbf_sched_data *q = qdisc_priv(sch);
...@@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) ...@@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
} }
} }
static const struct Qdisc_class_ops tbf_class_ops = static const struct Qdisc_class_ops tbf_class_ops = {
{
.graft = tbf_graft, .graft = tbf_graft,
.leaf = tbf_leaf, .leaf = tbf_leaf,
.get = tbf_get, .get = tbf_get,
......
...@@ -53,8 +53,7 @@ ...@@ -53,8 +53,7 @@
which will not break load balancing, though native slave which will not break load balancing, though native slave
traffic will have the highest priority. */ traffic will have the highest priority. */
struct teql_master struct teql_master {
{
struct Qdisc_ops qops; struct Qdisc_ops qops;
struct net_device *dev; struct net_device *dev;
struct Qdisc *slaves; struct Qdisc *slaves;
...@@ -65,22 +64,21 @@ struct teql_master ...@@ -65,22 +64,21 @@ struct teql_master
unsigned long tx_dropped; unsigned long tx_dropped;
}; };
struct teql_sched_data struct teql_sched_data {
{
struct Qdisc *next; struct Qdisc *next;
struct teql_master *m; struct teql_master *m;
struct neighbour *ncache; struct neighbour *ncache;
struct sk_buff_head q; struct sk_buff_head q;
}; };
#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next) #define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT) #define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
/* "teql*" qdisc routines */ /* "teql*" qdisc routines */
static int static int
teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct net_device *dev = qdisc_dev(sch); struct net_device *dev = qdisc_dev(sch);
struct teql_sched_data *q = qdisc_priv(sch); struct teql_sched_data *q = qdisc_priv(sch);
...@@ -97,7 +95,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -97,7 +95,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
} }
static struct sk_buff * static struct sk_buff *
teql_dequeue(struct Qdisc* sch) teql_dequeue(struct Qdisc *sch)
{ {
struct teql_sched_data *dat = qdisc_priv(sch); struct teql_sched_data *dat = qdisc_priv(sch);
struct netdev_queue *dat_queue; struct netdev_queue *dat_queue;
...@@ -117,13 +115,13 @@ teql_dequeue(struct Qdisc* sch) ...@@ -117,13 +115,13 @@ teql_dequeue(struct Qdisc* sch)
} }
static struct sk_buff * static struct sk_buff *
teql_peek(struct Qdisc* sch) teql_peek(struct Qdisc *sch)
{ {
/* teql is meant to be used as root qdisc */ /* teql is meant to be used as root qdisc */
return NULL; return NULL;
} }
static __inline__ void static inline void
teql_neigh_release(struct neighbour *n) teql_neigh_release(struct neighbour *n)
{ {
if (n) if (n)
...@@ -131,7 +129,7 @@ teql_neigh_release(struct neighbour *n) ...@@ -131,7 +129,7 @@ teql_neigh_release(struct neighbour *n)
} }
static void static void
teql_reset(struct Qdisc* sch) teql_reset(struct Qdisc *sch)
{ {
struct teql_sched_data *dat = qdisc_priv(sch); struct teql_sched_data *dat = qdisc_priv(sch);
...@@ -141,13 +139,14 @@ teql_reset(struct Qdisc* sch) ...@@ -141,13 +139,14 @@ teql_reset(struct Qdisc* sch)
} }
static void static void
teql_destroy(struct Qdisc* sch) teql_destroy(struct Qdisc *sch)
{ {
struct Qdisc *q, *prev; struct Qdisc *q, *prev;
struct teql_sched_data *dat = qdisc_priv(sch); struct teql_sched_data *dat = qdisc_priv(sch);
struct teql_master *master = dat->m; struct teql_master *master = dat->m;
if ((prev = master->slaves) != NULL) { prev = master->slaves;
if (prev) {
do { do {
q = NEXT_SLAVE(prev); q = NEXT_SLAVE(prev);
if (q == sch) { if (q == sch) {
...@@ -179,7 +178,7 @@ teql_destroy(struct Qdisc* sch) ...@@ -179,7 +178,7 @@ teql_destroy(struct Qdisc* sch)
static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{ {
struct net_device *dev = qdisc_dev(sch); struct net_device *dev = qdisc_dev(sch);
struct teql_master *m = (struct teql_master*)sch->ops; struct teql_master *m = (struct teql_master *)sch->ops;
struct teql_sched_data *q = qdisc_priv(sch); struct teql_sched_data *q = qdisc_priv(sch);
if (dev->hard_header_len > m->dev->hard_header_len) if (dev->hard_header_len > m->dev->hard_header_len)
...@@ -290,7 +289,8 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -290,7 +289,8 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
nores = 0; nores = 0;
busy = 0; busy = 0;
if ((q = start) == NULL) q = start;
if (!q)
goto drop; goto drop;
do { do {
...@@ -355,10 +355,10 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -355,10 +355,10 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
static int teql_master_open(struct net_device *dev) static int teql_master_open(struct net_device *dev)
{ {
struct Qdisc * q; struct Qdisc *q;
struct teql_master *m = netdev_priv(dev); struct teql_master *m = netdev_priv(dev);
int mtu = 0xFFFE; int mtu = 0xFFFE;
unsigned flags = IFF_NOARP|IFF_MULTICAST; unsigned int flags = IFF_NOARP | IFF_MULTICAST;
if (m->slaves == NULL) if (m->slaves == NULL)
return -EUNATCH; return -EUNATCH;
...@@ -426,7 +426,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu) ...@@ -426,7 +426,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
do { do {
if (new_mtu > qdisc_dev(q)->mtu) if (new_mtu > qdisc_dev(q)->mtu)
return -EINVAL; return -EINVAL;
} while ((q=NEXT_SLAVE(q)) != m->slaves); } while ((q = NEXT_SLAVE(q)) != m->slaves);
} }
dev->mtu = new_mtu; dev->mtu = new_mtu;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment