Commit cc7ec456 authored by Eric Dumazet, committed by David S. Miller

net_sched: cleanups

Cleanup net/sched code to current CodingStyle and practices.

Reduce inline abuse
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7180a031
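
The hunks below repeat a handful of CodingStyle patterns: spacing around binary operators, hoisting assignments out of conditionals, multi-line comment layout, dropping explicit inline hints from static functions in .c files, and switching raw printk(KERN_*) calls to the pr_*() helpers. As a rough illustration only (a hypothetical before/after sketch using an invented demo_hash() helper, not lines from this patch, and assuming <linux/kernel.h> for printk/pr_info), the cleanup typically turns:

	/* before: explicit inline, assignment inside the condition, no operator spacing */
	static __inline__ u32 demo_hash(u32 id)
	{
		u32 h;

		if ((h = id&0xFF) == 0)
			printk(KERN_INFO "demo: empty bucket\n");
		return h;
	}

into:

	/* after: let the compiler decide about inlining, spaced operators, pr_info() */
	static u32 demo_hash(u32 id)
	{
		u32 h = id & 0xFF;

		if (h == 0)
			pr_info("demo: empty bucket\n");
		return h;
	}
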
......@@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
struct tc_action *a, struct tcf_hashinfo *hinfo)
{
struct tcf_common *p;
int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct nlattr *nest;
read_lock_bh(hinfo->lock);
......@@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
{
struct tcf_common *p, *s_p;
struct nlattr *nest;
int i= 0, n_i = 0;
int i = 0, n_i = 0;
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
......@@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
err = tcf_action_dump_old(skb, a, bind, ref);
if (err > 0) {
nla_nest_end(skb, nest);
return err;
}
......@@ -491,7 +492,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
struct tc_action *a;
struct tc_action_ops *a_o;
char act_name[IFNAMSIZ];
struct nlattr *tb[TCA_ACT_MAX+1];
struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
int err;
......@@ -549,8 +550,8 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
goto err_free;
/* module count goes up only when brand new policy is created
if it exists and is only bound to in a_o->init() then
ACT_P_CREATED is not returned (a zero is).
* if it exists and is only bound to in a_o->init() then
* ACT_P_CREATED is not returned (a zero is).
*/
if (err != ACT_P_CREATED)
module_put(a_o->owner);
......@@ -569,7 +570,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL;
int err;
int i;
......@@ -697,7 +698,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
struct nlattr *tb[TCA_ACT_MAX+1];
struct nlattr *tb[TCA_ACT_MAX + 1];
struct tc_action *a;
int index;
int err;
......@@ -770,7 +771,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
struct tcamsg *t;
struct netlink_callback dcb;
struct nlattr *nest;
struct nlattr *tb[TCA_ACT_MAX+1];
struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
struct tc_action *a = create_a(0);
int err = -ENOMEM;
......@@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
nlh->nlmsg_flags |= NLM_F_ROOT;
module_put(a->ops->owner);
kfree(a);
err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
if (err > 0)
return 0;
......@@ -842,14 +844,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
u32 pid, int event)
{
int i, ret;
struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL;
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
if (ret < 0)
return ret;
if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
if (tb[1] != NULL)
return tca_action_flush(net, tb[1], n, pid);
else
......@@ -892,7 +894,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* now do the delete */
tcf_action_destroy(head, 0);
ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags&NLM_F_ECHO);
n->nlmsg_flags & NLM_F_ECHO);
if (ret > 0)
return 0;
return ret;
......@@ -936,7 +938,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
NETLINK_CB(skb).dst_group = RTNLGRP_TC;
err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
if (err > 0)
err = 0;
return err;
......@@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* dump then free all the actions after update; inserted policy
* stays intact
* */
*/
ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
for (a = act; a; a = act) {
act = a->next;
......@@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL;
}
/* n->nlmsg_flags&NLM_F_CREATE
* */
/* n->nlmsg_flags & NLM_F_CREATE */
switch (n->nlmsg_type) {
case RTM_NEWACTION:
/* we are going to assume all other flags
......@@ -1003,7 +1004,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
* but since we want avoid ambiguity (eg when flags
* is zero) then just set this
*/
if (n->nlmsg_flags&NLM_F_REPLACE)
if (n->nlmsg_flags & NLM_F_REPLACE)
ovr = 1;
replay:
ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
......@@ -1028,7 +1029,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
static struct nlattr *
find_dump_kind(const struct nlmsghdr *n)
{
struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct nlattr *nla[TCAA_MAX + 1];
struct nlattr *kind;
......@@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
}
a_o = tc_lookup_action(kind);
if (a_o == NULL) {
if (a_o == NULL)
return 0;
}
memset(&a, 0, sizeof(struct tc_action));
a.ops = a_o;
......
......@@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
if (nla == NULL)
return -EINVAL;
err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy);
err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
if (err < 0)
return err;
......
......@@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact)
}
typedef int (*g_rand)(struct tcf_gact *gact);
static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
#endif /* CONFIG_GACT_PROB */
static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
......@@ -205,9 +205,9 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void)
{
#ifdef CONFIG_GACT_PROB
printk(KERN_INFO "GACT probability on\n");
pr_info("GACT probability on\n");
#else
printk(KERN_INFO "GACT probability NOT on\n");
pr_info("GACT probability NOT on\n");
#endif
return tcf_register_action(&act_gact_ops);
}
......
......@@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
if (unlikely(!t))
goto err2;
if ((err = ipt_init_target(t, tname, hook)) < 0)
err = ipt_init_target(t, tname, hook);
if (err < 0)
goto err3;
spin_lock_bh(&ipt->tcf_lock);
......@@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev
worry later - danger - this API seems to have changed
from earlier kernels */
* worry later - danger - this API seems to have changed
* from earlier kernels
*/
par.in = skb->dev;
par.out = NULL;
par.hooknum = ipt->tcfi_hook;
......@@ -253,8 +255,8 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
struct tc_cnt c;
/* for simple targets kernel size == user size
** user name = target name
** for foolproof you need to not assume this
* user name = target name
* for foolproof you need to not assume this
*/
t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
......
......@@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = {
.lock = &mirred_lock,
};
static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
static int tcf_mirred_release(struct tcf_mirred *m, int bind)
{
if (m) {
if (bind)
m->tcf_bindcnt--;
m->tcf_refcnt--;
if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
list_del(&m->tcfm_list);
if (m->tcfm_dev)
dev_put(m->tcfm_dev);
......
......@@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
int i, munged = 0;
unsigned int off;
if (skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
if (skb_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return p->tcf_action;
}
}
off = skb_network_offset(skb);
......
......@@ -22,8 +22,8 @@
#include <net/act_api.h>
#include <net/netlink.h>
#define L2T(p,L) qdisc_l2t((p)->tcfp_R_tab, L)
#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L)
#define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L)
#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
#define POL_TAB_MASK 15
static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
......@@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = {
};
/* old policer structure from before tc actions */
struct tc_police_compat
{
struct tc_police_compat {
u32 index;
int action;
u32 limit;
......@@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
struct tc_action *a, int ovr, int bind)
{
unsigned h;
unsigned int h;
int ret = 0, err;
struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm;
......
......@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
/* print policy string followed by _ then packet count
* Example if this was the 3rd packet and the string was "hello"
* then it would look like "hello_3" (without quotes)
**/
*/
pr_info("simple: %s_%d\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets);
spin_unlock(&d->tcf_lock);
......@@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
return ret;
}
static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
static int tcf_simp_cleanup(struct tc_action *a, int bind)
{
struct tcf_defact *d = a->priv;
......@@ -158,7 +158,7 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
return 0;
}
static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
......
......@@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
return ret;
}
static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
{
struct tcf_skbedit *d = a->priv;
......@@ -153,7 +153,7 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
return 0;
}
static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
......
......@@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
int rc = -ENOENT;
write_lock(&cls_mod_lock);
for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
if (t == ops)
break;
......@@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
u32 first = TC_H_MAKE(0xC0000000U, 0U);
if (tp)
first = tp->prio-1;
first = tp->prio - 1;
return first;
}
......@@ -149,7 +149,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (prio == 0) {
/* If no priority is given, user wants we allocated it. */
if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
if (n->nlmsg_type != RTM_NEWTFILTER ||
!(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
prio = TC_H_MAKE(0x80000000U, 0U);
}
......@@ -176,7 +177,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
}
/* Is it classful? */
if ((cops = q->ops->cl_ops) == NULL)
cops = q->ops->cl_ops;
if (!cops)
return -EINVAL;
if (cops->tcf_chain == NULL)
......@@ -196,10 +198,11 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
goto errout;
/* Check the chain for existence of proto-tcf with this priority */
for (back = chain; (tp=*back) != NULL; back = &tp->next) {
for (back = chain; (tp = *back) != NULL; back = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
if (!nprio || (tp->protocol != protocol && protocol))
if (!nprio ||
(tp->protocol != protocol && protocol))
goto errout;
} else
tp = NULL;
......@@ -216,7 +219,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
goto errout;
err = -ENOENT;
if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
if (n->nlmsg_type != RTM_NEWTFILTER ||
!(n->nlmsg_flags & NLM_F_CREATE))
goto errout;
......@@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len;
if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return skb->len;
if (!tcm->tcm_parent)
......@@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
goto out;
if ((cops = q->ops->cl_ops) == NULL)
cops = q->ops->cl_ops;
if (!cops)
goto errout;
if (cops->tcf_chain == NULL)
goto errout;
......@@ -444,8 +450,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
for (tp=*chain, t=0; tp; tp = tp->next, t++) {
if (t < s_t) continue;
for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
if (t < s_t)
continue;
if (TC_H_MAJ(tcm->tcm_info) &&
TC_H_MAJ(tcm->tcm_info) != tp->prio)
continue;
......@@ -468,10 +475,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
arg.skb = skb;
arg.cb = cb;
arg.w.stop = 0;
arg.w.skip = cb->args[1]-1;
arg.w.skip = cb->args[1] - 1;
arg.w.count = 0;
tp->ops->walk(tp, &arg.w);
cb->args[1] = arg.w.count+1;
cb->args[1] = arg.w.count + 1;
if (arg.w.stop)
break;
}
......
......@@ -21,14 +21,12 @@
#include <net/act_api.h>
#include <net/pkt_cls.h>
struct basic_head
{
struct basic_head {
u32 hgenerator;
struct list_head flist;
};
struct basic_filter
{
struct basic_filter {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
......@@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp)
return 0;
}
static inline void basic_delete_filter(struct tcf_proto *tp,
struct basic_filter *f)
static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
{
tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts);
......@@ -135,7 +132,7 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
[TCA_BASIC_EMATCHES] = { .type = NLA_NESTED },
};
static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
unsigned long base, struct nlattr **tb,
struct nlattr *est)
{
......@@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
} while (--i > 0 && basic_get(tp, head->hgenerator));
if (i <= 0) {
printk(KERN_ERR "Insufficient number of handles\n");
pr_err("Insufficient number of handles\n");
goto errout;
}
......
......@@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
{
struct cgroup_cls_state *cs;
if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL)))
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
if (cgrp->parent)
......@@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}
struct cls_cgroup_head
{
struct cls_cgroup_head {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
......@@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg)
{
struct nlattr *tb[TCA_CGROUP_MAX+1];
struct nlattr *tb[TCA_CGROUP_MAX + 1];
struct cls_cgroup_head *head = tp->root;
struct tcf_ematch_tree t;
struct tcf_exts e;
......
......@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
if (iph->frag_off & htons(IP_MF|IP_OFFSET))
if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
......@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
if (iph->frag_off & htons(IP_MF|IP_OFFSET))
if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
......
......@@ -31,14 +31,12 @@
#define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
struct fw_head
{
struct fw_head {
struct fw_filter *ht[HTSIZE];
u32 mask;
};
struct fw_filter
{
struct fw_filter {
struct fw_filter *next;
u32 id;
struct tcf_result res;
......@@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = {
.police = TCA_FW_POLICE
};
static __inline__ int fw_hash(u32 handle)
static inline int fw_hash(u32 handle)
{
if (HTSIZE == 4096)
return ((handle >> 24) & 0xFFF) ^
......@@ -82,14 +80,14 @@ static __inline__ int fw_hash(u32 handle)
static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
struct fw_head *head = (struct fw_head*)tp->root;
struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f;
int r;
u32 id = skb->mark;
if (head != NULL) {
id &= head->mask;
for (f=head->ht[fw_hash(id)]; f; f=f->next) {
for (f = head->ht[fw_hash(id)]; f; f = f->next) {
if (f->id == id) {
*res = f->res;
#ifdef CONFIG_NET_CLS_IND
......@@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
}
} else {
/* old method */
if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) {
if (id && (TC_H_MAJ(id) == 0 ||
!(TC_H_MAJ(id ^ tp->q->handle)))) {
res->classid = id;
res->class = 0;
return 0;
......@@ -117,13 +116,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{
struct fw_head *head = (struct fw_head*)tp->root;
struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f;
if (head == NULL)
return 0;
for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
if (f->id == handle)
return (unsigned long)f;
}
......@@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
static inline void
fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
{
tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts);
......@@ -156,8 +154,8 @@ static void fw_destroy(struct tcf_proto *tp)
if (head == NULL)
return;
for (h=0; h<HTSIZE; h++) {
while ((f=head->ht[h]) != NULL) {
for (h = 0; h < HTSIZE; h++) {
while ((f = head->ht[h]) != NULL) {
head->ht[h] = f->next;
fw_delete_filter(tp, f);
}
......@@ -167,14 +165,14 @@ static void fw_destroy(struct tcf_proto *tp)
static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{
struct fw_head *head = (struct fw_head*)tp->root;
struct fw_filter *f = (struct fw_filter*)arg;
struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter *)arg;
struct fw_filter **fp;
if (head == NULL || f == NULL)
goto out;
for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
......@@ -240,7 +238,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
struct nlattr **tca,
unsigned long *arg)
{
struct fw_head *head = (struct fw_head*)tp->root;
struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter *) *arg;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_FW_MAX + 1];
......@@ -302,7 +300,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct fw_head *head = (struct fw_head*)tp->root;
struct fw_head *head = (struct fw_head *)tp->root;
int h;
if (head == NULL)
......@@ -332,7 +330,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter*)fh;
struct fw_filter *f = (struct fw_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
......
......@@ -23,34 +23,30 @@
#include <net/pkt_cls.h>
/*
1. For now we assume that route tags < 256.
It allows to use direct table lookups, instead of hash tables.
2. For now we assume that "from TAG" and "fromdev DEV" statements
are mutually exclusive.
3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
* 1. For now we assume that route tags < 256.
* It allows to use direct table lookups, instead of hash tables.
* 2. For now we assume that "from TAG" and "fromdev DEV" statements
* are mutually exclusive.
* 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
*/
struct route4_fastmap
{
struct route4_fastmap {
struct route4_filter *filter;
u32 id;
int iif;
};
struct route4_head
{
struct route4_head {
struct route4_fastmap fastmap[16];
struct route4_bucket *table[256+1];
struct route4_bucket *table[256 + 1];
};
struct route4_bucket
{
struct route4_bucket {
/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
struct route4_filter *ht[16+16+1];
struct route4_filter *ht[16 + 16 + 1];
};
struct route4_filter
{
struct route4_filter {
struct route4_filter *next;
u32 id;
int iif;
......@@ -61,20 +57,20 @@ struct route4_filter
struct route4_bucket *bkt;
};
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
static const struct tcf_ext_map route_ext_map = {
.police = TCA_ROUTE4_POLICE,
.action = TCA_ROUTE4_ACT
};
static __inline__ int route4_fastmap_hash(u32 id, int iif)
static inline int route4_fastmap_hash(u32 id, int iif)
{
return id&0xF;
return id & 0xF;
}
static inline
void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
static void
route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
......@@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
spin_unlock_bh(root_lock);
}
static inline void
static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
struct route4_filter *f)
{
int h = route4_fastmap_hash(id, iif);
head->fastmap[h].id = id;
head->fastmap[h].iif = iif;
head->fastmap[h].filter = f;
}
static __inline__ int route4_hash_to(u32 id)
static inline int route4_hash_to(u32 id)
{
return id&0xFF;
return id & 0xFF;
}
static __inline__ int route4_hash_from(u32 id)
static inline int route4_hash_from(u32 id)
{
return (id>>16)&0xF;
return (id >> 16) & 0xF;
}
static __inline__ int route4_hash_iif(int iif)
static inline int route4_hash_iif(int iif)
{
return 16 + ((iif>>16)&0xF);
return 16 + ((iif >> 16) & 0xF);
}
static __inline__ int route4_hash_wild(void)
static inline int route4_hash_wild(void)
{
return 32;
}
......@@ -131,21 +128,22 @@ static __inline__ int route4_hash_wild(void)
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
struct route4_head *head = (struct route4_head*)tp->root;
struct route4_head *head = (struct route4_head *)tp->root;
struct dst_entry *dst;
struct route4_bucket *b;
struct route4_filter *f;
u32 id, h;
int iif, dont_cache = 0;
if ((dst = skb_dst(skb)) == NULL)
dst = skb_dst(skb);
if (!dst)
goto failure;
id = dst->tclassid;
if (head == NULL)
goto old_method;
iif = ((struct rtable*)dst)->fl.iif;
iif = ((struct rtable *)dst)->fl.iif;
h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id &&
......@@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
h = route4_hash_to(id);
restart:
if ((b = head->table[h]) != NULL) {
b = head->table[h];
if (b) {
for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
if (f->id == id)
ROUTE4_APPLY_RESULT();
......@@ -197,8 +196,9 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
static inline u32 to_hash(u32 id)
{
u32 h = id&0xFF;
if (id&0x8000)
u32 h = id & 0xFF;
if (id & 0x8000)
h += 256;
return h;
}
......@@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id)
if (!(id & 0x8000)) {
if (id > 255)
return 256;
return id&0xF;
return id & 0xF;
}
return 16 + (id&0xF);
return 16 + (id & 0xF);
}
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
struct route4_head *head = (struct route4_head*)tp->root;
struct route4_head *head = (struct route4_head *)tp->root;
struct route4_bucket *b;
struct route4_filter *f;
unsigned h1, h2;
unsigned int h1, h2;
if (!head)
return 0;
......@@ -230,11 +230,12 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
if (h1 > 256)
return 0;
h2 = from_hash(handle>>16);
h2 = from_hash(handle >> 16);
if (h2 > 32)
return 0;
if ((b = head->table[h1]) != NULL) {
b = head->table[h1];
if (b) {
for (f = b->ht[h2]; f; f = f->next)
if (f->handle == handle)
return (unsigned long)f;
......@@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
static inline void
static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
tcf_unbind_filter(tp, &f->res);
......@@ -267,11 +268,12 @@ static void route4_destroy(struct tcf_proto *tp)
if (head == NULL)
return;
for (h1=0; h1<=256; h1++) {
for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b;
if ((b = head->table[h1]) != NULL) {
for (h2=0; h2<=32; h2++) {
b = head->table[h1];
if (b) {
for (h2 = 0; h2 <= 32; h2++) {
struct route4_filter *f;
while ((f = b->ht[h2]) != NULL) {
......@@ -287,9 +289,9 @@ static void route4_destroy(struct tcf_proto *tp)
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
struct route4_head *head = (struct route4_head*)tp->root;
struct route4_filter **fp, *f = (struct route4_filter*)arg;
unsigned h = 0;
struct route4_head *head = (struct route4_head *)tp->root;
struct route4_filter **fp, *f = (struct route4_filter *)arg;
unsigned int h = 0;
struct route4_bucket *b;
int i;
......@@ -299,7 +301,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
h = f->handle;
b = f->bkt;
for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
......@@ -310,7 +312,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */
for (i=0; i<=32; i++)
for (i = 0; i <= 32; i++)
if (b->ht[i])
return 0;
......@@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
}
h1 = to_hash(nhandle);
if ((b = head->table[h1]) == NULL) {
b = head->table[h1];
if (!b) {
err = -ENOBUFS;
b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL)
......@@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
tcf_tree_unlock(tp);
} else {
unsigned int h2 = from_hash(nhandle >> 16);
err = -EEXIST;
for (fp = b->ht[h2]; fp; fp = fp->next)
if (fp->handle == f->handle)
......@@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
if ((f = (struct route4_filter*)*arg) != NULL) {
f = (struct route4_filter *)*arg;
if (f) {
if (f->handle != handle && handle)
return -EINVAL;
......@@ -481,7 +486,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
reinsert:
h = from_hash(f->handle >> 16);
for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
if (f->handle < f1->handle)
break;
......@@ -492,7 +497,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
if (old_handle && f->handle != old_handle) {
th = to_hash(old_handle);
h = from_hash(old_handle >> 16);
if ((b = head->table[th]) != NULL) {
b = head->table[th];
if (b) {
for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
*fp = f->next;
......@@ -515,7 +521,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct route4_head *head = tp->root;
unsigned h, h1;
unsigned int h, h1;
if (head == NULL)
arg->stop = 1;
......@@ -549,7 +555,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct route4_filter *f = (struct route4_filter*)fh;
struct route4_filter *f = (struct route4_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
u32 id;
......@@ -563,15 +569,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL)
goto nla_put_failure;
if (!(f->handle&0x8000)) {
id = f->id&0xFF;
if (!(f->handle & 0x8000)) {
id = f->id & 0xFF;
NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
}
if (f->handle&0x80000000) {
if ((f->handle>>16) != 0xFFFF)
if (f->handle & 0x80000000) {
if ((f->handle >> 16) != 0xFFFF)
NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
} else {
id = f->id>>16;
id = f->id >> 16;
NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
}
if (f->res.classid)
......
......@@ -66,28 +66,25 @@
powerful classification engine. */
struct rsvp_head
{
struct rsvp_head {
u32 tmap[256/32];
u32 hgenerator;
u8 tgenerator;
struct rsvp_session *ht[256];
};
struct rsvp_session
{
struct rsvp_session {
struct rsvp_session *next;
__be32 dst[RSVP_DST_LEN];
struct tc_rsvp_gpi dpi;
u8 protocol;
u8 tunnelid;
/* 16 (src,sport) hash slots, and one wildcard source slot */
struct rsvp_filter *ht[16+1];
struct rsvp_filter *ht[16 + 1];
};
struct rsvp_filter
{
struct rsvp_filter {
struct rsvp_filter *next;
__be32 src[RSVP_DST_LEN];
struct tc_rsvp_gpi spi;
......@@ -100,17 +97,19 @@ struct rsvp_filter
struct rsvp_session *sess;
};
static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
h ^= h>>16;
h ^= h>>8;
return (h ^ protocol ^ tunnelid) & 0xFF;
}
static __inline__ unsigned hash_src(__be32 *src)
static inline unsigned int hash_src(__be32 *src)
{
unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
h ^= h>>16;
h ^= h>>8;
h ^= h>>4;
......@@ -134,10 +133,10 @@ static struct tcf_ext_map rsvp_ext_map = {
static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s;
struct rsvp_filter *f;
unsigned h1, h2;
unsigned int h1, h2;
__be32 *dst, *src;
u8 protocol;
u8 tunnelid = 0;
......@@ -162,13 +161,13 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
src = &nhptr->saddr.s6_addr32[0];
dst = &nhptr->daddr.s6_addr32[0];
protocol = nhptr->nexthdr;
xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr);
xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
src = &nhptr->saddr;
dst = &nhptr->daddr;
protocol = nhptr->protocol;
xprt = ((u8*)nhptr) + (nhptr->ihl<<2);
if (nhptr->frag_off & htons(IP_MF|IP_OFFSET))
xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
if (nhptr->frag_off & htons(IP_MF | IP_OFFSET))
return -1;
#endif
......@@ -176,10 +175,10 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
h2 = hash_src(src);
for (s = sht[h1]; s; s = s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
protocol == s->protocol &&
!(s->dpi.mask &
(*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
(*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
dst[0] == s->dst[0] &&
dst[1] == s->dst[1] &&
......@@ -188,8 +187,8 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
tunnelid == s->tunnelid) {
for (f = s->ht[h2]; f; f = f->next) {
if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
!(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
!(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
&&
src[0] == f->src[0] &&
......@@ -205,7 +204,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
return 0;
tunnelid = f->res.classid;
nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
goto restart;
}
}
......@@ -224,11 +223,11 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
{
struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s;
struct rsvp_filter *f;
unsigned h1 = handle&0xFF;
unsigned h2 = (handle>>8)&0xFF;
unsigned int h1 = handle & 0xFF;
unsigned int h2 = (handle >> 8) & 0xFF;
if (h2 > 16)
return 0;
......@@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS;
}
static inline void
static void
rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
tcf_unbind_filter(tp, &f->res);
......@@ -277,13 +276,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
sht = data->ht;
for (h1=0; h1<256; h1++) {
for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
while ((s = sht[h1]) != NULL) {
sht[h1] = s->next;
for (h2=0; h2<=16; h2++) {
for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
while ((f = s->ht[h2]) != NULL) {
......@@ -299,13 +298,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
{
struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg;
unsigned h = f->handle;
struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
unsigned int h = f->handle;
struct rsvp_session **sp;
struct rsvp_session *s = f->sess;
int i;
for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) {
for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
......@@ -314,12 +313,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */
for (i=0; i<=16; i++)
for (i = 0; i <= 16; i++)
if (s->ht[i])
return 0;
/* OK, session has no flows */
for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
*sp; sp = &(*sp)->next) {
if (*sp == s) {
tcf_tree_lock(tp);
......@@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
return 0;
}
static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
struct rsvp_head *data = tp->root;
int i = 0xFFFF;
while (i-- > 0) {
u32 h;
if ((data->hgenerator += 0x10000) == 0)
data->hgenerator = 0x10000;
h = data->hgenerator|salt;
......@@ -355,10 +355,10 @@ static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
static int tunnel_bts(struct rsvp_head *data)
{
int n = data->tgenerator>>5;
u32 b = 1<<(data->tgenerator&0x1F);
int n = data->tgenerator >> 5;
u32 b = 1 << (data->tgenerator & 0x1F);
if (data->tmap[n]&b)
if (data->tmap[n] & b)
return 0;
data->tmap[n] |= b;
return 1;
......@@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data)
memset(tmap, 0, sizeof(tmap));
for (h1=0; h1<256; h1++) {
for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
for (s = sht[h1]; s; s = s->next) {
for (h2=0; h2<=16; h2++) {
for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
for (f = s->ht[h2]; f; f = f->next) {
......@@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
{
int i, k;
for (k=0; k<2; k++) {
for (i=255; i>0; i--) {
for (k = 0; k < 2; k++) {
for (i = 255; i > 0; i--) {
if (++data->tgenerator == 0)
data->tgenerator = 1;
if (tunnel_bts(data))
......@@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
struct nlattr *opt = tca[TCA_OPTIONS-1];
struct nlattr *tb[TCA_RSVP_MAX + 1];
struct tcf_exts e;
unsigned h1, h2;
unsigned int h1, h2;
__be32 *dst;
int err;
......@@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
if ((f = (struct rsvp_filter*)*arg) != NULL) {
f = (struct rsvp_filter *)*arg;
if (f) {
/* Node exists: adjust only classid */
if (f->handle != handle && handle)
......@@ -500,7 +501,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
goto errout;
}
for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
pinfo && pinfo->protocol == s->protocol &&
memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
......@@ -523,7 +524,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
tcf_exts_change(tp, &f->exts, &e);
for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
break;
f->next = *fp;
wmb();
......@@ -567,7 +568,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct rsvp_head *head = tp->root;
unsigned h, h1;
unsigned int h, h1;
if (arg->stop)
return;
......@@ -598,7 +599,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct rsvp_filter *f = (struct rsvp_filter*)fh;
struct rsvp_filter *f = (struct rsvp_filter *)fh;
struct rsvp_session *s;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
......@@ -624,7 +625,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
if (f->res.classid)
NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
if (((f->handle>>8)&0xFF) != 16)
if (((f->handle >> 8) & 0xFF) != 16)
NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
......
......@@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
* of the hashing index is below the threshold.
*/
if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
cp.hash = (cp.mask >> cp.shift)+1;
cp.hash = (cp.mask >> cp.shift) + 1;
else
cp.hash = DEFAULT_HASH_SIZE;
}
......
......@@ -42,8 +42,7 @@
#include <net/act_api.h>
#include <net/pkt_cls.h>
struct tc_u_knode
{
struct tc_u_knode {
struct tc_u_knode *next;
u32 handle;
struct tc_u_hnode *ht_up;
......@@ -63,19 +62,17 @@ struct tc_u_knode
struct tc_u32_sel sel;
};
struct tc_u_hnode
{
struct tc_u_hnode {
struct tc_u_hnode *next;
u32 handle;
u32 prio;
struct tc_u_common *tp_c;
int refcnt;
unsigned divisor;
unsigned int divisor;
struct tc_u_knode *ht[1];
};
struct tc_u_common
{
struct tc_u_common {
struct tc_u_hnode *hlist;
struct Qdisc *q;
int refcnt;
......@@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = {
.police = TCA_U32_POLICE
};
static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
static inline unsigned int u32_hash_fold(__be32 key,
const struct tc_u32_sel *sel,
u8 fshift)
{
unsigned h = ntohl(key & sel->hmask)>>fshift;
unsigned int h = ntohl(key & sel->hmask) >> fshift;
return h;
}
......@@ -101,7 +100,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
unsigned int off;
} stack[TC_U32_MAXDEPTH];
struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n;
int sdepth = 0;
......@@ -120,7 +119,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
n->pf->rcnt +=1;
n->pf->rcnt += 1;
j = 0;
#endif
......@@ -133,7 +132,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
}
#endif
for (i = n->sel.nkeys; i>0; i--, key++) {
for (i = n->sel.nkeys; i > 0; i--, key++) {
int toff = off + key->off + (off2 & key->offmask);
__be32 *data, _data;
......@@ -148,13 +147,13 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
n->pf->kcnts[j] +=1;
n->pf->kcnts[j] += 1;
j++;
#endif
}
if (n->ht_down == NULL) {
check_terminal:
if (n->sel.flags&TC_U32_TERMINAL) {
if (n->sel.flags & TC_U32_TERMINAL) {
*res = n->res;
#ifdef CONFIG_NET_CLS_IND
......@@ -164,7 +163,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
}
#endif
#ifdef CONFIG_CLS_U32_PERF
n->pf->rhit +=1;
n->pf->rhit += 1;
#endif
r = tcf_exts_exec(skb, &n->exts, res);
if (r < 0) {
......@@ -197,10 +196,10 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
sel = ht->divisor & u32_hash_fold(*data, &n->sel,
n->fshift);
}
if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
goto next_ht;
if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3;
if (n->sel.flags & TC_U32_VAROFFSET) {
__be16 *data, _data;
......@@ -215,7 +214,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
}
off2 &= ~3;
}
if (n->sel.flags&TC_U32_EAT) {
if (n->sel.flags & TC_U32_EAT) {
off += off2;
off2 = 0;
}
......@@ -236,11 +235,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
deadloop:
if (net_ratelimit())
printk(KERN_WARNING "cls_u32: dead loop\n");
pr_warning("cls_u32: dead loop\n");
return -1;
}
static __inline__ struct tc_u_hnode *
static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
struct tc_u_hnode *ht;
......@@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
return ht;
}
static __inline__ struct tc_u_knode *
static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
unsigned sel;
unsigned int sel;
struct tc_u_knode *n = NULL;
sel = TC_U32_HASH(handle);
......@@ -300,7 +299,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c)
do {
if (++tp_c->hgenerator == 0x7FF)
tp_c->hgenerator = 1;
} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
......@@ -378,9 +377,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
struct tc_u_knode *n;
unsigned h;
unsigned int h;
for (h=0; h<=ht->divisor; h++) {
for (h = 0; h <= ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) {
ht->ht[h] = n->next;
......@@ -446,13 +445,13 @@ static void u32_destroy(struct tcf_proto *tp)
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;
struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
if (ht == NULL)
return 0;
if (TC_U32_KEY(ht->handle))
return u32_delete_key(tp, (struct tc_u_knode*)ht);
return u32_delete_key(tp, (struct tc_u_knode *)ht);
if (tp->root == ht)
return -EINVAL;
......@@ -470,14 +469,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
struct tc_u_knode *n;
unsigned i = 0x7FF;
unsigned int i = 0x7FF;
for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
if (i < TC_U32_NODE(n->handle))
i = TC_U32_NODE(n->handle);
i++;
return handle|(i>0xFFF ? 0xFFF : i);
return handle | (i > 0xFFF ? 0xFFF : i);
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
......@@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (err < 0)
return err;
if ((n = (struct tc_u_knode*)*arg) != NULL) {
n = (struct tc_u_knode *)*arg;
if (n) {
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
......@@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
}
if (tb[TCA_U32_DIVISOR]) {
unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
if (--divisor > 0x100)
return -EINVAL;
......@@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (handle == 0)
return -ENOMEM;
}
ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
if (ht == NULL)
return -ENOBUFS;
ht->tp_c = tp_c;
......@@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
unsigned h;
unsigned int h;
if (arg->stop)
return;
......@@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct tc_u_knode *n = (struct tc_u_knode*)fh;
struct tc_u_knode *n = (struct tc_u_knode *)fh;
struct nlattr *nest;
if (n == NULL)
......@@ -730,8 +730,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
if (TC_U32_KEY(n->handle) == 0) {
struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
u32 divisor = ht->divisor+1;
struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
u32 divisor = ht->divisor + 1;
NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
} else {
NLA_PUT(skb, TCA_U32_SEL,
......@@ -755,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
if(strlen(n->indev))
if (strlen(n->indev))
NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
......
......@@ -46,7 +46,8 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
case TCF_EM_ALIGN_U32:
/* Worth checking boundries? The branching seems
* to get worse. Visit again. */
* to get worse. Visit again.
*/
val = get_unaligned_be32(ptr);
if (cmp_needs_transformation(cmp))
......
......@@ -73,21 +73,18 @@
#include <net/pkt_cls.h>
#include <net/sock.h>
struct meta_obj
{
struct meta_obj {
unsigned long value;
unsigned int len;
};
struct meta_value
{
struct meta_value {
struct tcf_meta_val hdr;
unsigned long val;
unsigned int len;
};
struct meta_match
{
struct meta_match {
struct meta_value lvalue;
struct meta_value rvalue;
};
......@@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend)
* Meta value collectors assignment table
**************************************************************************/
struct meta_ops
{
struct meta_ops {
void (*get)(struct sk_buff *, struct tcf_pkt_info *,
struct meta_value *, struct meta_obj *, int *);
};
......@@ -494,7 +490,7 @@ struct meta_ops
/* Meta value operations table listing all meta value collectors and
* assigns them to a type and meta id. */
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
[META_ID(DEV)] = META_FUNC(var_dev),
[META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
......@@ -550,7 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
}
};
static inline struct meta_ops * meta_ops(struct meta_value *val)
static inline struct meta_ops *meta_ops(struct meta_value *val)
{
return &__meta_ops[meta_type(val)][meta_id(val)];
}
......@@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
if (v->len == sizeof(unsigned long))
NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
else if (v->len == sizeof(u32)) {
else if (v->len == sizeof(u32))
NLA_PUT_U32(skb, tlv, v->val);
}
return 0;
......@@ -663,8 +658,7 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
* Type specific operations table
**************************************************************************/
struct meta_type_ops
{
struct meta_type_ops {
void (*destroy)(struct meta_value *);
int (*compare)(struct meta_obj *, struct meta_obj *);
int (*change)(struct meta_value *, struct nlattr *);
......@@ -672,7 +666,7 @@ struct meta_type_ops
int (*dump)(struct sk_buff *, struct meta_value *, int);
};
static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
.destroy = meta_var_destroy,
.compare = meta_var_compare,
......@@ -688,7 +682,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
}
};
static inline struct meta_type_ops * meta_type_ops(struct meta_value *v)
static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
return &__meta_type_ops[meta_type(v)];
}
......@@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
static inline int meta_is_supported(struct meta_value *val)
{
return (!meta_id(val) || meta_ops(val)->get);
return !meta_id(val) || meta_ops(val)->get;
}
static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
......
......@@ -18,8 +18,7 @@
#include <linux/tc_ematch/tc_em_nbyte.h>
#include <net/pkt_cls.h>
struct nbyte_data
{
struct nbyte_data {
struct tcf_em_nbyte hdr;
char pattern[0];
};
......
......@@ -19,8 +19,7 @@
#include <linux/tc_ematch/tc_em_text.h>
#include <net/pkt_cls.h>
struct text_match
{
struct text_match {
u16 from_offset;
u16 to_offset;
u8 from_layer;
......
......@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
return 0;
return !(((*(__be32*) ptr) ^ key->val) & key->mask);
return !(((*(__be32 *) ptr) ^ key->val) & key->mask);
}
static struct tcf_ematch_ops em_u32_ops = {
......
......@@ -93,7 +93,7 @@
static LIST_HEAD(ematch_ops);
static DEFINE_RWLOCK(ematch_mod_lock);
static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind)
static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
{
struct tcf_ematch_ops *e = NULL;
......@@ -163,7 +163,7 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops)
}
EXPORT_SYMBOL(tcf_em_unregister);
static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree,
static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
int index)
{
return &tree->matches[index];
......@@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em_hdr->kind == TCF_EM_CONTAINER) {
/* Special ematch called "container", carries an index
* referencing an external ematch sequence. */
* referencing an external ematch sequence.
*/
u32 ref;
if (data_len < sizeof(ref))
......@@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
goto errout;
/* We do not allow backward jumps to avoid loops and jumps
* to our own position are of course illegal. */
* to our own position are of course illegal.
*/
if (ref <= idx)
goto errout;
......@@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* which automatically releases the reference again, therefore
* the module MUST not be given back under any circumstances
* here. Be aware, the destroy function assumes that the
* module is held if the ops field is non zero. */
* module is held if the ops field is non zero.
*/
em->ops = tcf_em_lookup(em_hdr->kind);
if (em->ops == NULL) {
......@@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em->ops) {
/* We dropped the RTNL mutex in order to
* perform the module load. Tell the caller
* to replay the request. */
* to replay the request.
*/
module_put(em->ops->owner);
err = -EAGAIN;
}
......@@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
}
/* ematch module provides expected length of data, so we
* can do a basic sanity check. */
* can do a basic sanity check.
*/
if (em->ops->datalen && data_len < em->ops->datalen)
goto errout;
......@@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* TCF_EM_SIMPLE may be specified stating that the
* data only consists of a u32 integer and the module
* does not expected a memory reference but rather
* the value carried. */
* the value carried.
*/
if (em_hdr->flags & TCF_EM_SIMPLE) {
if (data_len < sizeof(u32))
goto errout;
......@@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
* The array of rt attributes is parsed in the order as they are
* provided, their type must be incremental from 1 to n. Even
* if it does not serve any real purpose, a failure of sticking
* to this policy will result in parsing failure. */
* to this policy will result in parsing failure.
*/
for (idx = 0; nla_ok(rt_match, list_len); idx++) {
err = -EINVAL;
......@@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
/* Check if the number of matches provided by userspace actually
* complies with the array of matches. The number was used for
* the validation of references and a mismatch could lead to
* undefined references during the matching process. */
* undefined references during the matching process.
*/
if (idx != tree_hdr->nmatches) {
err = -EINVAL;
goto errout_abort;
......@@ -449,7 +457,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
.flags = em->flags
};
NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr);
NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
if (em->ops && em->ops->dump) {
if (em->ops->dump(skb, em) < 0)
......@@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
int r = em->ops->match(skb, em, info);
return tcf_em_is_inverted(em) ? !r : r;
}
......@@ -527,7 +536,7 @@ int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree,
stack_overflow:
if (net_ratelimit())
printk(KERN_WARNING "tc ematch: local stack overflow,"
pr_warning("tc ematch: local stack overflow,"
" increase NET_EMATCH_STACK\n");
return -1;
}
......
......@@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
* creation), and one for the reference held when calling delete.
*/
if (flow->ref < 2) {
printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
return -EINVAL;
}
if (flow->ref > 2)
......@@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
}
flow = NULL;
done:
done:
;
}
if (!flow)
if (!flow) {
flow = &p->link;
else {
} else {
if (flow->vcc)
ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
/*@@@ looks good ... but it's not supposed to work :-) */
......@@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1)
printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
flow->ref);
pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
atm_tc_put(sch, (unsigned long)flow);
}
tasklet_kill(&p->task);
......@@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
}
if (flow->excess)
NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
else {
else
NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
}
nla_nest_end(skb, nest);
return skb->len;
......
......@@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
if (tb[TCA_DSMARK_VALUE])
p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
if (tb[TCA_DSMARK_MASK])
p->mask[*arg-1] = mask;
p->mask[*arg - 1] = mask;
err = 0;
......@@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
if (!dsmark_valid_index(p, arg))
return -EINVAL;
p->mask[arg-1] = 0xff;
p->value[arg-1] = 0;
p->mask[arg - 1] = 0xff;
p->value[arg - 1] = 0;
return 0;
}
......@@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
if (p->mask[i] == 0xff && !p->value[i])
goto ignore;
if (walker->count >= walker->skip) {
if (walker->fn(sch, i+1, walker) < 0) {
if (walker->fn(sch, i + 1, walker) < 0) {
walker->stop = 1;
break;
}
......@@ -304,8 +304,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
* and don't need yet another qdisc as a bypass.
*/
if (p->mask[index] != 0xff || p->value[index])
printk(KERN_WARNING
"dsmark_dequeue: unsupported protocol %d\n",
pr_warning("dsmark_dequeue: unsupported protocol %d\n",
ntohs(skb->protocol));
break;
}
......@@ -424,14 +423,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
if (!dsmark_valid_index(p, cl))
return -EINVAL;
tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1);
tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
tcm->tcm_info = p->q->handle;
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]);
NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]);
NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
return nla_nest_end(skb, opts);
......
......@@ -19,12 +19,11 @@
/* 1 band FIFO pseudo-"scheduler" */
struct fifo_sched_data
{
struct fifo_sched_data {
u32 limit;
};
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
......@@ -34,7 +33,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
}
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
......@@ -44,7 +43,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
}
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct sk_buff *skb_head;
struct fifo_sched_data *q = qdisc_priv(sch);
......
......@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
*/
kfree_skb(skb);
if (net_ratelimit())
printk(KERN_WARNING "Dead loop on netdevice %s, "
"fix it urgently!\n", dev_queue->dev->name);
pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
dev_queue->dev->name);
ret = qdisc_qlen(q);
} else {
/*
......@@ -137,7 +137,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
} else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
printk(KERN_WARNING "BUG %s code %d qlen %d\n",
pr_warning("BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q);
......@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = {
};
static const u8 prio2band[TC_PRIO_MAX+1] =
{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
static const u8 prio2band[TC_PRIO_MAX + 1] = {
1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
};
/* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination.
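As the comment says, pfifo_fast classifies purely on skb->priority: the low four bits index prio2band[], folding the 16 TC_PRIO values into three bands, with band 0 dequeued first. A minimal userspace sketch of that mapping, assuming the table shown above:

#include <stdio.h>

#define TC_PRIO_MAX 15

/* Same table as above: 16 priorities folded into 3 bands. */
static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* Band selection as done in pfifo_fast_enqueue(). */
static int prio_to_band(unsigned int priority)
{
	return prio2band[priority & TC_PRIO_MAX];
}

int main(void)
{
	unsigned int prio;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++)
		printf("priority %2u -> band %d\n", prio, prio_to_band(prio));
	return 0;
}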
......@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
return priv->q + band;
}
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
int band = prio2band[skb->priority & TC_PRIO_MAX];
......@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
return qdisc_drop(skb, qdisc);
}
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
......@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
return NULL;
}
static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
......@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
return NULL;
}
static void pfifo_fast_reset(struct Qdisc* qdisc)
static void pfifo_fast_reset(struct Qdisc *qdisc)
{
int prio;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
......@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
return skb->len;
......@@ -681,20 +682,18 @@ static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
{
struct Qdisc *qdisc;
struct Qdisc *qdisc = &noqueue_qdisc;
if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev_queue,
&pfifo_fast_ops, TC_H_ROOT);
if (!qdisc) {
printk(KERN_INFO "%s: activation failed\n", dev->name);
netdev_info(dev, "activation failed\n");
return;
}
/* Can by-pass the queue discipline for default qdisc */
qdisc->flags |= TCQ_F_CAN_BYPASS;
} else {
qdisc = &noqueue_qdisc;
}
dev_queue->qdisc_sleeping = qdisc;
}
......
......@@ -32,8 +32,7 @@
struct gred_sched_data;
struct gred_sched;
struct gred_sched_data
{
struct gred_sched_data {
u32 limit; /* HARD maximal queue length */
u32 DP; /* the drop pramaters */
u32 bytesin; /* bytes seen on virtualQ so far*/
......@@ -50,8 +49,7 @@ enum {
GRED_RIO_MODE,
};
struct gred_sched
{
struct gred_sched {
struct gred_sched_data *tab[MAX_DPs];
unsigned long flags;
u32 red_flags;
......@@ -150,17 +148,18 @@ static inline int gred_use_harddrop(struct gred_sched *t)
return t->red_flags & TC_RED_HARDDROP;
}
static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct gred_sched_data *q=NULL;
struct gred_sched *t= qdisc_priv(sch);
struct gred_sched_data *q = NULL;
struct gred_sched *t = qdisc_priv(sch);
unsigned long qavg = 0;
u16 dp = tc_index_to_dp(skb);
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
dp = t->def;
if ((q = t->tab[dp]) == NULL) {
q = t->tab[dp];
if (!q) {
/* Pass through packets not assigned to a DP
* if no default DP has been configured. This
* allows for DP flows to be left untouched.
......@@ -183,7 +182,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
for (i = 0; i < t->DPs; i++) {
if (t->tab[i] && t->tab[i]->prio < q->prio &&
!red_is_idling(&t->tab[i]->parms))
qavg +=t->tab[i]->parms.qavg;
qavg += t->tab[i]->parms.qavg;
}
}
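gred_enqueue() first tries the virtual queue named by the packet's tc_index; if that DP is out of range or unconfigured it falls back to the default DP (the comment above notes that packets may pass through untouched if even the default is missing), and in RIO mode it then sums qavg over higher-priority, non-idling VQs. A minimal userspace sketch of the fallback lookup only, with hypothetical types rather than the kernel structures:

#include <stdio.h>
#include <stddef.h>

#define MAX_DPS 16

/* Hypothetical per-virtual-queue state. */
struct vq {
	int prio;
};

struct sched {
	struct vq *tab[MAX_DPS];	/* configured VQs, may be sparse */
	unsigned int dps;		/* number of usable slots */
	unsigned int def;		/* default DP index */
};

/* Lookup with fallback, as at the top of gred_enqueue(): out-of-range or
 * unconfigured DPs fall back to the default DP, which may itself be NULL.
 */
static struct vq *lookup_vq(struct sched *t, unsigned int dp)
{
	struct vq *q;

	if (dp >= t->dps || (q = t->tab[dp]) == NULL) {
		dp = t->def;
		q = t->tab[dp];
	}
	return q;
}

int main(void)
{
	static struct vq vq0 = { .prio = 1 }, vq2 = { .prio = 3 };
	struct sched t = { .dps = 8, .def = 0 };

	t.tab[0] = &vq0;	/* default DP */
	t.tab[2] = &vq2;	/* one configured DP */

	printf("dp 2  -> prio %d\n", lookup_vq(&t, 2)->prio);	/* direct hit */
	printf("dp 5  -> prio %d\n", lookup_vq(&t, 5)->prio);	/* falls back to DP 0 */
	printf("dp 42 -> prio %d\n", lookup_vq(&t, 42)->prio);	/* out of range, falls back */
	return 0;
}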
......@@ -241,7 +240,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return NET_XMIT_CN;
}
static struct sk_buff *gred_dequeue(struct Qdisc* sch)
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
......@@ -254,8 +253,8 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit())
printk(KERN_WARNING "GRED: Unable to relocate "
"VQ 0x%x after dequeue, screwing up "
pr_warning("GRED: Unable to relocate VQ 0x%x "
"after dequeue, screwing up "
"backlog.\n", tc_index_to_dp(skb));
} else {
q->backlog -= qdisc_pkt_len(skb);
......@@ -273,7 +272,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
return NULL;
}
static unsigned int gred_drop(struct Qdisc* sch)
static unsigned int gred_drop(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
......@@ -286,8 +285,8 @@ static unsigned int gred_drop(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit())
printk(KERN_WARNING "GRED: Unable to relocate "
"VQ 0x%x while dropping, screwing up "
pr_warning("GRED: Unable to relocate VQ 0x%x "
"while dropping, screwing up "
"backlog.\n", tc_index_to_dp(skb));
} else {
q->backlog -= len;
......@@ -308,7 +307,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
}
static void gred_reset(struct Qdisc* sch)
static void gred_reset(struct Qdisc *sch)
{
int i;
struct gred_sched *t = qdisc_priv(sch);
......@@ -369,7 +368,7 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
for (i = table->DPs; i < MAX_DPs; i++) {
if (table->tab[i]) {
printk(KERN_WARNING "GRED: Warning: Destroying "
pr_warning("GRED: Warning: Destroying "
"shadowed VQ 0x%x\n", i);
gred_destroy_vq(table->tab[i]);
table->tab[i] = NULL;
......
......@@ -81,8 +81,7 @@
* that are expensive on 32-bit architectures.
*/
struct internal_sc
{
struct internal_sc {
u64 sm1; /* scaled slope of the 1st segment */
u64 ism1; /* scaled inverse-slope of the 1st segment */
u64 dx; /* the x-projection of the 1st segment */
......@@ -92,8 +91,7 @@ struct internal_sc
};
/* runtime service curve */
struct runtime_sc
{
struct runtime_sc {
u64 x; /* current starting position on x-axis */
u64 y; /* current starting position on y-axis */
u64 sm1; /* scaled slope of the 1st segment */
......@@ -104,15 +102,13 @@ struct runtime_sc
u64 ism2; /* scaled inverse-slope of the 2nd segment */
};
enum hfsc_class_flags
{
enum hfsc_class_flags {
HFSC_RSC = 0x1,
HFSC_FSC = 0x2,
HFSC_USC = 0x4
};
struct hfsc_class
{
struct hfsc_class {
struct Qdisc_class_common cl_common;
unsigned int refcnt; /* usage count */
......@@ -176,8 +172,7 @@ struct hfsc_class
unsigned long cl_nactive; /* number of active children */
};
struct hfsc_sched
{
struct hfsc_sched {
u16 defcls; /* default class id */
struct hfsc_class root; /* root class */
struct Qdisc_class_hash clhash; /* class hash */
......@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
if (go_active) {
n = rb_last(&cl->cl_parent->vt_tree);
if (n != NULL) {
max_cl = rb_entry(n, struct hfsc_class,vt_node);
max_cl = rb_entry(n, struct hfsc_class, vt_node);
/*
* set vt to the average of the min and max
* classes. if the parent's period didn't
......@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
return NULL;
}
#endif
if ((cl = (struct hfsc_class *)res.class) == NULL) {
if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
cl = (struct hfsc_class *)res.class;
if (!cl) {
cl = hfsc_find_class(res.classid, sch);
if (!cl)
break; /* filter selected invalid classid */
if (cl->level >= head->level)
break; /* filter may only point downwards */
......@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
return -1;
}
static inline int
static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
if ((cl->cl_flags & HFSC_RSC) &&
......@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
struct hfsc_class *cl;
u64 next_time = 0;
if ((cl = eltree_get_minel(q)) != NULL)
cl = eltree_get_minel(q);
if (cl)
next_time = cl->cl_e;
if (q->root.cl_cfmin != 0) {
if (next_time == 0 || next_time > q->root.cl_cfmin)
......@@ -1626,7 +1624,8 @@ hfsc_dequeue(struct Qdisc *sch)
* find the class with the minimum deadline among
* the eligible classes.
*/
if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
cl = eltree_get_mindl(q, cur_time);
if (cl) {
realtime = 1;
} else {
/*
......
......@@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch)
unsigned int len;
struct Qdisc *qdisc;
for (band = q->bands-1; band >= 0; band--) {
for (band = q->bands - 1; band >= 0; band--) {
qdisc = q->queues[band];
if (qdisc->ops->drop) {
len = qdisc->ops->drop(qdisc);
......@@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < q->max_bands; i++)
q->queues[i] = &noop_qdisc;
err = multiq_tune(sch,opt);
err = multiq_tune(sch, opt);
if (err)
kfree(q->queues);
......@@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
struct multiq_sched_data *q = qdisc_priv(sch);
tcm->tcm_handle |= TC_H_MIN(cl);
tcm->tcm_info = q->queues[cl-1]->handle;
tcm->tcm_info = q->queues[cl - 1]->handle;
return 0;
}
......@@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++;
continue;
}
if (arg->fn(sch, band+1, arg) < 0) {
if (arg->fn(sch, band + 1, arg) < 0) {
arg->stop = 1;
break;
}
......
......@@ -249,7 +249,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
static unsigned int netem_drop(struct Qdisc* sch)
static unsigned int netem_drop(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
unsigned int len = 0;
......
......@@ -22,8 +22,7 @@
#include <net/pkt_sched.h>
struct prio_sched_data
{
struct prio_sched_data {
int bands;
struct tcf_proto *filter_list;
u8 prio2band[TC_PRIO_MAX+1];
......@@ -54,7 +53,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (!q->filter_list || err < 0) {
if (TC_H_MAJ(band))
band = 0;
return q->queues[q->prio2band[band&TC_PRIO_MAX]];
return q->queues[q->prio2band[band & TC_PRIO_MAX]];
}
band = res.classid;
}
......@@ -107,7 +106,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch)
return NULL;
}
static struct sk_buff *prio_dequeue(struct Qdisc* sch)
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
......@@ -124,7 +123,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
}
static unsigned int prio_drop(struct Qdisc* sch)
static unsigned int prio_drop(struct Qdisc *sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
......@@ -143,24 +142,24 @@ static unsigned int prio_drop(struct Qdisc* sch)
static void
prio_reset(struct Qdisc* sch)
prio_reset(struct Qdisc *sch)
{
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
for (prio=0; prio<q->bands; prio++)
for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
sch->q.qlen = 0;
}
static void
prio_destroy(struct Qdisc* sch)
prio_destroy(struct Qdisc *sch)
{
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
tcf_destroy_chain(&q->filter_list);
for (prio=0; prio<q->bands; prio++)
for (prio = 0; prio < q->bands; prio++)
qdisc_destroy(q->queues[prio]);
}
......@@ -177,7 +176,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
return -EINVAL;
for (i=0; i<=TC_PRIO_MAX; i++) {
for (i = 0; i <= TC_PRIO_MAX; i++) {
if (qopt->priomap[i] >= qopt->bands)
return -EINVAL;
}
......@@ -186,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) {
......@@ -196,9 +195,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
}
sch_tree_unlock(sch);
for (i=0; i<q->bands; i++) {
for (i = 0; i < q->bands; i++) {
if (q->queues[i] == &noop_qdisc) {
struct Qdisc *child, *old;
child = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops,
TC_H_MAKE(sch->handle, i + 1));
......@@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
struct prio_sched_data *q = qdisc_priv(sch);
int i;
for (i=0; i<TCQ_PRIO_BANDS; i++)
for (i = 0; i < TCQ_PRIO_BANDS; i++)
q->queues[i] = &noop_qdisc;
if (opt == NULL) {
......@@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
} else {
int err;
if ((err= prio_tune(sch, opt)) != 0)
if ((err = prio_tune(sch, opt)) != 0)
return err;
}
return 0;
......@@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_prio_qopt opt;
opt.bands = q->bands;
memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
......@@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++;
continue;
}
if (arg->fn(sch, prio+1, arg) < 0) {
if (arg->fn(sch, prio + 1, arg) < 0) {
arg->stop = 1;
break;
}
......@@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{
struct prio_sched_data *q = qdisc_priv(sch);
......
......@@ -36,8 +36,7 @@
if RED works correctly.
*/
struct red_sched_data
{
struct red_sched_data {
u32 limit; /* HARD maximal queue length */
unsigned char flags;
struct red_parms parms;
......@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q)
return q->flags & TC_RED_HARDDROP;
}
static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
......@@ -107,7 +106,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return NET_XMIT_CN;
}
static struct sk_buff * red_dequeue(struct Qdisc* sch)
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct red_sched_data *q = qdisc_priv(sch);
......@@ -122,7 +121,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
return skb;
}
static struct sk_buff * red_peek(struct Qdisc* sch)
static struct sk_buff *red_peek(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
......@@ -130,7 +129,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch)
return child->ops->peek(child);
}
static unsigned int red_drop(struct Qdisc* sch)
static unsigned int red_drop(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
......@@ -149,7 +148,7 @@ static unsigned int red_drop(struct Qdisc* sch)
return 0;
}
static void red_reset(struct Qdisc* sch)
static void red_reset(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
......@@ -216,7 +215,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
static int red_init(struct Qdisc* sch, struct nlattr *opt)
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
struct red_sched_data *q = qdisc_priv(sch);
......
......@@ -92,8 +92,7 @@ typedef unsigned char sfq_index;
* while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
* are 'pointers' to dep[] array
*/
struct sfq_head
{
struct sfq_head {
sfq_index next;
sfq_index prev;
};
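As the comment above describes, a single sfq_index value space covers both slots and the dep[] heads: values below SFQ_SLOTS name a slot, and values from SFQ_SLOTS upward are 'pointers' into dep[], which is what the sfq_dep_head() helper in this patch decodes. A minimal userspace sketch of that split, with illustrative sizes only:

#include <stdio.h>

/* Illustrative sizes only, not the kernel's. */
#define SFQ_DEPTH 8
#define SFQ_SLOTS 8

typedef unsigned char sfq_index;

struct sfq_head {
	sfq_index next;
	sfq_index prev;
};

static struct sfq_head dep[SFQ_DEPTH];

/* Decode an index: below SFQ_SLOTS it names a slot, otherwise it points
 * into dep[] (loosely mirroring sfq_dep_head() in the patch).
 */
static struct sfq_head *dep_head(sfq_index val)
{
	if (val < SFQ_SLOTS)
		return NULL;			/* a real slot, not a dep head */
	return &dep[val - SFQ_SLOTS];
}

int main(void)
{
	printf("index 3  -> %s\n", dep_head(3) ? "dep head" : "slot");
	printf("index 10 -> dep[%ld]\n", (long)(dep_head(10) - dep));
	return 0;
}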
......@@ -108,11 +107,10 @@ struct sfq_slot {
short allot; /* credit for this slot */
};
struct sfq_sched_data
{
struct sfq_sched_data {
/* Parameters */
int perturb_period;
unsigned quantum; /* Allotment per round: MUST BE >= MTU */
unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
int limit;
/* Variables */
......@@ -137,12 +135,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
return &q->dep[val - SFQ_SLOTS];
}
static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
u32 h, h2;
......@@ -157,13 +155,13 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
iph = ip_hdr(skb);
h = (__force u32)iph->daddr;
h2 = (__force u32)iph->saddr ^ iph->protocol;
if (iph->frag_off & htons(IP_MF|IP_OFFSET))
if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
iph = ip_hdr(skb);
h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
}
break;
}
......@@ -181,7 +179,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
if (poff >= 0 &&
pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
iph = ipv6_hdr(skb);
h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
}
break;
}
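sfq_fold_hash() mixes the two header-derived words with the perturbation key and masks the result down to one of SFQ_HASH_DIVISOR buckets, which only works because the divisor is a power of two. A minimal userspace sketch of that folding step, using a toy mixer in place of jhash_2words():

#include <stdio.h>
#include <stdint.h>

#define HASH_DIVISOR 1024	/* must stay a power of two for the mask trick */

/* Toy stand-in for jhash_2words(); only the masking step is the point. */
static uint32_t mix(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = a ^ (b * 2654435761u) ^ seed;

	h ^= h >> 16;
	h *= 2246822519u;
	h ^= h >> 13;
	return h;
}

static unsigned int fold_hash(uint32_t h, uint32_t h1, uint32_t perturbation)
{
	return mix(h, h1, perturbation) & (HASH_DIVISOR - 1);
}

int main(void)
{
	/* e.g. h = daddr, h1 = saddr ^ protocol, as in sfq_hash() above */
	printf("bucket = %u\n", fold_hash(0xc0a80001u, 0xc0a80002u ^ 6, 0x1234u));
	return 0;
}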
......
......@@ -97,8 +97,7 @@
changed the limit is not effective anymore.
*/
struct tbf_sched_data
{
struct tbf_sched_data {
/* Parameters */
u32 limit; /* Maximal length of backlog: bytes */
u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
......@@ -115,10 +114,10 @@ struct tbf_sched_data
struct qdisc_watchdog watchdog; /* Watchdog timer */
};
#define L2T(q,L) qdisc_l2t((q)->R_tab,L)
#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
#define L2T(q, L) qdisc_l2t((q)->R_tab, L)
#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
int ret;
......@@ -138,7 +137,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return NET_XMIT_SUCCESS;
}
static unsigned int tbf_drop(struct Qdisc* sch)
static unsigned int tbf_drop(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
unsigned int len = 0;
......@@ -150,7 +149,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
return len;
}
static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
......@@ -209,7 +208,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
return NULL;
}
static void tbf_reset(struct Qdisc* sch)
static void tbf_reset(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
......@@ -227,7 +226,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
[TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};
static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
int err;
struct tbf_sched_data *q = qdisc_priv(sch);
......@@ -236,7 +235,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
struct qdisc_rate_table *rtab = NULL;
struct qdisc_rate_table *ptab = NULL;
struct Qdisc *child = NULL;
int max_size,n;
int max_size, n;
err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
if (err < 0)
......@@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
}
for (n = 0; n < 256; n++)
if (rtab->data[n] > qopt->buffer) break;
max_size = (n << qopt->rate.cell_log)-1;
if (rtab->data[n] > qopt->buffer)
break;
max_size = (n << qopt->rate.cell_log) - 1;
if (ptab) {
int size;
for (n = 0; n < 256; n++)
if (ptab->data[n] > qopt->mtu) break;
size = (n << qopt->peakrate.cell_log)-1;
if (size < max_size) max_size = size;
if (ptab->data[n] > qopt->mtu)
break;
size = (n << qopt->peakrate.cell_log) - 1;
if (size < max_size)
max_size = size;
}
if (max_size < 0)
goto done;
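The reformatted loops above derive max_size from the rate table: find the first slot whose token cost exceeds the configured buffer (or mtu, for the peak-rate table), convert that slot count back to bytes with the cell_log shift, and keep the smaller of the two limits. A minimal userspace sketch of that computation, with hypothetical data rather than a real qdisc_rate_table:

#include <stdio.h>

/* Hypothetical stand-in for a qdisc_rate_table: data[n] is the token
 * cost of a packet of n cells, one cell being (1 << cell_log) bytes.
 */
struct rate_table {
	unsigned int data[256];
	int cell_log;
};

/* Largest packet size whose cost still fits in 'budget' tokens,
 * mirroring the loops in tbf_change().
 */
static int max_size_for(const struct rate_table *tab, unsigned int budget)
{
	int n;

	for (n = 0; n < 256; n++)
		if (tab->data[n] > budget)
			break;
	return (n << tab->cell_log) - 1;
}

int main(void)
{
	struct rate_table rtab = { .cell_log = 3 };	/* 8-byte cells */
	int n;

	for (n = 0; n < 256; n++)
		rtab.data[n] = n * 10;			/* 10 tokens per cell */

	/* first cell costing more than 100 tokens is cell 11 -> (11 << 3) - 1 = 87 */
	printf("max_size = %d bytes\n", max_size_for(&rtab, 100));
	return 0;
}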
......@@ -310,7 +312,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
return err;
}
static int tbf_init(struct Qdisc* sch, struct nlattr *opt)
static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
struct tbf_sched_data *q = qdisc_priv(sch);
......@@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
static const struct Qdisc_class_ops tbf_class_ops =
{
static const struct Qdisc_class_ops tbf_class_ops = {
.graft = tbf_graft,
.leaf = tbf_leaf,
.get = tbf_get,
......
......@@ -53,8 +53,7 @@
which will not break load balancing, though native slave
traffic will have the highest priority. */
struct teql_master
{
struct teql_master {
struct Qdisc_ops qops;
struct net_device *dev;
struct Qdisc *slaves;
......@@ -65,22 +64,21 @@ struct teql_master
unsigned long tx_dropped;
};
struct teql_sched_data
{
struct teql_sched_data {
struct Qdisc *next;
struct teql_master *m;
struct neighbour *ncache;
struct sk_buff_head q;
};
#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
/* "teql*" qdisc routines */
static int
teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct teql_sched_data *q = qdisc_priv(sch);
......@@ -97,7 +95,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
static struct sk_buff *
teql_dequeue(struct Qdisc* sch)
teql_dequeue(struct Qdisc *sch)
{
struct teql_sched_data *dat = qdisc_priv(sch);
struct netdev_queue *dat_queue;
......@@ -117,13 +115,13 @@ teql_dequeue(struct Qdisc* sch)
}
static struct sk_buff *
teql_peek(struct Qdisc* sch)
teql_peek(struct Qdisc *sch)
{
/* teql is meant to be used as root qdisc */
return NULL;
}
static __inline__ void
static inline void
teql_neigh_release(struct neighbour *n)
{
if (n)
......@@ -131,7 +129,7 @@ teql_neigh_release(struct neighbour *n)
}
static void
teql_reset(struct Qdisc* sch)
teql_reset(struct Qdisc *sch)
{
struct teql_sched_data *dat = qdisc_priv(sch);
......@@ -141,13 +139,14 @@ teql_reset(struct Qdisc* sch)
}
static void
teql_destroy(struct Qdisc* sch)
teql_destroy(struct Qdisc *sch)
{
struct Qdisc *q, *prev;
struct teql_sched_data *dat = qdisc_priv(sch);
struct teql_master *master = dat->m;
if ((prev = master->slaves) != NULL) {
prev = master->slaves;
if (prev) {
do {
q = NEXT_SLAVE(prev);
if (q == sch) {
......@@ -179,7 +178,7 @@ teql_destroy(struct Qdisc* sch)
static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{
struct net_device *dev = qdisc_dev(sch);
struct teql_master *m = (struct teql_master*)sch->ops;
struct teql_master *m = (struct teql_master *)sch->ops;
struct teql_sched_data *q = qdisc_priv(sch);
if (dev->hard_header_len > m->dev->hard_header_len)
......@@ -290,7 +289,8 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
nores = 0;
busy = 0;
if ((q = start) == NULL)
q = start;
if (!q)
goto drop;
do {
......@@ -355,10 +355,10 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
static int teql_master_open(struct net_device *dev)
{
struct Qdisc * q;
struct Qdisc *q;
struct teql_master *m = netdev_priv(dev);
int mtu = 0xFFFE;
unsigned flags = IFF_NOARP|IFF_MULTICAST;
unsigned int flags = IFF_NOARP | IFF_MULTICAST;
if (m->slaves == NULL)
return -EUNATCH;
......@@ -426,7 +426,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
do {
if (new_mtu > qdisc_dev(q)->mtu)
return -EINVAL;
} while ((q=NEXT_SLAVE(q)) != m->slaves);
} while ((q = NEXT_SLAVE(q)) != m->slaves);
}
dev->mtu = new_mtu;
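teql keeps its slaves on a circular singly linked list (NEXT_SLAVE), so both teql_destroy() and the MTU check above walk from master->slaves until they come back around to the start. A minimal userspace sketch of that traversal, with hypothetical node types:

#include <stdio.h>

/* Hypothetical slave entry on a circular singly linked list. */
struct slave {
	int mtu;
	struct slave *next;
};

/* Walk the ring once, as teql_master_mtu() does, and reject an MTU
 * larger than any slave can carry.
 */
static int ring_mtu_ok(struct slave *slaves, int new_mtu)
{
	struct slave *q = slaves;

	if (!q)
		return 1;	/* no slaves yet, nothing to violate */
	do {
		if (new_mtu > q->mtu)
			return 0;
		q = q->next;
	} while (q != slaves);
	return 1;
}

int main(void)
{
	struct slave a = { .mtu = 1500 }, b = { .mtu = 1400 };

	a.next = &b;
	b.next = &a;		/* close the ring */

	printf("mtu 1300 ok: %d\n", ring_mtu_ok(&a, 1300));	/* 1 */
	printf("mtu 1500 ok: %d\n", ring_mtu_ok(&a, 1500));	/* 0 */
	return 0;
}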
......