Commit eb1f4c38, authored Jul 18, 2002 by David S. Miller

    PKT SCHED: Add HTB scheduler.

parent 821b5162

Showing 1 changed file with 1642 additions and 0 deletions:
net/sched/sch_htb.c (new file, mode 100644): +1642 -0
/* vim: ts=8 sw=8
* net/sched/sch_htb.c Hierarchical token bucket, feed tree version
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Martin Devera, <devik@cdi.cz>
*
* Credits (in time order) for older HTB versions:
* Ondrej Kraus, <krauso@barr.cz>
* found missing INIT_QDISC(htb)
* Vladimir Smelhaus, Aamer Akhter, Bert Hubert
* helped a lot to locate nasty class stall bug
* Andi Kleen, Jamal Hadi, Bert Hubert
* code review and helpful comments on shaping
* and many others. thanks.
*
* $Id: sch_htb.c,v 1.13 2002/05/25 09:04:50 devik Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>
/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
#define HTB_HSIZE 16		/* classid hash size */
#define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
#define HTB_DEBUG 1		/* compile debugging support (activated by tc tool) */
#define HTB_RATECM 1		/* whether to use rate computer */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
#define HTB_VER 0x30006		/* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* temporary debug defines to be removed after beta stage */
#define DEVIK_MEND(N)
#define DEVIK_MSTART(N)
/* debugging support; S is subsystem, these are defined:
0 - netlink messages
1 - enqueue
2 - drop & requeue
3 - dequeue main
4 - dequeue one prio DRR part
5 - dequeue class accounting
6 - class overlimit status computation
7 - hint tree
8 - event queue
10 - rate estimator
11 - classifier
12 - fast dequeue cache
L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
q->debug uint32 contains 16 2-bit fields one for subsystem starting
from LSB
*/
#ifdef HTB_DEBUG
#define HTB_DBG(S,L,FMT,ARG...) if (((q->debug>>(2*S))&3) >= L) \
printk(KERN_DEBUG FMT,##ARG)
#define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
#define HTB_PASSQ q,
#define HTB_ARGQ struct htb_sched *q,
#define static
#define __inline__
#define inline
#define HTB_CMAGIC 0xFEFAFEF1
#define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
if ((N)->rb_color == -1) break; \
rb_erase(N,R); \
(N)->rb_color = -1; } while (0)
#else
#define HTB_DBG(S,L,FMT,ARG...)
#define HTB_PASSQ
#define HTB_ARGQ
#define HTB_CHCL(cl)
#define htb_safe_rb_erase(N,R) rb_erase(N,R)
#endif
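/* Worked example of the q->debug encoding described above (illustrative
   values only, not part of the original commit text): each subsystem S owns
   bits 2*S and 2*S+1, so enabling level L for subsystem S means setting the
   bits (L << (2*S)). E.g. full detail (level 3) for subsystem 3 (dequeue
   main) is 3 << 6 = 0xc0, and (q->debug >> 6) & 3 then reads back 3. */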
/* used internally to keep status of single class */
enum htb_cmode {
    HTB_CANT_SEND,	/* class can't send and can't borrow */
    HTB_MAY_BORROW,	/* class can't send but may borrow */
    HTB_CAN_SEND	/* class can send */
};
/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class
{
#ifdef HTB_DEBUG
    unsigned magic;
#endif
    /* general class parameters */
    u32 classid;
    struct tc_stats	stats;	/* generic stats */
    struct tc_htb_xstats xstats;/* our special stats */
    int refcnt;			/* usage count of this class */

#ifdef HTB_RATECM
    /* rate measurement counters */
    unsigned long rate_bytes,sum_bytes;
    unsigned long rate_packets,sum_packets;
#endif

    /* topology */
    int level;			/* our level (see above) */
    struct htb_class *parent;	/* parent class */
    struct list_head hlist;	/* classid hash list item */
    struct list_head sibling;	/* sibling list item */
    struct list_head children;	/* children list */

    union {
	    struct htb_class_leaf {
		    struct Qdisc *q;
		    int prio;
		    int aprio;
		    int quantum;
		    int deficit[TC_HTB_MAXDEPTH];
		    struct list_head drop_list;
	    } leaf;
	    struct htb_class_inner {
		    rb_root_t feed[TC_HTB_NUMPRIO];	/* feed trees */
		    rb_node_t *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
	    } inner;
    } un;
    rb_node_t node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
    rb_node_t pq_node;			/* node for event queue */
    unsigned long pq_key;		/* the same type as jiffies global */

    int prio_activity;		/* for which prios are we active */
    enum htb_cmode cmode;	/* current mode of the class */

    /* class attached filters */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int warned;		/* only one warning about non work conserving .. */

    /* token bucket parameters */
    struct qdisc_rate_table *rate;	/* rate table of the class itself */
    struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
    long buffer,cbuffer;		/* token bucket depth/rate */
    long mbuffer;			/* max wait time */
    long tokens,ctokens;		/* current number of tokens */
    psched_time_t t_c;			/* checkpoint time */
};
/* TODO: maybe compute rate when size is too large .. or drop ? */
static __inline__ long L2T(struct htb_class *cl,
	struct qdisc_rate_table *rate, int size)
{
    int slot = size >> rate->rate.cell_log;
    if (slot > 255) {
	cl->xstats.giants++;
	slot = 255;
    }
    return rate->data[slot];
}
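/* Worked example of the L2T() lookup above (illustrative numbers only):
   with cell_log = 3, a 1000 byte packet maps to slot 1000 >> 3 = 125, and
   rate->data[125] holds the precomputed transmission time of a packet of
   that cell size at the configured rate; any slot beyond 255 is clamped
   and counted in xstats.giants. */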
struct htb_sched
{
    struct list_head root;			/* root classes list */
    struct list_head hash[HTB_HSIZE];		/* hashed by classid */
    struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

    /* self list - roots of self generating tree */
    rb_root_t row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
    int row_mask[TC_HTB_MAXDEPTH];
    rb_node_t *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

    /* self wait list - roots of wait PQs per row */
    rb_root_t wait_pq[TC_HTB_MAXDEPTH];

    /* time of nearest event per level (row) */
    unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

    /* whether we hit non-work conserving class during this dequeue; we use */
    int nwc_hit;	/* this to disable mindelay complaint in dequeue */

    int defcls;		/* class where unclassified flows go to */
    u32 debug;		/* subsystem debug levels */

    /* filters for qdisc itself */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int rate2quantum;		/* quant = rate / rate2quantum */
    psched_time_t now;		/* cached dequeue time */
    struct timer_list timer;	/* send delay timer */
#ifdef HTB_RATECM
    struct timer_list rttim;	/* rate computer timer */
    int recmp_bucket;		/* which hash bucket to recompute next */
#endif

    /* non shaped skbs; let them go directly thru */
    struct sk_buff_head direct_queue;
    int direct_qlen;  /* max qlen of above */

    long direct_pkts;
};
/* compute hash of size HTB_HSIZE for given handle */
static __inline__ int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
 #error "Declare new hash for your HTB_HSIZE"
#endif
    h ^= h >> 8;	/* stolen from cbq_hash */
    h ^= h >> 4;
    return h & 0xf;
}
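/* Example of the folding above (illustration only): classid 1:5 is handle
   0x00010005; 0x00010005 ^ 0x00000100 = 0x00010105, then ^ 0x00001010 =
   0x00011115, and & 0xf gives bucket 5. */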
/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct list_head *p;
    if (TC_H_MAJ(handle) != sch->handle)
	return NULL;
    list_for_each (p,q->hash+htb_hash(handle)) {
	struct htb_class *cl = list_entry(p,struct htb_class,hlist);
	if (cl->classid == handle)
	    return cl;
    }
    return NULL;
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful we finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl;
    struct tcf_result res;
    struct tcf_proto *tcf;
    int result;

    /* allow to select class by setting skb->priority to valid classid;
       note that nfmark can be used too by attaching filter fw with no
       rules in it */
    if (skb->priority == sch->handle)
	return HTB_DIRECT;  /* X:0 (direct flow) selected */
    if ((cl = htb_find(skb->priority,sch)) != NULL)
	return cl;

    tcf = q->filter_list;
    while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_POLICE
	if (result == TC_POLICE_SHOT)
	    return NULL;
#endif
	if ((cl = (void*)res.class) == NULL) {
	    if (res.classid == sch->handle)
		return HTB_DIRECT;  /* X:0 (direct flow) */
	    if ((cl = htb_find(res.classid,sch)) == NULL)
		break; /* filter selected invalid classid */
	}
	if (!cl->level)
	    return cl; /* we hit leaf; return it */

	/* we have got inner class; apply inner filter chain */
	tcf = cl->filter_list;
    }
    /* classification failed; try to use default class */
    cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
    if (!cl || cl->level)
	return HTB_DIRECT; /* bad default .. this is safe bet */
    return cl;
}
#ifdef HTB_DEBUG
static void htb_next_rb_node(rb_node_t **n);
#define HTB_DUMTREE(root,memb) if(root) { \
rb_node_t *n = (root)->rb_node; \
while (n->rb_left) n = n->rb_left; \
while (n) { \
struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
printk(" %x",cl->classid); htb_next_rb_node (&n); \
} }
static void htb_debug_dump (struct htb_sched *q)
{
    int i,p;
    printk(KERN_DEBUG "htb*g j=%lu\n",jiffies);
    /* rows */
    for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
	printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
	for (p=0;p<TC_HTB_NUMPRIO;p++) {
	    if (!q->row[i][p].rb_node) continue;
	    printk(" p%d:",p);
	    HTB_DUMTREE(q->row[i]+p,node[p]);
	}
	printk("\n");
    }
    /* classes */
    for (i = 0; i < HTB_HSIZE; i++) {
	struct list_head *l;
	list_for_each (l,q->hash+i) {
	    struct htb_class *cl = list_entry(l,struct htb_class,hlist);
	    long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
	    printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
		    "pa=%x f:",
		    cl->classid,cl->cmode,cl->tokens,cl->ctokens,
		    cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
		    cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
	    if (cl->level)
	    for (p=0;p<TC_HTB_NUMPRIO;p++) {
		if (!cl->un.inner.feed[p].rb_node) continue;
		printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?
		    rb_entry(cl->un.inner.ptr[p],struct htb_class,node[p])->classid:0);
		HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
	    }
	    printk("\n");
	}
    }
}
#endif
/**
* htb_add_to_id_tree - adds class to the round robin list
*
* Routine adds class to the list (actually tree) sorted by classid.
* Make sure that class is not already on such list for given prio.
*/
static void htb_add_to_id_tree (HTB_ARGQ rb_root_t *root,
	struct htb_class *cl,int prio)
{
    rb_node_t **p = &root->rb_node, *parent = NULL;
    HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
#ifdef HTB_DEBUG
    if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
    HTB_CHCL(cl);
    if (*p) {
	struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
	HTB_CHCL(x);
    }
#endif
    while (*p) {
	struct htb_class *c; parent = *p;
	c = rb_entry(parent, struct htb_class, node[prio]);
	HTB_CHCL(c);
	if (cl->classid > c->classid)
	    p = &parent->rb_right;
	else
	    p = &parent->rb_left;
    }
    rb_link_node(&cl->node[prio], parent, p);
    rb_insert_color(&cl->node[prio], root);
}
/**
* htb_add_to_wait_tree - adds class to the event queue with delay
*
* The class is added to priority event queue to indicate that class will
* change its mode in cl->pq_key microseconds. Make sure that class is not
* already in the queue.
*/
static void htb_add_to_wait_tree (struct htb_sched *q,
	struct htb_class *cl,long delay,int debug_hint)
{
    rb_node_t **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
    HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
#ifdef HTB_DEBUG
    if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
    HTB_CHCL(cl);
    if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
	printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
#endif
    DEVIK_MSTART(9);
    cl->pq_key = jiffies + PSCHED_US2JIFFIE(delay);
    if (cl->pq_key == jiffies)
	cl->pq_key++;

    /* update the nearest event cache */
    if (q->near_ev_cache[cl->level] - cl->pq_key < 0x80000000)
	q->near_ev_cache[cl->level] = cl->pq_key;

    while (*p) {
	struct htb_class *c; parent = *p;
	c = rb_entry(parent, struct htb_class, pq_node);
	if (cl->pq_key - c->pq_key < 0x80000000)
	    p = &parent->rb_right;
	else
	    p = &parent->rb_left;
    }
    rb_link_node(&cl->pq_node, parent, p);
    rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
    DEVIK_MEND(9);
}
/**
* htb_next_rb_node - finds next node in binary tree
*
* When we are past last key we return NULL.
* Average complexity is 2 steps per call.
*/
static void htb_next_rb_node(rb_node_t **n)
{
    rb_node_t *p;
    if ((*n)->rb_right) {
	*n = (*n)->rb_right;
	while ((*n)->rb_left)
	    *n = (*n)->rb_left;
	return;
    }
    while ((p = (*n)->rb_parent) != NULL) {
	if (p->rb_left == *n)
	    break;
	*n = p;
    }
    *n = p;
}
/**
* htb_add_class_to_row - add class to its row
*
* The class is added to row at priorities marked in mask.
* It does nothing if mask == 0.
*/
static inline void htb_add_class_to_row(struct htb_sched *q,
	struct htb_class *cl,int mask)
{
    HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
	    cl->classid,mask,q->row_mask[cl->level]);
    HTB_CHCL(cl);
    q->row_mask[cl->level] |= mask;
    while (mask) {
	int prio = ffz(~mask);
	mask &= ~(1 << prio);
	htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
    }
}
/**
* htb_remove_class_from_row - removes class from its row
*
* The class is removed from row at priorities marked in mask.
* It does nothing if mask == 0.
*/
static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
	struct htb_class *cl,int mask)
{
    int m = 0;
    HTB_CHCL(cl);
    while (mask) {
	int prio = ffz(~mask);
	mask &= ~(1 << prio);
	if (q->ptr[cl->level][prio] == cl->node+prio)
	    htb_next_rb_node(q->ptr[cl->level]+prio);
	htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
	if (!q->row[cl->level][prio].rb_node)
	    m |= 1 << prio;
    }
    HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
	    cl->classid,mask,q->row_mask[cl->level],m);
    q->row_mask[cl->level] &= ~m;
}
/**
 * htb_activate_prios - creates active class's feed chain
*
* The class is connected to ancestors and/or appropriate rows
* for priorities it is participating on. cl->cmode must be new
* (activated) mode. It does nothing if cl->prio_activity == 0.
*/
static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
{
    struct htb_class *p = cl->parent;
    long m,mask = cl->prio_activity;
    HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
    HTB_CHCL(cl);

    while (cl->cmode == HTB_MAY_BORROW && p && mask) {
	HTB_CHCL(p);
	m = mask; while (m) {
	    int prio = ffz(~m);
	    m &= ~(1 << prio);

	    if (p->un.inner.feed[prio].rb_node)
		/* parent already has its feed in use so that
		   reset bit in mask as parent is already ok */
		mask &= ~(1 << prio);

	    htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
	}
	HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
		p->classid,p->prio_activity,mask,p->cmode);
	p->prio_activity |= mask;
	cl = p; p = cl->parent;
	HTB_CHCL(cl);
    }
    if (cl->cmode == HTB_CAN_SEND && mask)
	htb_add_class_to_row(q,cl,mask);
}
/**
* htb_deactivate_prios - remove class from feed chain
*
* cl->cmode must represent old mode (before deactivation). It does
* nothing if cl->prio_activity == 0. Class is removed from all feed
* chains and rows.
*/
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
    struct htb_class *p = cl->parent;
    long m,mask = cl->prio_activity;
    HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
    HTB_CHCL(cl);

    while (cl->cmode == HTB_MAY_BORROW && p && mask) {
	m = mask; mask = 0;
	while (m) {
	    int prio = ffz(~m);
	    m &= ~(1 << prio);

	    if (p->un.inner.ptr[prio] == cl->node+prio)
		htb_next_rb_node(p->un.inner.ptr + prio);
	    htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);
	    if (!p->un.inner.feed[prio].rb_node)
		mask |= 1 << prio;
	}
	HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
		p->classid,p->prio_activity,mask,p->cmode);
	p->prio_activity &= ~mask;
	cl = p; p = cl->parent;
	HTB_CHCL(cl);
    }
    if (cl->cmode == HTB_CAN_SEND && mask)
	htb_remove_class_from_row(q,cl,mask);
}
/**
* htb_class_mode - computes and returns current class mode
*
* It computes cl's mode at time cl->t_c+diff and returns it. If mode
* is not HTB_CAN_SEND then cl->pq_key is updated to time difference
* from now to time when cl will change its state.
* Also it is worth to note that class mode doesn't change simply
* at cl->{c,}tokens == 0 but there can rather be hysteresis of
* 0 .. -cl->{c,}buffer range. It is meant to limit number of
* mode transitions per time unit. The speed gain is about 1/6.
*/
static __inline__ enum htb_cmode
htb_class_mode(struct htb_class *cl,long *diff)
{
    long toks;

    if ((toks = (cl->ctokens + *diff)) < (
#ifdef HTB_HYSTERESIS
	    cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
#endif
	    0)) {
	*diff = -toks;
	return HTB_CANT_SEND;
    }
    if ((toks = (cl->tokens + *diff)) >= (
#ifdef HTB_HYSTERESIS
	    cl->cmode == HTB_CAN_SEND ? -cl->buffer :
#endif
	    0))
	return HTB_CAN_SEND;

    *diff = -toks;
    return HTB_MAY_BORROW;
}
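/* Restating the hysteresis above with illustrative thresholds (no new
   behaviour): with HTB_HYSTERESIS a class currently in HTB_CAN_SEND stays
   there until tokens+diff falls below -buffer, while a class that had to
   borrow only returns to HTB_CAN_SEND once tokens+diff climbs back to 0
   (and analogously for ctokens/cbuffer vs. HTB_CANT_SEND), so small token
   fluctuations around zero do not flip the mode on every packet. */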
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
    enum htb_cmode new_mode = htb_class_mode(cl,diff);
    HTB_CHCL(cl);
    HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);

    if (new_mode == cl->cmode)
	return;

    if (cl->prio_activity) { /* not necessary: speed optimization */
	if (cl->cmode != HTB_CANT_SEND)
	    htb_deactivate_prios(q,cl);
	cl->cmode = new_mode;
	if (new_mode != HTB_CANT_SEND)
	    htb_activate_prios(q,cl);
    } else
	cl->cmode = new_mode;
}
/**
* htb_activate - inserts leaf cl into appropriate active feeds
*
* Routine learns (new) priority of leaf and activates feed chain
* for the prio. It can be called on already active leaf safely.
* It also adds leaf into droplist.
*/
static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
{
    BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
    HTB_CHCL(cl);
    if (!cl->prio_activity) {
	cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
	htb_activate_prios(q,cl);
	list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
    }
}
/**
* htb_deactivate - remove leaf cl from active feeds
*
 * Make sure that leaf is active. In other words it can't be called
* with non-active leaf. It also removes class from the drop list.
*/
static __inline__ void htb_deactivate(struct htb_sched *q,struct htb_class *cl)
{
    BUG_TRAP(cl->prio_activity);
    HTB_CHCL(cl);
    htb_deactivate_prios(q,cl);
    cl->prio_activity = 0;
    list_del_init(&cl->un.leaf.drop_list);
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl = htb_classify(skb,sch);

    DEVIK_MSTART(0);
    if (cl == HTB_DIRECT || !cl) {
	/* enqueue to helper queue */
	if (q->direct_queue.qlen < q->direct_qlen && cl) {
	    __skb_queue_tail(&q->direct_queue, skb);
	    q->direct_pkts++;
	} else {
	    kfree_skb (skb);
	    sch->stats.drops++;
	    DEVIK_MEND(0);
	    return NET_XMIT_DROP;
	}
    } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
	sch->stats.drops++;
	cl->stats.drops++;
	DEVIK_MEND(0);
	return NET_XMIT_DROP;
    } else {
	cl->stats.packets++; cl->stats.bytes += skb->len;
	DEVIK_MSTART(1);
	htb_activate (q,cl);
	DEVIK_MEND(1);
    }

    sch->q.qlen++;
    sch->stats.packets++; sch->stats.bytes += skb->len;
    HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
    DEVIK_MEND(0);
    return NET_XMIT_SUCCESS;
}
/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl = htb_classify(skb,sch);

    if (cl == HTB_DIRECT || !cl) {
	/* enqueue to helper queue */
	if (q->direct_queue.qlen < q->direct_qlen && cl) {
	    __skb_queue_tail(&q->direct_queue, skb);
	    q->direct_pkts++;
	} else {
	    kfree_skb (skb);
	    sch->stats.drops++;
	    return NET_XMIT_DROP;
	}
    } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
	sch->stats.drops++;
	cl->stats.drops++;
	return NET_XMIT_DROP;
    } else
	htb_activate (q,cl);

    sch->q.qlen++;
    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}
static void htb_timer(unsigned long arg)
{
    struct Qdisc *sch = (struct Qdisc*)arg;
    sch->flags &= ~TCQ_F_THROTTLED;
    wmb();
    netif_schedule(sch->dev);
}
#ifdef HTB_RATECM
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
static void htb_rate_timer(unsigned long arg)
{
    struct Qdisc *sch = (struct Qdisc*)arg;
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct list_head *p;

    /* lock queue so that we can muck with it */
    HTB_QLOCK(sch);
    HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);

    q->rttim.expires = jiffies + HZ;
    add_timer(&q->rttim);

    /* scan and recompute one bucket at time */
    if (++q->recmp_bucket >= HTB_HSIZE)
	q->recmp_bucket = 0;
    list_for_each (p,q->hash+q->recmp_bucket) {
	struct htb_class *cl = list_entry(p,struct htb_class,hlist);
	HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
		cl->classid,cl->sum_bytes,cl->sum_packets);
	RT_GEN (cl->sum_bytes,cl->rate_bytes);
	RT_GEN (cl->sum_packets,cl->rate_packets);
    }
    HTB_QUNLOCK(sch);
}
#endif
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock than the event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
	int level,int bytes)
{
    long toks,diff;
    enum htb_cmode old_mode;
    HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
if (toks > cl->B) toks = cl->B; \
toks -= L2T(cl, cl->R, bytes); \
if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
cl->T = toks
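/* Worked example of HTB_ACCNT above (hypothetical numbers): with
   tokens = 5000, buffer = 10000 and L2T() = 800 for this packet,
   toks = min(5000 + diff, 10000) - 800; the "> cl->B" clamp keeps an idle
   class from accumulating more than one buffer worth of credit, and the
   "<= -cl->mbuffer" clamp bounds how far into debt the class can go. */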
    while (cl) {
	HTB_CHCL(cl);
	diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
#ifdef HTB_DEBUG
	if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
	    if (net_ratelimit())
		printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
			cl->classid, diff, q->now, cl->t_c, jiffies);
	    diff = 1000;
	}
#endif
	if (cl->level >= level) {
	    if (cl->level == level) cl->xstats.lends++;
	    HTB_ACCNT (tokens,buffer,rate);
	} else {
	    cl->xstats.borrows++;
	    cl->tokens += diff; /* we moved t_c; update tokens */
	}
	HTB_ACCNT (ctokens,cbuffer,ceil);
	cl->t_c = q->now;
	HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);

	old_mode = cl->cmode; diff = 0;
	htb_change_class_mode(q,cl,&diff);
	if (old_mode != cl->cmode) {
	    if (old_mode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
	    if (cl->cmode != HTB_CAN_SEND)
		htb_add_to_wait_tree (q,cl,diff,1);
	}

#ifdef HTB_RATECM
	/* update rate counters */
	cl->sum_bytes += bytes; cl->sum_packets++;
#endif

	/* update byte stats except for leaves which are already updated */
	if (cl->level) {
	    cl->stats.bytes += bytes;
	    cl->stats.packets++;
	}
	cl = cl->parent;
    }
}
/**
* htb_do_events - make mode changes to classes at the level
*
* Scans event queue for pending events and applies them. Returns jiffies to
* next pending event (0 for no event in pq).
*/
static long htb_do_events(struct htb_sched *q,int level)
{
    int i;
    HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
	    level,q->wait_pq[level].rb_node,q->row_mask[level]);
    for (i = 0; i < 500; i++) {
	struct htb_class *cl;
	long diff;
	rb_node_t *p = q->wait_pq[level].rb_node;
	if (!p) return 0;
	while (p->rb_left) p = p->rb_left;

	cl = rb_entry(p, struct htb_class, pq_node);
	if (cl->pq_key - (jiffies+1) < 0x80000000) {
	    HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - jiffies);
	    return cl->pq_key - jiffies;
	}
	htb_safe_rb_erase(p,q->wait_pq+level);
	diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
#ifdef HTB_DEBUG
	if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
	    if (net_ratelimit())
		printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
			cl->classid, diff, q->now, cl->t_c, jiffies);
	    diff = 1000;
	}
#endif
	htb_change_class_mode(q,cl,&diff);
	if (cl->cmode != HTB_CAN_SEND)
	    htb_add_to_wait_tree (q,cl,diff,2);
    }
    if (net_ratelimit())
	printk(KERN_WARNING "htb: too many events !\n");
    return HZ/10;
}
/**
* htb_lookup_leaf - returns next leaf class in DRR order
*
* Find leaf where current feed pointers points to.
*/
static struct htb_class *
htb_lookup_leaf(rb_root_t *tree,int prio,rb_node_t **pptr)
{
    int i;
    struct {
	rb_node_t *root;
	rb_node_t **pptr;
    } stk[TC_HTB_MAXDEPTH],*sp = stk;

    sp->root = tree->rb_node;
    sp->pptr = pptr;

    for (i = 0; i < 65535; i++) {
	if (!*sp->pptr) { /* we are at right end; rewind & go up */
	    *sp->pptr = sp->root;
	    while ((*sp->pptr)->rb_left)
		*sp->pptr = (*sp->pptr)->rb_left;
	    if (sp > stk) {
		sp--;
		BUG_TRAP(*sp->pptr); if (!*sp->pptr) return NULL;
		htb_next_rb_node (sp->pptr);
	    }
	} else {
	    struct htb_class *cl;
	    cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
	    HTB_CHCL(cl);
	    if (!cl->level)
		return cl;
	    (++sp)->root = cl->un.inner.feed[prio].rb_node;
	    sp->pptr = cl->un.inner.ptr+prio;
	}
    }
    BUG_TRAP(0);
    return NULL;
}
/* dequeues packet at given priority and level; call only if
you are sure that there is active class at prio/level */
static struct sk_buff *
htb_dequeue_tree(struct htb_sched *q,int prio,int level)
{
    struct sk_buff *skb = NULL;
    //struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl,*start;
    /* look initial class up in the row */
    DEVIK_MSTART(6);
    start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);

    do {
	BUG_TRAP(cl && cl->un.leaf.q->q.qlen); if (!cl) return NULL;
	HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
		prio,level,cl->classid,cl->un.leaf.deficit[level]);

	if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
	    break;
	if (!cl->warned) {
	    printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
	    cl->warned = 1;
	}
	q->nwc_hit++;
	htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
	cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
    } while (cl != start);
    DEVIK_MEND(6);
    DEVIK_MSTART(7);

    if (likely(skb != NULL)) {
	if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
	    HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
		    level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
	    cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
	    htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
	}
	/* this used to be after charge_class but this constellation
	   gives us slightly better performance */
	if (!cl->un.leaf.q->q.qlen)
	    htb_deactivate (q,cl);
	DEVIK_MSTART(8);
	htb_charge_class(q,cl,level,skb->len);
	DEVIK_MEND(8);
    }
    DEVIK_MEND(7);
    return skb;
}
static void htb_delay_by(struct Qdisc *sch,long delay)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    if (netif_queue_stopped(sch->dev)) return;
    if (delay <= 0) delay = 1;
    if (unlikely(delay > 5*HZ)) {
	if (net_ratelimit())
	    printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
	delay = 5*HZ;
    }
    del_timer(&q->timer);
    q->timer.expires = jiffies + delay;
    add_timer(&q->timer);
    sch->flags |= TCQ_F_THROTTLED;
    sch->stats.overlimits++;
    HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
}
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
    struct sk_buff *skb = NULL;
    struct htb_sched *q = (struct htb_sched *)sch->data;
    int level;
    long min_delay;

    HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
	    sch->q.qlen);

    /* try to dequeue direct packets as high prio (!) to minimize cpu work */
    if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;
	return skb;
    }

    DEVIK_MSTART(2);
    if (!sch->q.qlen) goto fin;
    PSCHED_GET_TIME(q->now);

    min_delay = HZ*5;
    q->nwc_hit = 0;
    for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
	/* common case optimization - skip event handler quickly */
	int m;
	long delay;
	DEVIK_MSTART(3);
	if (jiffies - q->near_ev_cache[level] < 0x80000000 || 0) {
	    delay = htb_do_events(q,level);
	    q->near_ev_cache[level] += delay ? delay : HZ;
	} else
	    delay = q->near_ev_cache[level] - jiffies;

	if (delay && min_delay > delay)
	    min_delay = delay;
	DEVIK_MEND(3);
	DEVIK_MSTART(5);
	m = ~q->row_mask[level];
	while (m != (int)(-1)) {
	    int prio = ffz (m);
	    m |= 1 << prio;
	    skb = htb_dequeue_tree(q,prio,level);
	    if (likely(skb != NULL)) {
		sch->q.qlen--;
		sch->flags &= ~TCQ_F_THROTTLED;
		DEVIK_MEND(5);
		goto fin;
	    }
	}
	DEVIK_MEND(5);
    }
    DEVIK_MSTART(4);
#ifdef HTB_DEBUG
    if (!q->nwc_hit && min_delay >= 5*HZ && net_ratelimit()) {
	printk(KERN_ERR "HTB: mindelay=%ld, report it please !\n",min_delay);
	htb_debug_dump(q);
    }
#endif
    htb_delay_by (sch,min_delay);
    DEVIK_MEND(4);
fin:
    HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,jiffies,skb);
    DEVIK_MEND(2);
    return skb;
}
/* try to drop from each class (by prio) until one succeed */
static int htb_drop(struct Qdisc* sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    int prio;

    for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
	struct list_head *p;
	list_for_each (p,q->drops+prio) {
	    struct htb_class *cl = list_entry(p,struct htb_class,un.leaf.drop_list);
	    if (cl->un.leaf.q->ops->drop &&
		    cl->un.leaf.q->ops->drop(cl->un.leaf.q)) {
		sch->q.qlen--;
		if (!cl->un.leaf.q->q.qlen)
		    htb_deactivate (q,cl);
		return 1;
	    }
	}
    }
    return 0;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    int i;
    HTB_DBG(0,1,"htb_reset sch=%X, handle=%X\n",(int)sch,sch->handle);

    for (i = 0; i < HTB_HSIZE; i++) {
	struct list_head *p;
	list_for_each (p,q->hash+i) {
	    struct htb_class *cl = list_entry(p,struct htb_class,hlist);
	    if (cl->level)
		memset(&cl->un.inner,0,sizeof(cl->un.inner));
	    else {
		if (cl->un.leaf.q)
		    qdisc_reset(cl->un.leaf.q);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
	    }
	    cl->prio_activity = 0;
	    cl->cmode = HTB_CAN_SEND;
#ifdef HTB_DEBUG
	    cl->pq_node.rb_color = -1;
	    memset(cl->node,255,sizeof(cl->node));
#endif
	}
    }
    sch->flags &= ~TCQ_F_THROTTLED;
    del_timer(&q->timer);
    __skb_queue_purge(&q->direct_queue);
    sch->q.qlen = 0;
    memset(q->row,0,sizeof(q->row));
    memset(q->row_mask,0,sizeof(q->row_mask));
    memset(q->wait_pq,0,sizeof(q->wait_pq));
    memset(q->ptr,0,sizeof(q->ptr));
    for (i = 0; i < TC_HTB_NUMPRIO; i++)
	INIT_LIST_HEAD(q->drops+i);
}
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
    struct htb_sched *q = (struct htb_sched*)sch->data;
    struct rtattr *tb[TCA_HTB_INIT];
    struct tc_htb_glob *gopt;
    int i;
#ifdef HTB_DEBUG
    printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
	    HTB_VER >> 16,HTB_VER & 0xffff);
#endif
    if (!opt || rtattr_parse(tb, TCA_HTB_INIT, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
	    tb[TCA_HTB_INIT-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
	printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
	return -EINVAL;
    }
    gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
    if (gopt->version != HTB_VER >> 16) {
	printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
	return -EINVAL;
    }
    memset(q,0,sizeof(*q));
    q->debug = gopt->debug;
    HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);

    INIT_LIST_HEAD(&q->root);
    for (i = 0; i < HTB_HSIZE; i++)
	INIT_LIST_HEAD(q->hash+i);
    for (i = 0; i < TC_HTB_NUMPRIO; i++)
	INIT_LIST_HEAD(q->drops+i);

    init_timer(&q->timer);
    skb_queue_head_init(&q->direct_queue);

    q->direct_qlen = sch->dev->tx_queue_len;
    q->timer.function = htb_timer;
    q->timer.data = (unsigned long)sch;

#ifdef HTB_RATECM
    init_timer(&q->rttim);
    q->rttim.function = htb_rate_timer;
    q->rttim.data = (unsigned long)sch;
    q->rttim.expires = jiffies + HZ;
    add_timer(&q->rttim);
#endif
    if ((q->rate2quantum = gopt->rate2quantum) < 1)
	q->rate2quantum = 1;
    q->defcls = gopt->defcls;

    MOD_INC_USE_COUNT;
    return 0;
}
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
    struct htb_sched *q = (struct htb_sched*)sch->data;
    unsigned char *b = skb->tail;
    struct rtattr *rta;
    struct tc_htb_glob gopt;
    HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
    /* stats */
    HTB_QLOCK(sch);
    gopt.direct_pkts = q->direct_pkts;

#ifdef HTB_DEBUG
    htb_debug_dump(q);
#endif
    gopt.version = HTB_VER;
    gopt.rate2quantum = q->rate2quantum;
    gopt.defcls = q->defcls;
    gopt.debug = q->debug;
    rta = (struct rtattr*)b;
    RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
    RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
    rta->rta_len = skb->tail - b;
    sch->stats.qlen = sch->q.qlen;
    RTA_PUT(skb, TCA_STATS, sizeof(sch->stats), &sch->stats);
    HTB_QUNLOCK(sch);
    return skb->len;
rtattr_failure:
    HTB_QUNLOCK(sch);
    skb_trim(skb, skb->tail - skb->data);
    return -1;
}
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
	struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
    struct htb_sched *q = (struct htb_sched*)sch->data;
#endif
    struct htb_class *cl = (struct htb_class*)arg;
    unsigned char *b = skb->tail;
    struct rtattr *rta;
    struct tc_htb_opt opt;

    HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);

    HTB_QLOCK(sch);
    tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
    tcm->tcm_handle = cl->classid;
    if (!cl->level && cl->un.leaf.q) {
	tcm->tcm_info = cl->un.leaf.q->handle;
	cl->stats.qlen = cl->un.leaf.q->q.qlen;
    }

    rta = (struct rtattr*)b;
    RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

    memset (&opt,0,sizeof(opt));
    opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
    opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
    opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
    opt.level = cl->level;
    RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
    rta->rta_len = skb->tail - b;

#ifdef HTB_RATECM
    cl->stats.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
    cl->stats.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
#endif

    cl->xstats.tokens = cl->tokens;
    cl->xstats.ctokens = cl->ctokens;
    RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
    RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
    HTB_QUNLOCK(sch);
    return skb->len;
rtattr_failure:
    HTB_QUNLOCK(sch);
    skb_trim(skb, b - skb->data);
    return -1;
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
	struct Qdisc **old)
{
    struct htb_class *cl = (struct htb_class*)arg;

    if (cl && !cl->level) {
	if (new == NULL && (new = qdisc_create_dflt(sch->dev,
		&pfifo_qdisc_ops)) == NULL)
	    return -ENOBUFS;
	sch_tree_lock(sch);
	if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
	    /* TODO: is it correct ? Why CBQ doesn't do it ? */
	    sch->q.qlen -= (*old)->q.qlen;
	    qdisc_reset(*old);
	}
	sch_tree_unlock(sch);
	return 0;
    }
    return -ENOENT;
}
static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
{
    struct htb_class *cl = (struct htb_class*)arg;
    return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
#ifdef HTB_DEBUG
    struct htb_sched *q = (struct htb_sched *)sch->data;
#endif
    struct htb_class *cl = htb_find(classid,sch);
    HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
    if (cl)
	cl->refcnt++;
    return (unsigned long)cl;
}
static void htb_destroy_filters(struct tcf_proto **fl)
{
    struct tcf_proto *tp;

    while ((tp = *fl) != NULL) {
	*fl = tp->next;
	tp->ops->destroy(tp);
    }
}
static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
    if (!cl->level) {
	BUG_TRAP(cl->un.leaf.q);
	sch->q.qlen -= cl->un.leaf.q->q.qlen;
	qdisc_destroy(cl->un.leaf.q);
    }
    qdisc_put_rtab(cl->rate);
    qdisc_put_rtab(cl->ceil);

#ifdef CONFIG_NET_ESTIMATOR
    qdisc_kill_estimator(&cl->stats);
#endif
    htb_destroy_filters (&cl->filter_list);

    while (!list_empty(&cl->children))
	htb_destroy_class (sch,list_entry(cl->children.next,
		struct htb_class,sibling));

    /* note: this delete may happen twice (see htb_delete) */
    list_del(&cl->hlist);
    list_del(&cl->sibling);

    if (cl->prio_activity)
	htb_deactivate (q,cl);

    if (cl->cmode != HTB_CAN_SEND)
	htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);

    kfree(cl);
}
/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc* sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    HTB_DBG(0,1,"htb_destroy q=%p\n",q);
    del_timer_sync (&q->timer);
#ifdef HTB_RATECM
    del_timer_sync (&q->rttim);
#endif
    while (!list_empty(&q->root))
	htb_destroy_class (sch,list_entry(q->root.next,
		struct htb_class,sibling));

    htb_destroy_filters(&q->filter_list);
    __skb_queue_purge(&q->direct_queue);
    MOD_DEC_USE_COUNT;
}
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl = (struct htb_class*)arg;
    HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);

    // TODO: why don't we allow deleting a subtree? references? does
    // the tc subsystem guarantee us that in htb_destroy it holds no class
    // refs so that we can remove children safely there?
    if (!list_empty(&cl->children) || cl->filter_cnt)
	return -EBUSY;

    sch_tree_lock(sch);

    /* delete from hash and active; remainder in destroy_class */
    list_del_init(&cl->hlist);
    if (cl->prio_activity)
	htb_deactivate (q,cl);

    if (--cl->refcnt == 0)
	htb_destroy_class(sch,cl);

    sch_tree_unlock(sch);
    return 0;
}
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
#ifdef HTB_DEBUG
    struct htb_sched *q = (struct htb_sched *)sch->data;
#endif
    struct htb_class *cl = (struct htb_class*)arg;
    HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);

    if (--cl->refcnt == 0)
	htb_destroy_class(sch,cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
	u32 parentid, struct rtattr **tca, unsigned long *arg)
{
    int err = -EINVAL;
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl = (struct htb_class*)*arg,*parent;
    struct rtattr *opt = tca[TCA_OPTIONS-1];
    struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
    struct rtattr *tb[TCA_HTB_RTAB];
    struct tc_htb_opt *hopt;

    /* extract all subattrs from opt attr */
    if (!opt || rtattr_parse(tb, TCA_HTB_RTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
	    tb[TCA_HTB_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
	goto failure;

    parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);

    hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
    HTB_DBG(0,1,"htb_chg cl=%p, clid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n",
	    cl,cl?cl->classid:0,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
    rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
    ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
    if (!rtab || !ctab) goto failure;

    if (!cl) { /* new class */
	/* check maximal depth */
	if (parent && parent->parent && parent->parent->level < 2) {
	    printk(KERN_ERR "htb: tree is too deep\n");
	    goto failure;
	}
	err = -ENOBUFS;
	if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
	    goto failure;

	memset(cl, 0, sizeof(*cl));
	cl->refcnt = 1;
	INIT_LIST_HEAD(&cl->sibling);
	INIT_LIST_HEAD(&cl->hlist);
	INIT_LIST_HEAD(&cl->children);
	INIT_LIST_HEAD(&cl->un.leaf.drop_list);
#ifdef HTB_DEBUG
	cl->magic = HTB_CMAGIC;
#endif

	sch_tree_lock(sch);
	if (parent && !parent->level) {
	    /* turn parent into inner node */
	    sch->q.qlen -= parent->un.leaf.q->q.qlen;
	    qdisc_destroy (parent->un.leaf.q);
	    if (parent->prio_activity)
		htb_deactivate (q,parent);

	    /* remove from evt list because of level change */
	    if (parent->cmode != HTB_CAN_SEND) {
		htb_safe_rb_erase(&parent->pq_node,q->wait_pq /*+0*/);
		parent->cmode = HTB_CAN_SEND;
	    }
	    parent->level = (parent->parent ? parent->parent->level
		    : TC_HTB_MAXDEPTH) - 1;
	    memset (&parent->un.inner,0,sizeof(parent->un.inner));
	}
	/* leaf (we) needs elementary qdisc */
	if (!(cl->un.leaf.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
	    cl->un.leaf.q = &noop_qdisc;

	cl->classid = classid; cl->parent = parent;

	/* set class to be in HTB_CAN_SEND state */
	cl->tokens = hopt->buffer;
	cl->ctokens = hopt->cbuffer;
	cl->mbuffer = 60000000; /* 1min */
	PSCHED_GET_TIME(cl->t_c);
	cl->cmode = HTB_CAN_SEND;

	/* attach to the hash list and parent's family */
	list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
	list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
#ifdef HTB_DEBUG
	{
	    int i; for (i = 0; i < TC_HTB_NUMPRIO; i++) cl->node[i].rb_color = -1;
	    cl->pq_node.rb_color = -1;
	}
#endif
    } else sch_tree_lock(sch);

    cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
    if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
	printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.", cl->classid);
	cl->un.leaf.quantum = 1000;
    }
    if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
	printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.", cl->classid);
	cl->un.leaf.quantum = 200000;
    }
    if (hopt->quantum)
	cl->un.leaf.quantum = hopt->quantum;
    if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
	cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;

    cl->buffer = hopt->buffer;
    cl->cbuffer = hopt->cbuffer;
    if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
    if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
    sch_tree_unlock(sch);

    *arg = (unsigned long)cl;
    return 0;

failure:
    if (rtab) qdisc_put_rtab(rtab);
    if (ctab) qdisc_put_rtab(ctab);
    return err;
}
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl = (struct htb_class *)arg;
    struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
    HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
    return fl;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
	u32 classid)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl = htb_find (classid,sch);
    HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",
	    q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
/*if (cl && !cl->level) return 0;
The line above used to be there to prevent attaching filters to
leaves. But at least tc_index filter uses this just to get class
for other reasons so that we have to allow for it.
----
19.6.2002 As Werner explained it is ok - bind filter is just
another way to "lock" the class - unlike "get" this lock can
be broken by class during destroy IIUC.
*/
    if (cl)
	cl->filter_cnt++;
    else
	q->filter_cnt++;
    return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl = (struct htb_class *)arg;
    HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
    if (cl)
	cl->filter_cnt--;
    else
	q->filter_cnt--;
}
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    int i;

    if (arg->stop)
	return;

    for (i = 0; i < HTB_HSIZE; i++) {
	struct list_head *p;
	list_for_each (p,q->hash+i) {
	    struct htb_class *cl = list_entry(p,struct htb_class,hlist);
	    if (arg->count < arg->skip) {
		arg->count++;
		continue;
	    }
	    if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
		arg->stop = 1;
		return;
	    }
	    arg->count++;
	}
    }
}
static struct Qdisc_class_ops htb_class_ops =
{
    htb_graft,
    htb_leaf,
    htb_get,
    htb_put,
    htb_change_class,
    htb_delete,
    htb_walk,

    htb_find_tcf,
    htb_bind_filter,
    htb_unbind_filter,

    htb_dump_class,
};
struct Qdisc_ops htb_qdisc_ops =
{
    NULL,
    &htb_class_ops,
    "htb",
    sizeof(struct htb_sched),

    htb_enqueue,
    htb_dequeue,
    htb_requeue,
    htb_drop,

    htb_init,
    htb_reset,
    htb_destroy,
    NULL /* htb_change */,

    htb_dump,
};
#ifdef MODULE
int init_module(void)
{
    return register_qdisc(&htb_qdisc_ops);
}

void cleanup_module(void)
{
    unregister_qdisc(&htb_qdisc_ops);
}
MODULE_LICENSE("GPL");
#endif