Commit 62859d82
authored Jul 08, 2003 by Dmitry Torokhov

[NET] Attach inner qdiscs to TBF
parent ce8b73b4

Showing 13 changed files with 229 additions and 90 deletions (+229 -90)
include/net/pkt_sched.h    +1   -1
net/netsyms.c              +1   -0
net/sched/sch_atm.c        +4   -3
net/sched/sch_cbq.c        +4   -3
net/sched/sch_dsmark.c     +5   -4
net/sched/sch_fifo.c       +4   -3
net/sched/sch_gred.c       +8   -8
net/sched/sch_htb.c        +6   -5
net/sched/sch_ingress.c    +1   -1
net/sched/sch_prio.c       +4   -4
net/sched/sch_red.c        +4   -4
net/sched/sch_sfq.c        +6   -3
net/sched/sch_tbf.c        +181 -51
include/net/pkt_sched.h
@@ -61,7 +61,7 @@ struct Qdisc_ops
 	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
 	struct sk_buff *	(*dequeue)(struct Qdisc *);
 	int 			(*requeue)(struct sk_buff *, struct Qdisc *);
-	int 			(*drop)(struct Qdisc *);
+	unsigned int		(*drop)(struct Qdisc *);

 	int			(*init)(struct Qdisc *, struct rtattr *arg);
 	void			(*reset)(struct Qdisc *);
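The prototype change above is the contract the rest of the patch implements: ->drop() now returns the number of bytes it removed from the queue (0 when nothing could be dropped) instead of a 0/1 flag, so callers can keep byte backlogs accurate across nested qdiscs. A minimal sketch of a leaf qdisc following the new convention, modeled on the fifo_drop change further down; the name example_drop is illustrative and not part of this commit:

	/* Sketch only: a leaf qdisc ->drop() under the new convention.
	 * Return the byte count of the dropped packet, or 0 if the
	 * queue was empty.
	 */
	static unsigned int example_drop(struct Qdisc *sch)
	{
		struct sk_buff *skb = __skb_dequeue_tail(&sch->q);

		if (skb) {
			unsigned int len = skb->len;

			sch->stats.backlog -= len;	/* keep byte backlog in sync */
			sch->stats.drops++;
			kfree_skb(skb);
			return len;			/* bytes dropped, not just "1" */
		}
		return 0;				/* nothing to drop */
	}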
net/netsyms.c
@@ -641,6 +641,7 @@ EXPORT_SYMBOL(qdisc_tree_lock);
 #ifdef CONFIG_NET_SCHED
 PSCHED_EXPORTLIST;
 EXPORT_SYMBOL(pfifo_qdisc_ops);
+EXPORT_SYMBOL(bfifo_qdisc_ops);
 EXPORT_SYMBOL(register_qdisc);
 EXPORT_SYMBOL(unregister_qdisc);
 EXPORT_SYMBOL(qdisc_get_rtab);
net/sched/sch_atm.c
@@ -545,15 +545,16 @@ static int atm_tc_requeue(struct sk_buff *skb,struct Qdisc *sch)
 }

-static int atm_tc_drop(struct Qdisc *sch)
+static unsigned int atm_tc_drop(struct Qdisc *sch)
 {
 	struct atm_qdisc_data *p = PRIV(sch);
 	struct atm_flow_data *flow;
+	unsigned int len;

 	DPRINTK("atm_tc_drop(sch %p,[qdisc %p])\n",sch,p);
 	for (flow = p->flows; flow; flow = flow->next)
-		if (flow->q->ops->drop && flow->q->ops->drop(flow->q))
-			return 1;
+		if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
+			return len;
 	return 0;
 }
net/sched/sch_cbq.c
@@ -1231,11 +1231,12 @@ static void cbq_link_class(struct cbq_class *this)
 	}
 }

-static int cbq_drop(struct Qdisc* sch)
+static unsigned int cbq_drop(struct Qdisc* sch)
 {
 	struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
 	struct cbq_class *cl, *cl_head;
 	int prio;
+	unsigned int len;

 	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
 		if ((cl_head = q->active[prio]) == NULL)
@@ -1243,9 +1244,9 @@ static int cbq_drop(struct Qdisc* sch)

 		cl = cl_head;
 		do {
-			if (cl->q->ops->drop && cl->q->ops->drop(cl->q)) {
+			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
 				sch->q.qlen--;
-				return 1;
+				return len;
 			}
 		} while ((cl = cl->next_alive) != cl_head);
 	}
net/sched/sch_dsmark.c
@@ -302,17 +302,18 @@ static int dsmark_requeue(struct sk_buff *skb,struct Qdisc *sch)
 }

-static int dsmark_drop(struct Qdisc *sch)
+static unsigned int dsmark_drop(struct Qdisc *sch)
 {
 	struct dsmark_qdisc_data *p = PRIV(sch);
+	unsigned int len;

 	DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n",sch,p);
 	if (!p->q->ops->drop)
 		return 0;
-	if (!p->q->ops->drop(p->q))
+	if (!(len = p->q->ops->drop(p->q)))
 		return 0;
 	sch->q.qlen--;
-	return 1;
+	return len;
 }
net/sched/sch_fifo.c
@@ -81,16 +81,17 @@ bfifo_dequeue(struct Qdisc* sch)
 	return skb;
 }

-static int
+static unsigned int
 fifo_drop(struct Qdisc* sch)
 {
 	struct sk_buff *skb;

 	skb = __skb_dequeue_tail(&sch->q);
 	if (skb) {
-		sch->stats.backlog -= skb->len;
+		unsigned int len = skb->len;
+		sch->stats.backlog -= len;
 		kfree_skb(skb);
-		return 1;
+		return len;
 	}
 	return 0;
 }
net/sched/sch_gred.c
@@ -259,8 +259,7 @@ gred_dequeue(struct Qdisc* sch)
 	return NULL;
 }

-static int
-gred_drop(struct Qdisc* sch)
+static unsigned int gred_drop(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
@@ -269,11 +268,12 @@ gred_drop(struct Qdisc* sch)
 	skb = __skb_dequeue_tail(&sch->q);
 	if (skb) {
-		sch->stats.backlog -= skb->len;
+		unsigned int len = skb->len;
+		sch->stats.backlog -= len;
 		sch->stats.drops++;
 		q= t->tab[(skb->tc_index&0xf)];
 		if (q) {
-			q->backlog -= skb->len;
+			q->backlog -= len;
 			q->other++;
 			if (!q->backlog && !t->eqp)
 				PSCHED_GET_TIME(q->qidlestart);
@@ -282,7 +282,7 @@ gred_drop(struct Qdisc* sch)
 		}

 		kfree_skb(skb);
-		return 1;
+		return len;
 	}

 	q=t->tab[t->def];
net/sched/sch_htb.c
@@ -1059,7 +1059,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 }

 /* try to drop from each class (by prio) until one succeed */
-static int htb_drop(struct Qdisc* sch)
+static unsigned int htb_drop(struct Qdisc* sch)
 {
 	struct htb_sched *q = (struct htb_sched *)sch->data;
 	int prio;
@@ -1067,14 +1067,15 @@ static int htb_drop(struct Qdisc* sch)
 	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
 		struct list_head *p;
 		list_for_each (p, q->drops+prio) {
 			struct htb_class *cl = list_entry(p, struct htb_class,
 					un.leaf.drop_list);
+			unsigned int len;
 			if (cl->un.leaf.q->ops->drop &&
-					cl->un.leaf.q->ops->drop(cl->un.leaf.q)) {
+					(len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
 				sch->q.qlen--;
 				if (!cl->un.leaf.q->q.qlen)
 					htb_deactivate (q, cl);
-				return 1;
+				return len;
 			}
 		}
 	}
net/sched/sch_ingress.c
@@ -190,7 +190,7 @@ static int ingress_requeue(struct sk_buff *skb,struct Qdisc *sch)
 	return 0;
 }

-static int ingress_drop(struct Qdisc *sch)
+static unsigned int ingress_drop(struct Qdisc *sch)
 {
 #ifdef DEBUG_INGRESS
 	struct ingress_qdisc_data *p = PRIV(sch);
net/sched/sch_prio.c
@@ -124,18 +124,18 @@ prio_dequeue(struct Qdisc* sch)
 }

-static int prio_drop(struct Qdisc* sch)
+static unsigned int prio_drop(struct Qdisc* sch)
 {
 	struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
 	int prio;
+	unsigned int len;
 	struct Qdisc *qdisc;

 	for (prio = q->bands-1; prio >= 0; prio--) {
 		qdisc = q->queues[prio];
-		if (qdisc->ops->drop(qdisc)) {
+		if ((len = qdisc->ops->drop(qdisc)) != 0) {
 			sch->q.qlen--;
-			return 1;
+			return len;
 		}
 	}
 	return 0;
net/sched/sch_red.c
@@ -342,19 +342,19 @@ red_dequeue(struct Qdisc* sch)
 	return NULL;
 }

-static int
-red_drop(struct Qdisc* sch)
+static unsigned int red_drop(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
 	struct red_sched_data *q = (struct red_sched_data *)sch->data;

 	skb = __skb_dequeue_tail(&sch->q);
 	if (skb) {
-		sch->stats.backlog -= skb->len;
+		unsigned int len = skb->len;
+		sch->stats.backlog -= len;
 		sch->stats.drops++;
 		q->st.other++;
 		kfree_skb(skb);
-		return 1;
+		return len;
 	}
 	PSCHED_GET_TIME(q->qidlestart);
 	return 0;
net/sched/sch_sfq.c
@@ -209,11 +209,12 @@ static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
 	sfq_link(q, x);
 }

-static int sfq_drop(struct Qdisc *sch)
+static unsigned int sfq_drop(struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
 	sfq_index d = q->max_depth;
 	struct sk_buff *skb;
+	unsigned int len;

 	/* Queue is full! Find the longest slot and
 	   drop a packet from it */
@@ -221,12 +222,13 @@ static int sfq_drop(struct Qdisc *sch)
 	if (d > 1) {
 		sfq_index x = q->dep[d+SFQ_DEPTH].next;
 		skb = q->qs[x].prev;
+		len = skb->len;
 		__skb_unlink(skb, &q->qs[x]);
 		kfree_skb(skb);
 		sfq_dec(q, x);
 		sch->q.qlen--;
 		sch->stats.drops++;
-		return 1;
+		return len;
 	}

 	if (d == 1) {
@@ -235,13 +237,14 @@ static int sfq_drop(struct Qdisc *sch)
 		q->next[q->tail] = q->next[d];
 		q->allot[q->next[d]] += q->quantum;
 		skb = q->qs[d].prev;
+		len = skb->len;
 		__skb_unlink(skb, &q->qs[d]);
 		kfree_skb(skb);
 		sfq_dec(q, d);
 		sch->q.qlen--;
 		q->ht[q->hash[d]] = SFQ_DEPTH;
 		sch->stats.drops++;
-		return 1;
+		return len;
 	}

 	return 0;
net/sched/sch_tbf.c
@@ -7,6 +7,8 @@
  *		2 of the License, or (at your option) any later version.
  *
  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
+ *						 original idea by Martin Devera
  *
  */
@@ -123,62 +125,63 @@ struct tbf_sched_data
 	long		ptokens;		/* Current number of P tokens */
 	psched_time_t	t_c;			/* Time check-point */
 	struct timer_list wd_timer;		/* Watchdog timer */
+	struct Qdisc	*qdisc;			/* Inner qdisc, default - bfifo queue */
 };

 #define L2T(q,L)   ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
 #define L2T_P(q,L) ((q)->P_tab->data[(L)>>(q)->P_tab->rate.cell_log])

 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+	int ret;

-	if (skb->len > q->max_size)
-		goto drop;
-	__skb_queue_tail(&sch->q, skb);
-	if ((sch->stats.backlog += skb->len) <= q->limit) {
-		sch->stats.bytes += skb->len;
-		sch->stats.packets++;
-		return 0;
-	}
-
-	/* Drop action: undo the things that we just did,
-	 * i.e. make tail drop
-	 */
-	__skb_unlink(skb, &sch->q);
-	sch->stats.backlog -= skb->len;
-
-drop:
-	sch->stats.drops++;
+	if (skb->len > q->max_size || sch->stats.backlog + skb->len > q->limit) {
+		sch->stats.drops++;
 #ifdef CONFIG_NET_CLS_POLICE
-	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
+		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
 #endif
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
+			kfree_skb(skb);
+
+		return NET_XMIT_DROP;
+	}
+
+	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
+		sch->stats.drops++;
+		return ret;
+	}
+
+	sch->q.qlen++;
+	sch->stats.backlog += skb->len;
+	sch->stats.bytes += skb->len;
+	sch->stats.packets++;
+	return 0;
 }

 static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
-	__skb_queue_head(&sch->q, skb);
-	sch->stats.backlog += skb->len;
-	return 0;
+	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+	int ret;
+
+	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
+		sch->q.qlen++;
+		sch->stats.backlog += skb->len;
+		return 0;
+	}
+
+	return ret;
 }

-static int tbf_drop(struct Qdisc* sch)
+static unsigned int tbf_drop(struct Qdisc* sch)
 {
-	struct sk_buff *skb;
+	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+	unsigned int len;

-	skb = __skb_dequeue_tail(&sch->q);
-	if (skb) {
-		sch->stats.backlog -= skb->len;
+	if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+		sch->q.qlen--;
+		sch->stats.backlog -= len;
 		sch->stats.drops++;
-		kfree_skb(skb);
-		return 1;
 	}
-	return 0;
+	return len;
 }

 static void tbf_watchdog(unsigned long arg)
@@ -189,18 +192,18 @@ static void tbf_watchdog(unsigned long arg)
 	netif_schedule(sch->dev);
 }

-static struct sk_buff *
-tbf_dequeue(struct Qdisc* sch)
+static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 {
 	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
 	struct sk_buff *skb;

-	skb = __skb_dequeue(&sch->q);
+	skb = q->qdisc->dequeue(q->qdisc);

 	if (skb) {
 		psched_time_t now;
 		long toks;
 		long ptoks = 0;
+		unsigned int len = skb->len;

 		PSCHED_GET_TIME(now);
@@ -210,18 +213,19 @@ tbf_dequeue(struct Qdisc* sch)
 			ptoks = toks + q->ptokens;
 			if (ptoks > (long)q->mtu)
 				ptoks = q->mtu;
-			ptoks -= L2T_P(q, skb->len);
+			ptoks -= L2T_P(q, len);
 		}
 		toks += q->tokens;
 		if (toks > (long)q->buffer)
 			toks = q->buffer;
-		toks -= L2T(q, skb->len);
+		toks -= L2T(q, len);

 		if ((toks|ptoks) >= 0) {
 			q->t_c = now;
 			q->tokens = toks;
 			q->ptokens = ptoks;
-			sch->stats.backlog -= skb->len;
+			sch->stats.backlog -= len;
+			sch->q.qlen--;
 			sch->flags &= ~TCQ_F_THROTTLED;
 			return skb;
 		}
@@ -245,7 +249,13 @@ tbf_dequeue(struct Qdisc* sch)
 		   This is the main idea of all FQ algorithms
 		   (cf. CSZ, HPFQ, HFSC)
 		 */
-		__skb_queue_head(&sch->q, skb);
+
+		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
+			/* When requeue fails skb is dropped */
+			sch->q.qlen--;
+			sch->stats.backlog -= len;
+			sch->stats.drops++;
+		}

 		sch->flags |= TCQ_F_THROTTLED;
 		sch->stats.overlimits++;
@@ -253,12 +263,11 @@ tbf_dequeue(struct Qdisc* sch)
 	return NULL;
 }

-static void
-tbf_reset(struct Qdisc* sch)
+static void tbf_reset(struct Qdisc* sch)
 {
 	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

-	skb_queue_purge(&sch->q);
+	qdisc_reset(q->qdisc);
 	sch->stats.backlog = 0;
 	PSCHED_GET_TIME(q->t_c);
@@ -268,6 +277,31 @@ tbf_reset(struct Qdisc* sch)
 	del_timer(&q->wd_timer);
 }

+static struct Qdisc *tbf_create_dflt_qdisc(struct net_device *dev, u32 limit)
+{
+	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
+	struct rtattr *rta;
+	int ret;
+
+	if (q) {
+		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
+		if (rta) {
+			rta->rta_type = RTM_NEWQDISC;
+			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
+			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;
+
+			ret = q->ops->change(q, rta);
+			kfree(rta);
+
+			if (ret == 0)
+				return q;
+		}
+		qdisc_destroy(q);
+	}
+
+	return NULL;
+}
+
 static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
 {
 	int err = -EINVAL;
@@ -276,6 +310,7 @@ static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
 	struct tc_tbf_qopt *qopt;
 	struct qdisc_rate_table *rtab = NULL;
 	struct qdisc_rate_table *ptab = NULL;
+	struct Qdisc *child = NULL;
 	int max_size,n;

 	if (rtattr_parse(tb, TCA_TBF_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
@@ -309,7 +344,13 @@ static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
 	if (max_size < 0)
 		goto done;

+	if (q->qdisc == &noop_qdisc) {
+		if ((child = tbf_create_dflt_qdisc(sch->dev, qopt->limit)) == NULL)
+			goto done;
+	}
+
 	sch_tree_lock(sch);
+	if (child) q->qdisc = child;
 	q->limit = qopt->limit;
 	q->mtu = qopt->mtu;
 	q->max_size = max_size;
@@ -340,6 +381,8 @@ static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
 	q->wd_timer.function = tbf_watchdog;
 	q->wd_timer.data = (unsigned long)sch;
+
+	q->qdisc = &noop_qdisc;

 	return tbf_change(sch, opt);
 }
@@ -353,6 +396,9 @@ static void tbf_destroy(struct Qdisc *sch)
 		qdisc_put_rtab(q->P_tab);
 	if (q->R_tab)
 		qdisc_put_rtab(q->R_tab);
+
+	qdisc_destroy(q->qdisc);
+	q->qdisc = &noop_qdisc;
 }

 static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -383,9 +429,92 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
 	return -1;
 }

+static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
+			  struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+
+	if (cl != 1) 	/* only one class */
+		return -ENOENT;
+
+	tcm->tcm_parent = TC_H_ROOT;
+	tcm->tcm_handle = 1;
+	tcm->tcm_info = q->qdisc->handle;
+
+	return 0;
+}
+
+static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+		     struct Qdisc **old)
+{
+	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+
+	if (new == NULL)
+		new = &noop_qdisc;
+
+	sch_tree_lock(sch);
+	*old = xchg(&q->qdisc, new);
+	qdisc_reset(*old);
+	sch_tree_unlock(sch);
+
+	return 0;
+}
+
+static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+
+	return q->qdisc;
+}
+
+static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
+{
+	return 1;
+}
+
+static void tbf_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+			    struct rtattr **tca, unsigned long *arg)
+{
+	return -ENOSYS;
+}
+
+static int tbf_delete(struct Qdisc *sch, unsigned long arg)
+{
+	return -ENOSYS;
+}
+
+static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+
+	if (!walker->stop) {
+		if (walker->count >= walker->skip)
+			if (walker->fn(sch, (unsigned long)q, walker) < 0) {
+				walker->stop = 1;
+				return;
+			}
+		walker->count++;
+	}
+}
+
+static struct Qdisc_class_ops tbf_class_ops =
+{
+	.graft		=	tbf_graft,
+	.leaf		=	tbf_leaf,
+	.get		=	tbf_get,
+	.put		=	tbf_put,
+	.change		=	tbf_change_class,
+	.delete		=	tbf_delete,
+	.walk		=	tbf_walk,
+	.dump		=	tbf_dump_class,
+};
+
 struct Qdisc_ops tbf_qdisc_ops = {
 	.next		=	NULL,
-	.cl_ops		=	NULL,
+	.cl_ops		=	&tbf_class_ops,
 	.id		=	"tbf",
 	.priv_size	=	sizeof(struct tbf_sched_data),
 	.enqueue	=	tbf_enqueue,
@@ -397,6 +526,7 @@ struct Qdisc_ops tbf_qdisc_ops = {
 	.destroy	=	tbf_destroy,
 	.change		=	tbf_change,
 	.dump		=	tbf_dump,
+	.owner		=	THIS_MODULE,
 };
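Taken together, sch_tbf.c now hands all queueing to an inner qdisc (a bfifo sized from the configured limit by default) and exposes a single class so the inner qdisc can be replaced via tbf_graft/tbf_leaf. The pattern every such wrapper follows is that the outer qdisc mirrors the inner one's occupancy in its own sch->q.qlen and sch->stats.backlog, since the stack only inspects the root qdisc. A sketch of that bookkeeping, assuming a TBF-style private struct with a single q->qdisc pointer; the name wrapper_enqueue is illustrative and not from this patch:

	/* Sketch only: keep the outer qdisc's counters in step with the
	 * inner qdisc it delegates to, as the new tbf_enqueue() does.
	 */
	static int wrapper_enqueue(struct sk_buff *skb, struct Qdisc *sch)
	{
		struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
		int ret = q->qdisc->enqueue(skb, q->qdisc);	/* hand off to inner qdisc */

		if (ret == 0) {
			sch->q.qlen++;				/* mirror queue length ...   */
			sch->stats.backlog += skb->len;		/* ... and the byte backlog  */
		} else {
			sch->stats.drops++;			/* inner qdisc refused the skb */
		}
		return ret;
	}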