/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
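
	A worked example (illustrative numbers, not from the original text):
	with R = 125000 bytes/sec and B = 12500 bytes, N starts at
	B/R = 0.1 sec of credit; a 1500 byte packet costs S/R = 0.012 sec,
	so an idle bucket absorbs a burst of about eight such packets
	before throttling begins.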



	In practice, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M.  If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)
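
	For instance (illustrative figures): with a backlog of L = 100000 bytes,
	R = 125000 bytes/sec and B = 12500 bytes, the first term gives
	(L-B)/R = 0.7 sec of shaping latency, assuming the peak-rate
	term is the smaller one.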


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous packet,
	TBF can remain idle for up to 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g., for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use an arch with HZ=1000 (e.g. Alpha) :-)

	With classful TBF, the limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
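
	A typical configuration from userspace (illustrative; see tc-tbf(8)
	for the authoritative syntax):

	  tc qdisc add dev eth0 root tbf rate 0.5mbit burst 5kb latency 70ms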
*/

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	u32		max_size;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	bool peak_present;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

/* Time to Length: convert a time in ns to a length in bytes,
 * i.e. determine how many bytes can be sent in the given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

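	/* Each 53-byte ATM cell carries only 48 bytes of payload, so
	 * only 48/53 of the computed length is usable for data.
	 */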
	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
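
/* A worked example (illustrative): at rate_bytes_ps = 125000 (1 Mbit/s),
 * time_in_ns = 1000000 (1 ms) yields len = 125 bytes, before the
 * linklayer and overhead adjustments above.
 */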

/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}

/* The GSO packet is too big; segment it so that tbf can transmit
 * each segment in time.
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
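	/* One skb was replaced by nb segments, so the qlen seen by the
	 * parent qdiscs must grow by nb - 1 (a negative "decrease").
	 */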
	if (nb > 1)
		qdisc_tree_decrease_qlen(sch, 1 - nb);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

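	/* A packet above max_size could never gather enough tokens to be
	 * sent in one piece; GSO skbs are re-segmented instead of dropped.
	 */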
	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

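		/* Tokens are tracked as nanoseconds of transmit credit:
		 * accrue the time elapsed since the last checkpoint,
		 * capped at the configured burst (q->buffer).
		 */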
		now = ktime_to_ns(ktime_get());
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (q->peak_present) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);
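		/* The packet has now been charged against both buckets;
		 * a negative balance in either means we must throttle.
		 * OR-ing the two values below tests both sign bits at once.
		 */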

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

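		/* Sleep until the more depleted bucket has refilled:
		 * -toks (or -ptoks) ns is the remaining deficit.
		 */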
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now.  It sounds cool, but it
		   is wrong in principle: we MUST NOT reorder packets
		   under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = ktime_to_ns(ktime_get());
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

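/* TCA_TBF_RATE64/TCA_TBF_PRATE64 carry rates that do not fit in the
 * 32-bit fields of the legacy tc_tbf_qopt (i.e. above roughly 34 Gbit/s).
 */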
static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

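	/* Burst and mtu arrive from userspace in psched ticks; convert
	 * them to nanoseconds, clamped to 32 bits.
	 */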
	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	if (qopt->peakrate.rate) {
		memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
		q->peak_present = true;
	} else {
		q->peak_present = false;
	}

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_to_ns(ktime_get());
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (q->peak_present)
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
		goto nla_put_failure;
	if (q->peak_present &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");