Commit 6d9f6e67 authored by David S. Miller's avatar David S. Miller

Merge branch 'net-sched-add-Flow-Queue-PIE-packet-scheduler'

Gautam Ramakrishnan says:

====================
net: sched: add Flow Queue PIE packet scheduler

Flow Queue PIE packet scheduler

This patch series implements the Flow Queue Proportional
Integral controller Enhanced (FQ-PIE) active queue
management (AQM) algorithm. It is an enhancement over the PIE
algorithm. It integrates the PIE AQM with a deficit round robin
scheme.

FQ-PIE is implemented over the latest version of PIE which
uses timestamps to calculate queue delay with an additional
option of using average dequeue rate to calculate the queue
delay. This patch series also limits the total memory used by
packets across all queues to a default of 32 MB.

 - Patch #1
   - Creates pie.h and moves all small functions and structures
     common to PIE and FQ-PIE here. The functions are all made
     inline.
 - Patch #2 - #8
   - Addresses code formatting, indentation, comment changes
     and rearrangement of structure members.
 - Patch #9
   - Refactors sch_pie.c by changing arguments to
     calculate_probability(), [pie_]drop_early() and
     pie_process_dequeue() to make it generic enough to
     be used by sch_fq_pie.c. These functions are exported
     to be used by sch_fq_pie.c.
 - Patch #10
   - Adds the FQ-PIE Qdisc.

For more information:
https://tools.ietf.org/html/rfc8033

Changes from v6 to v7
 - Call tcf_block_put() when destroying the Qdisc as suggested
   by Jakub Kicinski.

Changes from v5 to v6
 - Rearranged struct members according to their access pattern
   and to remove holes.

Changes from v4 to v5
 - This patch series breaks down patch 1 of v4 into
   separate logical commits as suggested by David Miller.

Changes from v3 to v4
 - Used non deprecated version of nla_parse_nested
 - Used SZ_32M macro
 - Removed an unused variable
 - Code cleanup
 All suggested by Jakub and Toke.

Changes from v2 to v3
 - Exported drop_early, pie_process_dequeue and
   calculate_probability functions from sch_pie as
   suggested by Stephen Hemminger.

Changes from v1 ( and RFC patch) to v2
 - Added timestamp to calculate queue delay as recommended
   by Dave Taht
 - Packet memory limit implemented as recommended by Toke.
 - Added external classifier as recommended by Toke.
 - Used NET_XMIT_CN instead of NET_XMIT_DROP as the return
   value in the fq_pie_qdisc_enqueue function.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents fd786fb1 ec97ecf1
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __NET_SCHED_PIE_H
#define __NET_SCHED_PIE_H
#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/inet_ecn.h>
#include <net/pkt_sched.h>
#define MAX_PROB U64_MAX
#define DTIME_INVALID U64_MAX
#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
#define PIE_SCALE 8
/**
 * struct pie_params - contains pie parameters
 * @target: target delay in pschedtime
 * @tupdate: interval at which drop probability is calculated
 * @limit: total number of packets that can be in the queue
 * @alpha: parameter to control drop probability
 * @beta: parameter to control drop probability
 * @ecn: is ECN marking of packets enabled
 * @bytemode: is drop probability scaled based on pkt size
 * @dq_rate_estimator: is Little's law used for qdelay calculation
 */
struct pie_params {
	psched_time_t target;
	u32 tupdate;
	u32 limit;
	u32 alpha;
	u32 beta;
	u8 ecn;
	u8 bytemode;
	u8 dq_rate_estimator;
};
/**
 * struct pie_vars - contains pie variables
 * @qdelay: current queue delay
 * @qdelay_old: queue delay in previous qdelay calculation
 * @burst_time: burst time allowance
 * @dq_tstamp: timestamp at which dq rate was last calculated
 * @prob: drop probability
 * @accu_prob: accumulated drop probability
 * @dq_count: number of bytes dequeued in a measurement cycle
 *	(DQCOUNT_INVALID when no measurement cycle is running)
 * @avg_dq_rate: calculated average dq rate
 * @qlen_old: queue length during previous qdelay calculation
 * @accu_prob_overflows: number of times accu_prob overflows
 */
struct pie_vars {
	psched_time_t qdelay;
	psched_time_t qdelay_old;
	psched_time_t burst_time;
	psched_time_t dq_tstamp;
	u64 prob;
	u64 accu_prob;
	u64 dq_count;
	u32 avg_dq_rate;
	u32 qlen_old;
	u8 accu_prob_overflows;
};
/**
 * struct pie_stats - contains pie stats
 * @packets_in: total number of packets enqueued
 * @dropped: packets dropped due to PIE action
 * @overlimit: packets dropped due to lack of space in queue
 * @ecn_mark: packets marked with ECN
 * @maxq: maximum queue size
 */
struct pie_stats {
	u32 packets_in;
	u32 dropped;
	u32 overlimit;
	u32 ecn_mark;
	u32 maxq;
};
/**
 * struct pie_skb_cb - contains private skb vars
 *	(stored inside skb->cb via qdisc_skb_cb()->data)
 * @enqueue_time: timestamp when the packet is enqueued
 * @mem_usage: size of the skb during enqueue
 */
struct pie_skb_cb {
	psched_time_t enqueue_time;
	u32 mem_usage;
};
/* Load the default PIE configuration into @params. */
static inline void pie_params_init(struct pie_params *params)
{
	/* Drop-probability controller gains. */
	params->alpha = 2;
	params->beta = 20;

	/* Feature toggles: no ECN, packet mode, timestamp-based qdelay. */
	params->ecn = false;
	params->bytemode = false;
	params->dq_rate_estimator = false;

	/* Queue limit and timing: 15 ms target delay, 15 ms update period. */
	params->limit = 1000;
	params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC);
	params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC);
}
/* Reset per-queue PIE state: zero accumulated probability, no dequeue
 * rate measurement in progress, fresh 150 ms burst allowance.
 */
static inline void pie_vars_init(struct pie_vars *vars)
{
	vars->accu_prob = 0;
	vars->accu_prob_overflows = 0;
	vars->avg_dq_rate = 0;

	/* No dequeue-rate measurement cycle is running yet. */
	vars->dq_count = DQCOUNT_INVALID;
	vars->dq_tstamp = DTIME_INVALID;

	/* 150 ms of burst allowance (in pschedtime) before early drops. */
	vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
}
/* Return the PIE private area inside skb->cb, validating that it fits. */
static inline struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb));
	return (struct pie_skb_cb *)qdisc_skb_cb(skb)->data;
}
/* Read back the timestamp stored by pie_set_enqueue_time(). */
static inline psched_time_t pie_get_enqueue_time(const struct sk_buff *skb)
{
	return get_pie_cb(skb)->enqueue_time;
}
/* Stamp the skb with the current psched time at enqueue. */
static inline void pie_set_enqueue_time(struct sk_buff *skb)
{
	get_pie_cb(skb)->enqueue_time = psched_get_time();
}
bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
struct pie_vars *vars, u32 qlen, u32 packet_size);
void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
struct pie_vars *vars, u32 qlen);
void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
u32 qlen);
#endif
...@@ -971,6 +971,37 @@ struct tc_pie_xstats { ...@@ -971,6 +971,37 @@ struct tc_pie_xstats {
__u32 ecn_mark; /* packets marked with ecn*/ __u32 ecn_mark; /* packets marked with ecn*/
}; };
/* FQ PIE */
enum {
	TCA_FQ_PIE_UNSPEC,
	TCA_FQ_PIE_LIMIT,		/* queue limit, in packets */
	TCA_FQ_PIE_FLOWS,		/* number of flow queues */
	TCA_FQ_PIE_TARGET,		/* PIE target delay, in us */
	TCA_FQ_PIE_TUPDATE,		/* prob update interval, in us */
	TCA_FQ_PIE_ALPHA,		/* PIE alpha gain */
	TCA_FQ_PIE_BETA,		/* PIE beta gain */
	TCA_FQ_PIE_QUANTUM,		/* DRR quantum, in bytes */
	TCA_FQ_PIE_MEMORY_LIMIT,	/* total memory cap, in bytes */
	TCA_FQ_PIE_ECN_PROB,		/* ECN mark threshold, in percent */
	TCA_FQ_PIE_ECN,			/* enable ECN marking */
	TCA_FQ_PIE_BYTEMODE,		/* scale drop prob by packet size */
	TCA_FQ_PIE_DQ_RATE_ESTIMATOR,	/* use dequeue rate estimator */
	__TCA_FQ_PIE_MAX
};
#define TCA_FQ_PIE_MAX   (__TCA_FQ_PIE_MAX - 1)
/* FQ-PIE statistics, copied to user space as qdisc app stats. */
struct tc_fq_pie_xstats {
	__u32 packets_in;	/* total number of packets enqueued */
	__u32 dropped;		/* packets dropped due to fq_pie_action */
	__u32 overlimit;	/* dropped due to lack of space in queue */
	__u32 overmemory;	/* dropped due to lack of memory in queue */
	__u32 ecn_mark;		/* packets marked with ecn */
	__u32 new_flow_count;	/* count of new flows created by packets */
	__u32 new_flows_len;	/* count of flows in new list */
	__u32 old_flows_len;	/* count of flows in old list */
	__u32 memory_usage;	/* total memory across all queues */
};
/* CBS */ /* CBS */
struct tc_cbs_qopt { struct tc_cbs_qopt {
__u8 offload; __u8 offload;
......
...@@ -366,6 +366,19 @@ config NET_SCH_PIE ...@@ -366,6 +366,19 @@ config NET_SCH_PIE
If unsure, say N. If unsure, say N.
config NET_SCH_FQ_PIE
depends on NET_SCH_PIE
tristate "Flow Queue Proportional Integral controller Enhanced (FQ-PIE)"
help
Say Y here if you want to use the Flow Queue Proportional Integral
controller Enhanced (FQ-PIE) packet scheduling algorithm.
For more information, please see https://tools.ietf.org/html/rfc8033
To compile this driver as a module, choose M here: the module
will be called sch_fq_pie.
If unsure, say N.
config NET_SCH_INGRESS config NET_SCH_INGRESS
tristate "Ingress/classifier-action Qdisc" tristate "Ingress/classifier-action Qdisc"
depends on NET_CLS_ACT depends on NET_CLS_ACT
......
...@@ -59,6 +59,7 @@ obj-$(CONFIG_NET_SCH_CAKE) += sch_cake.o ...@@ -59,6 +59,7 @@ obj-$(CONFIG_NET_SCH_CAKE) += sch_cake.o
obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o
obj-$(CONFIG_NET_SCH_HHF) += sch_hhf.o obj-$(CONFIG_NET_SCH_HHF) += sch_hhf.o
obj-$(CONFIG_NET_SCH_PIE) += sch_pie.o obj-$(CONFIG_NET_SCH_PIE) += sch_pie.o
obj-$(CONFIG_NET_SCH_FQ_PIE) += sch_fq_pie.o
obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o
obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o
obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o
......
// SPDX-License-Identifier: GPL-2.0-only
/* Flow Queue PIE discipline
*
* Copyright (C) 2019 Mohit P. Tahiliani <tahiliani@nitk.edu.in>
* Copyright (C) 2019 Sachin D. Patil <sdp.sachin@gmail.com>
* Copyright (C) 2019 V. Saicharan <vsaicharan1998@gmail.com>
* Copyright (C) 2019 Mohit Bhasi <mohitbhasi1998@gmail.com>
* Copyright (C) 2019 Leslie Monis <lesliemonis@gmail.com>
* Copyright (C) 2019 Gautam Ramakrishnan <gautamramk@gmail.com>
*/
#include <linux/jhash.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pie.h>
/* Flow Queue PIE
*
* Principles:
* - Packets are classified on flows.
* - This is a Stochastic model (as we use a hash, several flows might
* be hashed to the same slot)
* - Each flow has a PIE managed queue.
* - Flows are linked onto two (Round Robin) lists,
* so that new flows have priority on old ones.
* - For a given flow, packets are not reordered.
* - Drops during enqueue only.
* - ECN capability is off by default.
* - ECN threshold (if ECN is enabled) is at 10% by default.
* - Uses timestamps to calculate queue delay by default.
*/
/**
 * struct fq_pie_flow - contains data for each flow
 * @vars: pie vars associated with the flow
 * @deficit: number of remaining byte credits
 * @backlog: size of data in the flow
 * @qlen: number of packets in the flow
 * @flowchain: flowchain for the flow
 * @head: first packet in the flow (NULL when the flow is empty)
 * @tail: last packet in the flow (only meaningful when @head != NULL)
 */
struct fq_pie_flow {
	struct pie_vars vars;
	s32 deficit;
	u32 backlog;
	u32 qlen;
	struct list_head flowchain;
	struct sk_buff *head;
	struct sk_buff *tail;
};
/**
 * struct fq_pie_sched_data - qdisc-private state for FQ-PIE
 * @filter_list: optional external classifier
 * @block: tcf block holding the classifier chain
 * @flows: array of @flows_cnt flow queues
 * @sch: back-pointer to the owning qdisc (used by the adapt timer)
 * @old_flows: DRR list of flows that have used up their first quantum
 * @new_flows: DRR list of newly active flows (served first)
 * @p_params: PIE parameters shared by all flows
 * @ecn_prob: ECN marking threshold, as a percentage of MAX_PROB
 * @flows_cnt: number of flow queues
 * @quantum: DRR byte credit granted per round
 * @memory_limit: cap on total skb truesize across all queues
 * @new_flow_count: number of times a flow was (re)added to @new_flows
 * @memory_usage: current total skb truesize across all queues
 * @overmemory: count of enqueues seen while over the memory limit
 * @stats: PIE stats shared by all flows
 * @adapt_timer: periodic timer recomputing drop probabilities
 */
struct fq_pie_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_pie_flow *flows;
	struct Qdisc *sch;
	struct list_head old_flows;
	struct list_head new_flows;
	struct pie_params p_params;
	u32 ecn_prob;
	u32 flows_cnt;
	u32 quantum;
	u32 memory_limit;
	u32 new_flow_count;
	u32 memory_usage;
	u32 overmemory;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};
/* Hash an skb into a 0-based flow slot in [0, flows_cnt). */
static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
				struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
/* Map an skb to a flow id in [1, flows_cnt], or 0 when a configured
 * filter drops or steals the packet (*qerr carries the verdict in that
 * case).  Without a filter the flow is chosen by hashing.
 */
static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
				    int *qerr)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	/* Direct flow selection through skb->priority minor id. */
	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_pie_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		/* NOTE(review): res.classid minor may be as large as
		 * flows_cnt — callers must not index flows[] with the raw
		 * 1-based return value; verify the caller adjusts it.
		 */
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
/* Append @skb at the tail of @flow's singly-linked packet queue. */
static inline void flow_queue_add(struct fq_pie_flow *flow,
				  struct sk_buff *skb)
{
	skb->next = NULL;
	if (flow->head)
		flow->tail->next = skb;
	else
		flow->head = skb;
	flow->tail = skb;
}
static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct fq_pie_sched_data *q = qdisc_priv(sch);
struct fq_pie_flow *sel_flow;
int uninitialized_var(ret);
u8 memory_limited = false;
u8 enqueue = false;
u32 pkt_len;
u32 idx;
/* Classifies packet into corresponding flow */
idx = fq_pie_classify(skb, sch, &ret);
sel_flow = &q->flows[idx];
/* Checks whether adding a new packet would exceed memory limit */
get_pie_cb(skb)->mem_usage = skb->truesize;
memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
/* Checks if the qdisc is full */
if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
q->stats.overlimit++;
goto out;
} else if (unlikely(memory_limited)) {
q->overmemory++;
}
if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
sel_flow->backlog, skb->len)) {
enqueue = true;
} else if (q->p_params.ecn &&
sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
INET_ECN_set_ce(skb)) {
/* If packet is ecn capable, mark it if drop probability
* is lower than the parameter ecn_prob, else drop it.
*/
q->stats.ecn_mark++;
enqueue = true;
}
if (enqueue) {
/* Set enqueue time only when dq_rate_estimator is disabled. */
if (!q->p_params.dq_rate_estimator)
pie_set_enqueue_time(skb);
pkt_len = qdisc_pkt_len(skb);
q->stats.packets_in++;
q->memory_usage += skb->truesize;
sch->qstats.backlog += pkt_len;
sch->q.qlen++;
flow_queue_add(sel_flow, skb);
if (list_empty(&sel_flow->flowchain)) {
list_add_tail(&sel_flow->flowchain, &q->new_flows);
q->new_flow_count++;
sel_flow->deficit = q->quantum;
sel_flow->qlen = 0;
sel_flow->backlog = 0;
}
sel_flow->qlen++;
sel_flow->backlog += pkt_len;
return NET_XMIT_SUCCESS;
}
out:
q->stats.dropped++;
sel_flow->vars.accu_prob = 0;
sel_flow->vars.accu_prob_overflows = 0;
__qdisc_drop(skb, to_free);
qdisc_qstats_drop(sch);
return NET_XMIT_CN;
}
/* Netlink attribute policy: every FQ-PIE attribute is a u32. */
static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
	[TCA_FQ_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_FQ_PIE_FLOWS] = {.type = NLA_U32},
	[TCA_FQ_PIE_TARGET] = {.type = NLA_U32},
	[TCA_FQ_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_FQ_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_FQ_PIE_BETA] = {.type = NLA_U32},
	[TCA_FQ_PIE_QUANTUM] = {.type = NLA_U32},
	[TCA_FQ_PIE_MEMORY_LIMIT] = {.type = NLA_U32},
	[TCA_FQ_PIE_ECN_PROB] = {.type = NLA_U32},
	[TCA_FQ_PIE_ECN] = {.type = NLA_U32},
	[TCA_FQ_PIE_BYTEMODE] = {.type = NLA_U32},
	[TCA_FQ_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
};
/* Unlink and return the first packet of @flow; the caller must ensure
 * flow->head is non-NULL.
 */
static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow)
{
	struct sk_buff *first = flow->head;

	flow->head = first->next;
	first->next = NULL;
	return first;
}
/* QDisc dequeue handler: deficit round robin over the new/old flow
 * lists; new flows get priority until they exhaust their first quantum.
 */
static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct fq_pie_flow *flow;
	struct list_head *head;
	u32 pkt_len;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;	/* qdisc is empty */
	}

	flow = list_first_entry(head, struct fq_pie_flow, flowchain);
	/* Flow has exhausted all its credits */
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	if (flow->head) {
		skb = dequeue_head(flow);
		pkt_len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= pkt_len;
		sch->q.qlen--;
		qdisc_bstats_update(sch, skb);
	}

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if (head == &q->new_flows && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}

	/* pkt_len is valid here: it was assigned on the flow->head path. */
	flow->qlen--;
	flow->deficit -= pkt_len;
	flow->backlog -= pkt_len;
	q->memory_usage -= get_pie_cb(skb)->mem_usage;
	pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
	return skb;
}
/* Change/configure handler (also called from ->init()).  Parses the
 * TCA_FQ_PIE_* attributes, updates the parameters under the qdisc tree
 * lock, and drops excess packets when the new limit is below the
 * current backlog.
 *
 * Returns 0 on success, a negative errno on parse failure, or -EINVAL
 * for an invalid flow count / an attempt to change it after creation.
 */
static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
	unsigned int len_dropped = 0;
	unsigned int num_dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
	if (err < 0)
		return err;

	sch_tree_lock(sch);
	if (tb[TCA_FQ_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

		q->p_params.limit = limit;
		sch->limit = limit;
	}
	if (tb[TCA_FQ_PIE_FLOWS]) {
		if (q->flows) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows cannot be changed");
			goto flow_error;
		}
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
		/* Reject 65536 as well: flow indices are iterated with u16
		 * counters elsewhere, so a count of 65536 would make those
		 * loops wrap and never terminate.  This also matches the
		 * "< 65536" wording of the error message below.
		 */
		if (!q->flows_cnt || q->flows_cnt >= 65536) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows must be < 65536");
			goto flow_error;
		}
	}

	/* convert from microseconds to pschedtime */
	if (tb[TCA_FQ_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);

		/* convert to pschedtime */
		q->p_params.target =
			PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_FQ_PIE_TUPDATE])
		q->p_params.tupdate =
			usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE]));

	if (tb[TCA_FQ_PIE_ALPHA])
		q->p_params.alpha = nla_get_u32(tb[TCA_FQ_PIE_ALPHA]);

	if (tb[TCA_FQ_PIE_BETA])
		q->p_params.beta = nla_get_u32(tb[TCA_FQ_PIE_BETA]);

	if (tb[TCA_FQ_PIE_QUANTUM])
		q->quantum = nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]);

	if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
		q->memory_limit = nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]);

	if (tb[TCA_FQ_PIE_ECN_PROB])
		q->ecn_prob = nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]);

	if (tb[TCA_FQ_PIE_ECN])
		q->p_params.ecn = nla_get_u32(tb[TCA_FQ_PIE_ECN]);

	if (tb[TCA_FQ_PIE_BYTEMODE])
		q->p_params.bytemode = nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]);

	if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
		q->p_params.dq_rate_estimator =
			nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]);

	/* Drop excess packets if new limit is lower */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);

		if (!skb)
			break;
		/* Account the packet length before freeing it: reading it
		 * after the free would be a use-after-free.
		 */
		len_dropped += qdisc_pkt_len(skb);
		num_dropped += 1;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);

	sch_tree_unlock(sch);
	return 0;

flow_error:
	sch_tree_unlock(sch);
	return -EINVAL;
}
/* Periodic adaptation timer: recompute the PIE drop probability of
 * every flow under the root qdisc lock, then rearm itself 'tupdate'
 * jiffies later (a tupdate of 0 stops the rearming).
 */
static void fq_pie_timer(struct timer_list *t)
{
	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
	u16 idx;

	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	/* NOTE(review): idx is u16 — a flows_cnt of 65536 would make this
	 * loop wrap and never terminate; verify the netlink bound on
	 * flows_cnt excludes that value.
	 */
	for (idx = 0; idx < q->flows_cnt; idx++)
		pie_calculate_probability(&q->p_params, &q->flows[idx].vars,
					  q->flows[idx].backlog);

	/* reset the timer to fire after 'tupdate' jiffies. */
	if (q->p_params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);

	spin_unlock(root_lock);
}
/* ->init(): set defaults, apply any netlink configuration, and allocate
 * the classifier block and flow table.
 *
 * The adaptation timer is set up BEFORE any fallible call: on an init
 * failure the qdisc core still invokes ->destroy(), which calls
 * del_timer_sync() on this timer unconditionally — running that against
 * an uninitialized timer is undefined behavior.
 */
static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	int err;
	u16 idx;

	pie_params_init(&q->p_params);
	sch->limit = 10 * 1024;			/* default: 10240 packets */
	q->p_params.limit = sch->limit;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->sch = sch;
	q->ecn_prob = 10;			/* mark below 10% drop prob */
	q->flows_cnt = 1024;
	q->memory_limit = SZ_32M;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);

	/* Must precede every error path below (see function comment). */
	timer_setup(&q->adapt_timer, fq_pie_timer, 0);

	if (opt) {
		err = fq_pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),
			    GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);

	return 0;

init_failure:
	q->flows_cnt = 0;

	return err;
}
/* ->dump(): emit the current configuration as nested TCA_FQ_PIE_*
 * attributes (target converted back from pschedtime to microseconds,
 * tupdate from jiffies to microseconds).
 */
static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, q->flows_cnt) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->p_params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
			jiffies_to_usecs(q->p_params.tupdate)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, q->p_params.alpha) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BETA, q->p_params.beta) ||
	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT, q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, q->ecn_prob) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN, q->p_params.ecn) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, q->p_params.bytemode) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
			q->p_params.dq_rate_estimator))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
/* ->dump_stats(): copy the FQ-PIE specific counters to user space.
 * The flow-list lengths are sampled under the qdisc tree lock.
 */
static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tc_fq_pie_xstats st = {
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.overmemory	= q->overmemory,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
		.new_flow_count = q->new_flow_count,
		.memory_usage	= q->memory_usage,
	};
	struct list_head *pos;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
/* ->reset(): free every queued packet (deferred via rtnl_kfree_skbs,
 * so RTNL must be held) and reinitialize per-flow PIE state.  Per-flow
 * qlen/backlog are refreshed lazily on the flow's next activation.
 */
static void fq_pie_reset(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	u16 idx;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);

	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		/* Removes all packets from flow */
		rtnl_kfree_skbs(flow->head, flow->tail);
		flow->head = NULL;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	/* NOTE(review): q->memory_usage is not cleared here even though
	 * all accounted packets were just freed — verify the accounting
	 * does not drift after a reset.
	 */
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
}
/* ->destroy(): tear down the classifier block, stop the adaptation
 * timer and free the flow table.
 *
 * NOTE(review): this also runs when ->init() fails, so the adapt_timer
 * must already have been timer_setup() by every ->init() error path —
 * verify the initialization order in fq_pie_init().
 */
static void fq_pie_destroy(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	del_timer_sync(&q->adapt_timer);
	kvfree(q->flows);
}
/* Qdisc operations table for the "fq_pie" scheduler. */
static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
	.id		= "fq_pie",
	.priv_size	= sizeof(struct fq_pie_sched_data),
	.enqueue	= fq_pie_qdisc_enqueue,
	.dequeue	= fq_pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_pie_init,
	.destroy	= fq_pie_destroy,
	.reset		= fq_pie_reset,
	.change		= fq_pie_change,
	.dump		= fq_pie_dump,
	.dump_stats	= fq_pie_dump_stats,
	.owner		= THIS_MODULE,
};
/* Module plumbing: register/unregister the fq_pie qdisc ops. */
static int __init fq_pie_module_init(void)
{
	return register_qdisc(&fq_pie_qdisc_ops);
}

static void __exit fq_pie_module_exit(void)
{
	unregister_qdisc(&fq_pie_qdisc_ops);
}

module_init(fq_pie_module_init);
module_exit(fq_pie_module_exit);

MODULE_DESCRIPTION("Flow Queue Proportional Integral controller Enhanced (FQ-PIE)");
MODULE_AUTHOR("Mohit P. Tahiliani");
MODULE_LICENSE("GPL");
...@@ -19,159 +19,76 @@ ...@@ -19,159 +19,76 @@
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <net/pkt_sched.h> #include <net/pkt_sched.h>
#include <net/inet_ecn.h> #include <net/inet_ecn.h>
#include <net/pie.h>
#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
#define DTIME_INVALID 0xffffffffffffffff
#define MAX_PROB 0xffffffffffffffff
#define PIE_SCALE 8
/* parameters used */
struct pie_params {
psched_time_t target; /* user specified target delay in pschedtime */
u32 tupdate; /* timer frequency (in jiffies) */
u32 limit; /* number of packets that can be enqueued */
u32 alpha; /* alpha and beta are between 0 and 32 */
u32 beta; /* and are used for shift relative to 1 */
bool ecn; /* true if ecn is enabled */
bool bytemode; /* to scale drop early prob based on pkt size */
u8 dq_rate_estimator; /* to calculate delay using Little's law */
};
/* variables used */
struct pie_vars {
u64 prob; /* probability but scaled by u64 limit. */
psched_time_t burst_time;
psched_time_t qdelay;
psched_time_t qdelay_old;
u64 dq_count; /* measured in bytes */
psched_time_t dq_tstamp; /* drain rate */
u64 accu_prob; /* accumulated drop probability */
u32 avg_dq_rate; /* bytes per pschedtime tick,scaled */
u32 qlen_old; /* in bytes */
u8 accu_prob_overflows; /* overflows of accu_prob */
};
/* statistics gathering */
struct pie_stats {
u32 packets_in; /* total number of packets enqueued */
u32 dropped; /* packets dropped due to pie_action */
u32 overlimit; /* dropped due to lack of space in queue */
u32 maxq; /* maximum queue size */
u32 ecn_mark; /* packets marked with ECN */
};
/* private data for the Qdisc */ /* private data for the Qdisc */
struct pie_sched_data { struct pie_sched_data {
struct pie_params params;
struct pie_vars vars; struct pie_vars vars;
struct pie_params params;
struct pie_stats stats; struct pie_stats stats;
struct timer_list adapt_timer; struct timer_list adapt_timer;
struct Qdisc *sch; struct Qdisc *sch;
}; };
static void pie_params_init(struct pie_params *params) bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
struct pie_vars *vars, u32 qlen, u32 packet_size)
{ {
params->alpha = 2;
params->beta = 20;
params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC); /* 15 ms */
params->limit = 1000; /* default of 1000 packets */
params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC); /* 15 ms */
params->ecn = false;
params->bytemode = false;
params->dq_rate_estimator = false;
}
/* private skb vars */
struct pie_skb_cb {
psched_time_t enqueue_time;
};
static struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
{
qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb));
return (struct pie_skb_cb *)qdisc_skb_cb(skb)->data;
}
static psched_time_t pie_get_enqueue_time(const struct sk_buff *skb)
{
return get_pie_cb(skb)->enqueue_time;
}
static void pie_set_enqueue_time(struct sk_buff *skb)
{
get_pie_cb(skb)->enqueue_time = psched_get_time();
}
static void pie_vars_init(struct pie_vars *vars)
{
vars->dq_count = DQCOUNT_INVALID;
vars->dq_tstamp = DTIME_INVALID;
vars->accu_prob = 0;
vars->avg_dq_rate = 0;
/* default of 150 ms in pschedtime */
vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
vars->accu_prob_overflows = 0;
}
static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
struct pie_sched_data *q = qdisc_priv(sch);
u64 rnd; u64 rnd;
u64 local_prob = q->vars.prob; u64 local_prob = vars->prob;
u32 mtu = psched_mtu(qdisc_dev(sch)); u32 mtu = psched_mtu(qdisc_dev(sch));
/* If there is still burst allowance left skip random early drop */ /* If there is still burst allowance left skip random early drop */
if (q->vars.burst_time > 0) if (vars->burst_time > 0)
return false; return false;
/* If current delay is less than half of target, and /* If current delay is less than half of target, and
* if drop prob is low already, disable early_drop * if drop prob is low already, disable early_drop
*/ */
if ((q->vars.qdelay < q->params.target / 2) && if ((vars->qdelay < params->target / 2) &&
(q->vars.prob < MAX_PROB / 5)) (vars->prob < MAX_PROB / 5))
return false; return false;
/* If we have fewer than 2 mtu-sized packets, disable drop_early, /* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
* similar to min_th in RED * similar to min_th in RED
*/ */
if (sch->qstats.backlog < 2 * mtu) if (qlen < 2 * mtu)
return false; return false;
/* If bytemode is turned on, use packet size to compute new /* If bytemode is turned on, use packet size to compute new
* probablity. Smaller packets will have lower drop prob in this case * probablity. Smaller packets will have lower drop prob in this case
*/ */
if (q->params.bytemode && packet_size <= mtu) if (params->bytemode && packet_size <= mtu)
local_prob = (u64)packet_size * div_u64(local_prob, mtu); local_prob = (u64)packet_size * div_u64(local_prob, mtu);
else else
local_prob = q->vars.prob; local_prob = vars->prob;
if (local_prob == 0) { if (local_prob == 0) {
q->vars.accu_prob = 0; vars->accu_prob = 0;
q->vars.accu_prob_overflows = 0; vars->accu_prob_overflows = 0;
} }
if (local_prob > MAX_PROB - q->vars.accu_prob) if (local_prob > MAX_PROB - vars->accu_prob)
q->vars.accu_prob_overflows++; vars->accu_prob_overflows++;
q->vars.accu_prob += local_prob; vars->accu_prob += local_prob;
if (q->vars.accu_prob_overflows == 0 && if (vars->accu_prob_overflows == 0 &&
q->vars.accu_prob < (MAX_PROB / 100) * 85) vars->accu_prob < (MAX_PROB / 100) * 85)
return false; return false;
if (q->vars.accu_prob_overflows == 8 && if (vars->accu_prob_overflows == 8 &&
q->vars.accu_prob >= MAX_PROB / 2) vars->accu_prob >= MAX_PROB / 2)
return true; return true;
prandom_bytes(&rnd, 8); prandom_bytes(&rnd, 8);
if (rnd < local_prob) { if (rnd < local_prob) {
q->vars.accu_prob = 0; vars->accu_prob = 0;
q->vars.accu_prob_overflows = 0; vars->accu_prob_overflows = 0;
return true; return true;
} }
return false; return false;
} }
EXPORT_SYMBOL_GPL(pie_drop_early);
static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free) struct sk_buff **to_free)
...@@ -184,7 +101,8 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, ...@@ -184,7 +101,8 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto out; goto out;
} }
if (!drop_early(sch, skb->len)) { if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
skb->len)) {
enqueue = true; enqueue = true;
} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) && } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
INET_ECN_set_ce(skb)) { INET_ECN_set_ce(skb)) {
...@@ -216,14 +134,14 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, ...@@ -216,14 +134,14 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
} }
static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = { static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
[TCA_PIE_TARGET] = {.type = NLA_U32}, [TCA_PIE_TARGET] = {.type = NLA_U32},
[TCA_PIE_LIMIT] = {.type = NLA_U32}, [TCA_PIE_LIMIT] = {.type = NLA_U32},
[TCA_PIE_TUPDATE] = {.type = NLA_U32}, [TCA_PIE_TUPDATE] = {.type = NLA_U32},
[TCA_PIE_ALPHA] = {.type = NLA_U32}, [TCA_PIE_ALPHA] = {.type = NLA_U32},
[TCA_PIE_BETA] = {.type = NLA_U32}, [TCA_PIE_BETA] = {.type = NLA_U32},
[TCA_PIE_ECN] = {.type = NLA_U32}, [TCA_PIE_ECN] = {.type = NLA_U32},
[TCA_PIE_BYTEMODE] = {.type = NLA_U32}, [TCA_PIE_BYTEMODE] = {.type = NLA_U32},
[TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32}, [TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
}; };
static int pie_change(struct Qdisc *sch, struct nlattr *opt, static int pie_change(struct Qdisc *sch, struct nlattr *opt,
...@@ -296,26 +214,25 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt, ...@@ -296,26 +214,25 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
return 0; return 0;
} }
static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
struct pie_vars *vars, u32 qlen)
{ {
struct pie_sched_data *q = qdisc_priv(sch);
int qlen = sch->qstats.backlog; /* current queue size in bytes */
psched_time_t now = psched_get_time(); psched_time_t now = psched_get_time();
u32 dtime = 0; u32 dtime = 0;
/* If dq_rate_estimator is disabled, calculate qdelay using the /* If dq_rate_estimator is disabled, calculate qdelay using the
* packet timestamp. * packet timestamp.
*/ */
if (!q->params.dq_rate_estimator) { if (!params->dq_rate_estimator) {
q->vars.qdelay = now - pie_get_enqueue_time(skb); vars->qdelay = now - pie_get_enqueue_time(skb);
if (q->vars.dq_tstamp != DTIME_INVALID) if (vars->dq_tstamp != DTIME_INVALID)
dtime = now - q->vars.dq_tstamp; dtime = now - vars->dq_tstamp;
q->vars.dq_tstamp = now; vars->dq_tstamp = now;
if (qlen == 0) if (qlen == 0)
q->vars.qdelay = 0; vars->qdelay = 0;
if (dtime == 0) if (dtime == 0)
return; return;
...@@ -327,39 +244,39 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) ...@@ -327,39 +244,39 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
* we have enough packets to calculate the drain rate. Save * we have enough packets to calculate the drain rate. Save
* current time as dq_tstamp and start measurement cycle. * current time as dq_tstamp and start measurement cycle.
*/ */
if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) { if (qlen >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
q->vars.dq_tstamp = psched_get_time(); vars->dq_tstamp = psched_get_time();
q->vars.dq_count = 0; vars->dq_count = 0;
} }
/* Calculate the average drain rate from this value. If queue length /* Calculate the average drain rate from this value. If queue length
* has receded to a small value viz., <= QUEUE_THRESHOLD bytes,reset * has receded to a small value viz., <= QUEUE_THRESHOLD bytes, reset
* the dq_count to -1 as we don't have enough packets to calculate the * the dq_count to -1 as we don't have enough packets to calculate the
* drain rate anymore The following if block is entered only when we * drain rate anymore. The following if block is entered only when we
* have a substantial queue built up (QUEUE_THRESHOLD bytes or more) * have a substantial queue built up (QUEUE_THRESHOLD bytes or more)
* and we calculate the drain rate for the threshold here. dq_count is * and we calculate the drain rate for the threshold here. dq_count is
* in bytes, time difference in psched_time, hence rate is in * in bytes, time difference in psched_time, hence rate is in
* bytes/psched_time. * bytes/psched_time.
*/ */
if (q->vars.dq_count != DQCOUNT_INVALID) { if (vars->dq_count != DQCOUNT_INVALID) {
q->vars.dq_count += skb->len; vars->dq_count += skb->len;
if (q->vars.dq_count >= QUEUE_THRESHOLD) { if (vars->dq_count >= QUEUE_THRESHOLD) {
u32 count = q->vars.dq_count << PIE_SCALE; u32 count = vars->dq_count << PIE_SCALE;
dtime = now - q->vars.dq_tstamp; dtime = now - vars->dq_tstamp;
if (dtime == 0) if (dtime == 0)
return; return;
count = count / dtime; count = count / dtime;
if (q->vars.avg_dq_rate == 0) if (vars->avg_dq_rate == 0)
q->vars.avg_dq_rate = count; vars->avg_dq_rate = count;
else else
q->vars.avg_dq_rate = vars->avg_dq_rate =
(q->vars.avg_dq_rate - (vars->avg_dq_rate -
(q->vars.avg_dq_rate >> 3)) + (count >> 3); (vars->avg_dq_rate >> 3)) + (count >> 3);
/* If the queue has receded below the threshold, we hold /* If the queue has receded below the threshold, we hold
* on to the last drain rate calculated, else we reset * on to the last drain rate calculated, else we reset
...@@ -367,10 +284,10 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) ...@@ -367,10 +284,10 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
* packet is dequeued * packet is dequeued
*/ */
if (qlen < QUEUE_THRESHOLD) { if (qlen < QUEUE_THRESHOLD) {
q->vars.dq_count = DQCOUNT_INVALID; vars->dq_count = DQCOUNT_INVALID;
} else { } else {
q->vars.dq_count = 0; vars->dq_count = 0;
q->vars.dq_tstamp = psched_get_time(); vars->dq_tstamp = psched_get_time();
} }
goto burst_allowance_reduction; goto burst_allowance_reduction;
...@@ -380,18 +297,18 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) ...@@ -380,18 +297,18 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
return; return;
burst_allowance_reduction: burst_allowance_reduction:
if (q->vars.burst_time > 0) { if (vars->burst_time > 0) {
if (q->vars.burst_time > dtime) if (vars->burst_time > dtime)
q->vars.burst_time -= dtime; vars->burst_time -= dtime;
else else
q->vars.burst_time = 0; vars->burst_time = 0;
} }
} }
EXPORT_SYMBOL_GPL(pie_process_dequeue);
static void calculate_probability(struct Qdisc *sch) void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
u32 qlen)
{ {
struct pie_sched_data *q = qdisc_priv(sch);
u32 qlen = sch->qstats.backlog; /* queue size in bytes */
psched_time_t qdelay = 0; /* in pschedtime */ psched_time_t qdelay = 0; /* in pschedtime */
psched_time_t qdelay_old = 0; /* in pschedtime */ psched_time_t qdelay_old = 0; /* in pschedtime */
s64 delta = 0; /* determines the change in probability */ s64 delta = 0; /* determines the change in probability */
...@@ -400,21 +317,21 @@ static void calculate_probability(struct Qdisc *sch) ...@@ -400,21 +317,21 @@ static void calculate_probability(struct Qdisc *sch)
u32 power; u32 power;
bool update_prob = true; bool update_prob = true;
if (q->params.dq_rate_estimator) { if (params->dq_rate_estimator) {
qdelay_old = q->vars.qdelay; qdelay_old = vars->qdelay;
q->vars.qdelay_old = q->vars.qdelay; vars->qdelay_old = vars->qdelay;
if (q->vars.avg_dq_rate > 0) if (vars->avg_dq_rate > 0)
qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate; qdelay = (qlen << PIE_SCALE) / vars->avg_dq_rate;
else else
qdelay = 0; qdelay = 0;
} else { } else {
qdelay = q->vars.qdelay; qdelay = vars->qdelay;
qdelay_old = q->vars.qdelay_old; qdelay_old = vars->qdelay_old;
} }
/* If qdelay is zero and qlen is not, it means qlen is very small, less /* If qdelay is zero and qlen is not, it means qlen is very small,
* than dequeue_rate, so we do not update probability in this round * so we do not update probability in this round.
*/ */
if (qdelay == 0 && qlen != 0) if (qdelay == 0 && qlen != 0)
update_prob = false; update_prob = false;
...@@ -426,18 +343,18 @@ static void calculate_probability(struct Qdisc *sch) ...@@ -426,18 +343,18 @@ static void calculate_probability(struct Qdisc *sch)
* probability. alpha/beta are updated locally below by scaling down * probability. alpha/beta are updated locally below by scaling down
* by 16 to come to 0-2 range. * by 16 to come to 0-2 range.
*/ */
alpha = ((u64)q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4; alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
beta = ((u64)q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4; beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
/* We scale alpha and beta differently depending on how heavy the /* We scale alpha and beta differently depending on how heavy the
* congestion is. Please see RFC 8033 for details. * congestion is. Please see RFC 8033 for details.
*/ */
if (q->vars.prob < MAX_PROB / 10) { if (vars->prob < MAX_PROB / 10) {
alpha >>= 1; alpha >>= 1;
beta >>= 1; beta >>= 1;
power = 100; power = 100;
while (q->vars.prob < div_u64(MAX_PROB, power) && while (vars->prob < div_u64(MAX_PROB, power) &&
power <= 1000000) { power <= 1000000) {
alpha >>= 2; alpha >>= 2;
beta >>= 2; beta >>= 2;
...@@ -446,14 +363,14 @@ static void calculate_probability(struct Qdisc *sch) ...@@ -446,14 +363,14 @@ static void calculate_probability(struct Qdisc *sch)
} }
/* alpha and beta should be between 0 and 32, in multiples of 1/16 */ /* alpha and beta should be between 0 and 32, in multiples of 1/16 */
delta += alpha * (u64)(qdelay - q->params.target); delta += alpha * (u64)(qdelay - params->target);
delta += beta * (u64)(qdelay - qdelay_old); delta += beta * (u64)(qdelay - qdelay_old);
oldprob = q->vars.prob; oldprob = vars->prob;
/* to ensure we increase probability in steps of no more than 2% */ /* to ensure we increase probability in steps of no more than 2% */
if (delta > (s64)(MAX_PROB / (100 / 2)) && if (delta > (s64)(MAX_PROB / (100 / 2)) &&
q->vars.prob >= MAX_PROB / 10) vars->prob >= MAX_PROB / 10)
delta = (MAX_PROB / 100) * 2; delta = (MAX_PROB / 100) * 2;
/* Non-linear drop: /* Non-linear drop:
...@@ -464,12 +381,12 @@ static void calculate_probability(struct Qdisc *sch) ...@@ -464,12 +381,12 @@ static void calculate_probability(struct Qdisc *sch)
if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC))) if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
delta += MAX_PROB / (100 / 2); delta += MAX_PROB / (100 / 2);
q->vars.prob += delta; vars->prob += delta;
if (delta > 0) { if (delta > 0) {
/* prevent overflow */ /* prevent overflow */
if (q->vars.prob < oldprob) { if (vars->prob < oldprob) {
q->vars.prob = MAX_PROB; vars->prob = MAX_PROB;
/* Prevent normalization error. If probability is at /* Prevent normalization error. If probability is at
* maximum value already, we normalize it here, and * maximum value already, we normalize it here, and
* skip the check to do a non-linear drop in the next * skip the check to do a non-linear drop in the next
...@@ -479,8 +396,8 @@ static void calculate_probability(struct Qdisc *sch) ...@@ -479,8 +396,8 @@ static void calculate_probability(struct Qdisc *sch)
} }
} else { } else {
/* prevent underflow */ /* prevent underflow */
if (q->vars.prob > oldprob) if (vars->prob > oldprob)
q->vars.prob = 0; vars->prob = 0;
} }
/* Non-linear drop in probability: Reduce drop probability quickly if /* Non-linear drop in probability: Reduce drop probability quickly if
...@@ -489,10 +406,10 @@ static void calculate_probability(struct Qdisc *sch) ...@@ -489,10 +406,10 @@ static void calculate_probability(struct Qdisc *sch)
if (qdelay == 0 && qdelay_old == 0 && update_prob) if (qdelay == 0 && qdelay_old == 0 && update_prob)
/* Reduce drop probability to 98.4% */ /* Reduce drop probability to 98.4% */
q->vars.prob -= q->vars.prob / 64u; vars->prob -= vars->prob / 64;
q->vars.qdelay = qdelay; vars->qdelay = qdelay;
q->vars.qlen_old = qlen; vars->qlen_old = qlen;
/* We restart the measurement cycle if the following conditions are met /* We restart the measurement cycle if the following conditions are met
* 1. If the delay has been low for 2 consecutive Tupdate periods * 1. If the delay has been low for 2 consecutive Tupdate periods
...@@ -500,16 +417,17 @@ static void calculate_probability(struct Qdisc *sch) ...@@ -500,16 +417,17 @@ static void calculate_probability(struct Qdisc *sch)
* 3. If average dq_rate_estimator is enabled, we have at least one * 3. If average dq_rate_estimator is enabled, we have at least one
* estimate for the avg_dq_rate i.e., is a non-zero value * estimate for the avg_dq_rate i.e., is a non-zero value
*/ */
if ((q->vars.qdelay < q->params.target / 2) && if ((vars->qdelay < params->target / 2) &&
(q->vars.qdelay_old < q->params.target / 2) && (vars->qdelay_old < params->target / 2) &&
q->vars.prob == 0 && vars->prob == 0 &&
(!q->params.dq_rate_estimator || q->vars.avg_dq_rate > 0)) { (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) {
pie_vars_init(&q->vars); pie_vars_init(vars);
} }
if (!q->params.dq_rate_estimator) if (!params->dq_rate_estimator)
q->vars.qdelay_old = qdelay; vars->qdelay_old = qdelay;
} }
EXPORT_SYMBOL_GPL(pie_calculate_probability);
static void pie_timer(struct timer_list *t) static void pie_timer(struct timer_list *t)
{ {
...@@ -518,7 +436,7 @@ static void pie_timer(struct timer_list *t) ...@@ -518,7 +436,7 @@ static void pie_timer(struct timer_list *t)
spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
spin_lock(root_lock); spin_lock(root_lock);
calculate_probability(sch); pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);
/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */ /* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
if (q->params.tupdate) if (q->params.tupdate)
...@@ -607,12 +525,13 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) ...@@ -607,12 +525,13 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch) static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{ {
struct pie_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb = qdisc_dequeue_head(sch); struct sk_buff *skb = qdisc_dequeue_head(sch);
if (!skb) if (!skb)
return NULL; return NULL;
pie_process_dequeue(sch, skb); pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
return skb; return skb;
} }
...@@ -633,7 +552,7 @@ static void pie_destroy(struct Qdisc *sch) ...@@ -633,7 +552,7 @@ static void pie_destroy(struct Qdisc *sch)
} }
static struct Qdisc_ops pie_qdisc_ops __read_mostly = { static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
.id = "pie", .id = "pie",
.priv_size = sizeof(struct pie_sched_data), .priv_size = sizeof(struct pie_sched_data),
.enqueue = pie_qdisc_enqueue, .enqueue = pie_qdisc_enqueue,
.dequeue = pie_qdisc_dequeue, .dequeue = pie_qdisc_dequeue,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment