Commit 32302902 authored by Alexander Duyck, committed by David S. Miller

mqprio: Reserve last 32 classid values for HW traffic classes and misc IDs

This patch makes a slight tweak to mqprio in order to bring the
classid values used back in line with what is used for mq. The general idea
is to reserve values :ffe0 - :ffef to identify hardware traffic classes
normally reported via dev->num_tc. By doing this we can maintain a
consistent behavior with mq for classid where :1 - :ffdf will represent a
physical qdisc mapped onto a Tx queue represented by classid - 1, and the
traffic classes will be mapped onto a known subset of classid values
reserved for our virtual qdiscs.

Note I reserved the range from :fff0 - :ffff since this way we might be
able to reuse these classid values with clsact and ingress which would mean
that for mq, mqprio, ingress, and clsact we should be able to maintain a
similar classid layout.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Jesus Sanchez-Palencia <jesus.sanchez-palencia@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent af28f6f2
...@@ -74,6 +74,7 @@ struct tc_estimator { ...@@ -74,6 +74,7 @@ struct tc_estimator {
#define TC_H_INGRESS (0xFFFFFFF1U) #define TC_H_INGRESS (0xFFFFFFF1U)
#define TC_H_CLSACT TC_H_INGRESS #define TC_H_CLSACT TC_H_INGRESS
#define TC_H_MIN_PRIORITY 0xFFE0U
#define TC_H_MIN_INGRESS 0xFFF2U #define TC_H_MIN_INGRESS 0xFFF2U
#define TC_H_MIN_EGRESS 0xFFF3U #define TC_H_MIN_EGRESS 0xFFF3U
......
...@@ -153,6 +153,10 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) ...@@ -153,6 +153,10 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
if (!netif_is_multiqueue(dev)) if (!netif_is_multiqueue(dev))
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* make certain can allocate enough classids to handle queues */
if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
return -ENOMEM;
if (!opt || nla_len(opt) < sizeof(*qopt)) if (!opt || nla_len(opt) < sizeof(*qopt))
return -EINVAL; return -EINVAL;
...@@ -305,7 +309,7 @@ static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch, ...@@ -305,7 +309,7 @@ static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
unsigned long cl) unsigned long cl)
{ {
struct net_device *dev = qdisc_dev(sch); struct net_device *dev = qdisc_dev(sch);
unsigned long ntx = cl - 1 - netdev_get_num_tc(dev); unsigned long ntx = cl - 1;
if (ntx >= dev->num_tx_queues) if (ntx >= dev->num_tx_queues)
return NULL; return NULL;
...@@ -447,38 +451,35 @@ static unsigned long mqprio_find(struct Qdisc *sch, u32 classid) ...@@ -447,38 +451,35 @@ static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
struct net_device *dev = qdisc_dev(sch); struct net_device *dev = qdisc_dev(sch);
unsigned int ntx = TC_H_MIN(classid); unsigned int ntx = TC_H_MIN(classid);
if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev)) /* There are essentially two regions here that have valid classid
return 0; * values. The first region will have a classid value of 1 through
return ntx; * num_tx_queues. All of these are backed by actual Qdiscs.
*/
if (ntx < TC_H_MIN_PRIORITY)
return (ntx <= dev->num_tx_queues) ? ntx : 0;
/* The second region represents the hardware traffic classes. These
* are represented by classid values of TC_H_MIN_PRIORITY through
* TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
*/
return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
} }
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl, static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm) struct sk_buff *skb, struct tcmsg *tcm)
{ {
struct net_device *dev = qdisc_dev(sch); if (cl < TC_H_MIN_PRIORITY) {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
struct net_device *dev = qdisc_dev(sch);
int tc = netdev_txq_to_tc(dev, cl - 1);
if (cl <= netdev_get_num_tc(dev)) { tcm->tcm_parent = (tc < 0) ? 0 :
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(tc + TC_H_MIN_PRIORITY));
tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
} else {
tcm->tcm_parent = TC_H_ROOT; tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_info = 0; tcm->tcm_info = 0;
} else {
int i;
struct netdev_queue *dev_queue;
dev_queue = mqprio_queue_get(sch, cl);
tcm->tcm_parent = 0;
for (i = 0; i < netdev_get_num_tc(dev); i++) {
struct netdev_tc_txq tc = dev->tc_to_txq[i];
int q_idx = cl - netdev_get_num_tc(dev);
if (q_idx > tc.offset &&
q_idx <= tc.offset + tc.count) {
tcm->tcm_parent =
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1));
break;
}
}
tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
} }
tcm->tcm_handle |= TC_H_MIN(cl); tcm->tcm_handle |= TC_H_MIN(cl);
return 0; return 0;
...@@ -489,15 +490,14 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -489,15 +490,14 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
__releases(d->lock) __releases(d->lock)
__acquires(d->lock) __acquires(d->lock)
{ {
struct net_device *dev = qdisc_dev(sch); if (cl >= TC_H_MIN_PRIORITY) {
if (cl <= netdev_get_num_tc(dev)) {
int i; int i;
__u32 qlen = 0; __u32 qlen = 0;
struct Qdisc *qdisc; struct Qdisc *qdisc;
struct gnet_stats_queue qstats = {0}; struct gnet_stats_queue qstats = {0};
struct gnet_stats_basic_packed bstats = {0}; struct gnet_stats_basic_packed bstats = {0};
struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1]; struct net_device *dev = qdisc_dev(sch);
struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];
/* Drop lock here it will be reclaimed before touching /* Drop lock here it will be reclaimed before touching
* statistics this is required because the d->lock we * statistics this is required because the d->lock we
...@@ -550,12 +550,25 @@ static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) ...@@ -550,12 +550,25 @@ static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
/* Walk hierarchy with a virtual class per tc */ /* Walk hierarchy with a virtual class per tc */
arg->count = arg->skip; arg->count = arg->skip;
for (ntx = arg->skip; for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
ntx < dev->num_tx_queues + netdev_get_num_tc(dev); if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
ntx++) { arg->stop = 1;
return;
}
arg->count++;
}
/* Pad the values and skip over unused traffic classes */
if (ntx < TC_MAX_QUEUE) {
arg->count = TC_MAX_QUEUE;
ntx = TC_MAX_QUEUE;
}
/* Reset offset, sort out remaining per-queue qdiscs */
for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
if (arg->fn(sch, ntx + 1, arg) < 0) { if (arg->fn(sch, ntx + 1, arg) < 0) {
arg->stop = 1; arg->stop = 1;
break; return;
} }
arg->count++; arg->count++;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.