Commit f2dec9a2 authored by David S. Miller

Merge branch 'dpaa2-eth-Add-support-for-MQPRIO-offloading'

Ioana Radulescu says:

====================
dpaa2-eth: Add support for MQPRIO offloading

Add support for multiple Tx traffic classes with mqprio. We can have
up to one netdev queue and one hardware frame queue per TC per core.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a41efedf ab1e6de2
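To make the cover letter's queue math concrete: with mqprio offloaded, the stack sees num_tc groups of Tx queues, one group per traffic class, each group holding one queue per core. The standalone sketch below (hypothetical sizes of 8 flows and 4 TCs; not driver code) mirrors the netdev_set_tc_queue() layout the patch programs:

```c
/* Sketch of the per-TC netdev queue layout (assumed sizes). */
#include <stdio.h>

#define NUM_QUEUES 8	/* assumed: one Tx flow per core */
#define NUM_TC     4	/* assumed: TCs requested via mqprio */

int main(void)
{
	int tc;

	/* Mirrors netdev_set_tc_queue(dev, tc, NUM_QUEUES, tc * NUM_QUEUES):
	 * TC tc owns netdev queues [tc * NUM_QUEUES, (tc + 1) * NUM_QUEUES).
	 */
	for (tc = 0; tc < NUM_TC; tc++)
		printf("TC %d -> netdev Tx queues %d..%d\n",
		       tc, tc * NUM_QUEUES, (tc + 1) * NUM_QUEUES - 1);

	printf("total netdev Tx queues: %d\n", NUM_TC * NUM_QUEUES); /* 32 */
	return 0;
}
```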
@@ -757,6 +757,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	u16 queue_mapping;
 	unsigned int needed_headroom;
 	u32 fd_len;
+	u8 prio = 0;
 	int err, i;
 
 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
@@ -814,6 +815,18 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	 * a queue affined to the same core that processed the Rx frame
 	 */
 	queue_mapping = skb_get_queue_mapping(skb);
+
+	if (net_dev->num_tc) {
+		prio = netdev_txq_to_tc(net_dev, queue_mapping);
+		/* Hardware interprets priority level 0 as being the highest,
+		 * so we need to do a reverse mapping to the netdev tc index
+		 */
+		prio = net_dev->num_tc - prio - 1;
+		/* We have only one FQ array entry for all Tx hardware queues
+		 * with the same flow id (but different priority levels)
+		 */
+		queue_mapping %= dpaa2_eth_queue_count(priv);
+	}
 	fq = &priv->fq[queue_mapping];
 
 	fd_len = dpaa2_fd_get_len(&fd);
@@ -824,7 +837,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	 * the Tx confirmation callback for this frame
 	 */
 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = priv->enqueue(priv, fq, &fd, 0);
+		err = priv->enqueue(priv, fq, &fd, prio);
 		if (err != -EBUSY)
 			break;
 	}
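The Tx hot path above derives two things from the queue index picked by the stack: the hardware priority (reversed, since the hardware treats priority level 0 as highest) and the flow id used to index the FQ array. A standalone trace of that arithmetic (assumed sizes; netdev_txq_to_tc() is approximated here by a division, which holds for the uniform layout programmed in dpaa2_eth_setup_tc()):

```c
/* Trace of the prio/flow derivation in dpaa2_eth_tx() (assumed sizes). */
#include <stdio.h>

#define NUM_QUEUES 8	/* stand-in for dpaa2_eth_queue_count(priv) */
#define NUM_TC     4	/* stand-in for net_dev->num_tc */

int main(void)
{
	int queue_mapping = 19;			/* e.g. skb_get_queue_mapping() */
	int tc   = queue_mapping / NUM_QUEUES;	/* netdev_txq_to_tc() stand-in */
	int prio = NUM_TC - tc - 1;		/* hw prio 0 is the highest */
	int flow = queue_mapping % NUM_QUEUES;	/* one FQ entry per flow id */

	/* Prints: queue 19 -> tc 2, hw prio 1, flow 3 */
	printf("queue %d -> tc %d, hw prio %d, flow %d\n",
	       queue_mapping, tc, prio, flow);
	return 0;
}
```

update_xps() in the next hunk relies on the same relation in the opposite direction: netdev queue i is affined to the CPU of fq[i % num_queues].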
@@ -1863,6 +1876,78 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
 	return n - drops;
 }
 
+static int update_xps(struct dpaa2_eth_priv *priv)
+{
+	struct net_device *net_dev = priv->net_dev;
+	struct cpumask xps_mask;
+	struct dpaa2_eth_fq *fq;
+	int i, num_queues, netdev_queues;
+	int err = 0;
+
+	num_queues = dpaa2_eth_queue_count(priv);
+	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
+
+	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
+	 * queues, so only process those
+	 */
+	for (i = 0; i < netdev_queues; i++) {
+		fq = &priv->fq[i % num_queues];
+
+		cpumask_clear(&xps_mask);
+		cpumask_set_cpu(fq->target_cpu, &xps_mask);
+
+		err = netif_set_xps_queue(net_dev, &xps_mask, i);
+		if (err) {
+			netdev_warn_once(net_dev, "Error setting XPS queue\n");
+			break;
+		}
+	}
+
+	return err;
+}
+
+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
+			      enum tc_setup_type type, void *type_data)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct tc_mqprio_qopt *mqprio = type_data;
+	u8 num_tc, num_queues;
+	int i;
+
+	if (type != TC_SETUP_QDISC_MQPRIO)
+		return -EINVAL;
+
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	num_queues = dpaa2_eth_queue_count(priv);
+	num_tc = mqprio->num_tc;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	if (num_tc > dpaa2_eth_tc_count(priv)) {
+		netdev_err(net_dev, "Max %d traffic classes supported\n",
+			   dpaa2_eth_tc_count(priv));
+		return -EINVAL;
+	}
+
+	if (!num_tc) {
+		netdev_reset_tc(net_dev);
+		netif_set_real_num_tx_queues(net_dev, num_queues);
+		goto out;
+	}
+
+	netdev_set_num_tc(net_dev, num_tc);
+	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
+
+	for (i = 0; i < num_tc; i++)
+		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
+
+out:
+	update_xps(priv);
+
+	return 0;
+}
+
 static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_open = dpaa2_eth_open,
 	.ndo_start_xmit = dpaa2_eth_tx,
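For reference, TC_SETUP_QDISC_MQPRIO hands dpaa2_eth_setup_tc() a struct tc_mqprio_qopt from the UAPI. A hedged sketch of what might reach the driver when a user asks for four classes (the prio-to-TC map values below are illustrative, roughly what `tc qdisc add dev eth0 root mqprio num_tc 4 map 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 hw 1` would produce, not values taken from the patch):

```c
/* Illustrative tc_mqprio_qopt as passed via type_data for
 * TC_SETUP_QDISC_MQPRIO; all values are assumptions.
 */
#include <linux/pkt_sched.h>

static const struct tc_mqprio_qopt example_qopt = {
	.num_tc = 4,
	.prio_tc_map = { 0, 1, 2, 3, 0, 1, 2, 3,
			 0, 1, 2, 3, 0, 1, 2, 3 },
	.hw = TC_MQPRIO_HW_OFFLOAD_TCS,	/* offload requested; the driver
					 * confirms this exact mode */
};
```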
@@ -1875,6 +1960,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_change_mtu = dpaa2_eth_change_mtu,
 	.ndo_bpf = dpaa2_eth_xdp,
 	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+	.ndo_setup_tc = dpaa2_eth_setup_tc,
 };
 
 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -2129,10 +2215,9 @@ static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
 {
 	struct device *dev = priv->net_dev->dev.parent;
-	struct cpumask xps_mask;
 	struct dpaa2_eth_fq *fq;
 	int rx_cpu, txc_cpu;
-	int i, err;
+	int i;
 
 	/* For each FQ, pick one channel/CPU to deliver frames to.
 	 * This may well change at runtime, either through irqbalance or
@@ -2151,17 +2236,6 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
 			break;
 		case DPAA2_TX_CONF_FQ:
 			fq->target_cpu = txc_cpu;
-
-			/* Tell the stack to affine to txc_cpu the Tx queue
-			 * associated with the confirmation one
-			 */
-			cpumask_clear(&xps_mask);
-			cpumask_set_cpu(txc_cpu, &xps_mask);
-			err = netif_set_xps_queue(priv->net_dev, &xps_mask,
-						  fq->flowid);
-			if (err)
-				dev_err(dev, "Error setting XPS queue\n");
-
 			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
 			if (txc_cpu >= nr_cpu_ids)
 				txc_cpu = cpumask_first(&priv->dpio_cpumask);
@@ -2171,6 +2245,8 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
 		}
 		fq->channel = get_affine_channel(priv, fq->target_cpu);
 	}
+
+	update_xps(priv);
 }
 
 static void setup_fqs(struct dpaa2_eth_priv *priv)
@@ -2352,11 +2428,10 @@ static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
 
 static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
 				       struct dpaa2_eth_fq *fq,
-				       struct dpaa2_fd *fd,
-				       u8 prio __always_unused)
+				       struct dpaa2_fd *fd, u8 prio)
 {
 	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
-					   fq->tx_fqid, fd);
+					   fq->tx_fqid[prio], fd);
 }
 
 static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
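With tx_fqid now an array indexed by hardware priority, each Tx flow carries up to DPAA2_ETH_MAX_TCS frame queue ids. A small sketch of the lookup (assumed values; the real FQIDs come from dpni_get_queue(), as in the next hunk):

```c
/* Sketch of the per-priority FQID lookup in dpaa2_eth_enqueue_fq(). */
#include <stdint.h>
#include <stdio.h>

#define MAX_TCS 8

struct fq_stub {
	uint32_t tx_fqid[MAX_TCS];	/* one hardware FQ per priority */
};

int main(void)
{
	/* FQID values are made up for illustration */
	struct fq_stub fq = { .tx_fqid = { 0x4000, 0x4001, 0x4002, 0x4003 } };
	uint8_t prio = 2;		/* derived in dpaa2_eth_tx() */

	/* dpaa2_io_service_enqueue_fq() would receive this id */
	printf("enqueue to FQID 0x%x\n", fq.tx_fqid[prio]);
	return 0;
}
```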
@@ -2512,17 +2587,21 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpni_queue queue;
 	struct dpni_queue_id qid;
-	int err;
+	int i, err;
 
-	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-			     DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
-	if (err) {
-		dev_err(dev, "dpni_get_queue(TX) failed\n");
-		return err;
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_TX, i, fq->flowid,
+				     &queue, &qid);
+		if (err) {
+			dev_err(dev, "dpni_get_queue(TX) failed\n");
+			return err;
+		}
+		fq->tx_fqid[i] = qid.fqid;
 	}
 
+	/* All Tx queues belonging to the same flowid have the same qdbin */
 	fq->tx_qdbin = qid.qdbin;
-	fq->tx_fqid = qid.fqid;
 
 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
@@ -3222,7 +3301,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 	dev = &dpni_dev->dev;
 
 	/* Net device */
-	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
+	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
 	if (!net_dev) {
 		dev_err(dev, "alloc_etherdev_mq() failed\n");
 		return -ENOMEM;
@@ -282,10 +282,13 @@ struct dpaa2_eth_ch_stats {
 };
 
 /* Maximum number of queues associated with a DPNI */
+#define DPAA2_ETH_MAX_TCS		8
 #define DPAA2_ETH_MAX_RX_QUEUES	16
 #define DPAA2_ETH_MAX_TX_QUEUES	16
 #define DPAA2_ETH_MAX_QUEUES		(DPAA2_ETH_MAX_RX_QUEUES + \
 					DPAA2_ETH_MAX_TX_QUEUES)
+#define DPAA2_ETH_MAX_NETDEV_QUEUES	\
+	(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
 
 #define DPAA2_ETH_MAX_DPCONS		16
@@ -299,8 +302,9 @@ struct dpaa2_eth_priv;
 struct dpaa2_eth_fq {
 	u32 fqid;
 	u32 tx_qdbin;
-	u32 tx_fqid;
+	u32 tx_fqid[DPAA2_ETH_MAX_TCS];
 	u16 flowid;
+	u8 tc;
 	int target_cpu;
 	u32 dq_frames;
 	u32 dq_bytes;
@@ -448,6 +452,9 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
 #define dpaa2_eth_fs_count(priv)	\
 	((priv)->dpni_attrs.fs_entries)
 
+#define dpaa2_eth_tc_count(priv)	\
+	((priv)->dpni_attrs.num_tcs)
+
 /* We have exactly one {Rx, Tx conf} queue per channel */
 #define dpaa2_eth_queue_count(priv)	\
 	((priv)->num_channels)
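With the new constants, alloc_etherdev_mq() in the probe path reserves DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS = 16 * 8 = 128 netdev Tx queues. A hypothetical compile-time check of that arithmetic (not part of the patch):

```c
/* Hypothetical sanity check: one netdev queue per TC per Tx flow. */
#define DPAA2_ETH_MAX_TCS		8
#define DPAA2_ETH_MAX_TX_QUEUES		16
#define DPAA2_ETH_MAX_NETDEV_QUEUES \
	(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)

_Static_assert(DPAA2_ETH_MAX_NETDEV_QUEUES == 128,
	       "16 Tx flows x 8 TCs = 128 netdev Tx queues");
```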