Commit 674cb229 authored by Jakub Kicinski, committed by David S. Miller

nfp: abm: multi-queue RED offload

Add support for MQ offload and for setting RED parameters
on a queue-by-queue basis.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f971b132
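
The per-queue indexing used throughout the patch follows the usual MQ convention: a RED qdisc grafted under child class <mq-handle>:N serves hardware queue N - 1, while a RED qdisc installed directly at TC_H_ROOT covers the single queue 0 (see nfp_abm_red_find() in the diff below). The following is a minimal userspace sketch of that handle arithmetic only, using the TC_H_* macros from the uapi header; the handle values and the helper name red_parent_to_queue() are made up for illustration and are not part of the patch.

/* Sketch only: mirrors the classid arithmetic used by nfp_abm_red_find(). */
#include <linux/pkt_sched.h>	/* TC_H_MAJ, TC_H_MIN, TC_H_ROOT */
#include <stdint.h>
#include <stdio.h>

static int red_parent_to_queue(uint32_t mq_handle, uint32_t red_parent)
{
	if (red_parent == TC_H_ROOT)
		return 0;			/* RED as device root: single queue */
	if (TC_H_MAJ(red_parent) != TC_H_MAJ(mq_handle))
		return -1;			/* not a child of this MQ */
	return (int)TC_H_MIN(red_parent) - 1;	/* class :N -> queue N - 1 */
}

int main(void)
{
	uint32_t mq = 0x80010000;	/* hypothetical MQ handle 8001: */

	/* RED attached to class 8001:3 maps to queue index 2 */
	printf("queue index: %d\n", red_parent_to_queue(mq, 0x80010003));
	return 0;
}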
@@ -111,8 +111,7 @@ nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
 	return 0;
 }
 
-static int
-nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
 {
 	struct nfp_cpp *cpp = alink->abm->app->cpp;
 	u32 muw;
@@ -164,6 +163,37 @@ u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i)
 	return val;
 }
 
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
+			      struct nfp_alink_stats *stats)
+{
+	int err;
+
+	stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
+	stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
+
+	err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
+				NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
+				i, false, &stats->backlog_bytes);
+	if (err)
+		return err;
+
+	err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
+				NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
+				i, false, &stats->backlog_pkts);
+	if (err)
+		return err;
+
+	err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+				NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
+				i, true, &stats->drops);
+	if (err)
+		return err;
+
+	return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+				 NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
+				 i, true, &stats->overlimits);
+}
+
 int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
 			    struct nfp_alink_stats *stats)
 {
@@ -200,6 +230,22 @@ int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
 				true, &stats->overlimits);
 }
 
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+			       struct nfp_alink_xstats *xstats)
+{
+	int err;
+
+	err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+				NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
+				i, true, &xstats->pdrop);
+	if (err)
+		return err;
+
+	return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+				 NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
+				 i, true, &xstats->ecn_marked);
+}
+
 int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
 			     struct nfp_alink_xstats *xstats)
 {
@@ -58,43 +58,77 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
 	       FIELD_PREP(NFP_ABM_PORTID_ID, id);
 }
 
-static int nfp_abm_reset_stats(struct nfp_abm_link *alink)
+static int
+__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
+		     u32 handle, unsigned int qs, u32 init_val)
 {
-	int err;
+	struct nfp_port *port = nfp_port_from_netdev(netdev);
+	int ret;
 
-	err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[0].stats);
-	if (err)
-		return err;
-	alink->qdiscs[0].stats.backlog_pkts = 0;
-	alink->qdiscs[0].stats.backlog_bytes = 0;
+	ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
+	memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
 
-	err = nfp_abm_ctrl_read_xstats(alink, &alink->qdiscs[0].xstats);
-	if (err)
-		return err;
+	alink->parent = handle;
+	alink->num_qdiscs = qs;
+	port->tc_offload_cnt = qs;
 
-	return 0;
+	return ret;
 }
 
+static void
+nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
+		   u32 handle, unsigned int qs)
+{
+	__nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
+}
+
+static int
+nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+	unsigned int i = TC_H_MIN(opt->parent) - 1;
+
+	if (opt->parent == TC_H_ROOT)
+		i = 0;
+	else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
+		i = TC_H_MIN(opt->parent) - 1;
+	else
+		return -EOPNOTSUPP;
+
+	if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
+		return -EOPNOTSUPP;
+
+	return i;
+}
+
 static void
 nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
 		    u32 handle)
 {
-	struct nfp_port *port = nfp_port_from_netdev(netdev);
+	unsigned int i;
 
-	if (handle != alink->qdiscs[0].handle)
+	for (i = 0; i < alink->num_qdiscs; i++)
+		if (handle == alink->qdiscs[i].handle)
+			break;
+	if (i == alink->num_qdiscs)
 		return;
 
-	alink->qdiscs[0].handle = TC_H_UNSPEC;
-	port->tc_offload_cnt = 0;
-	nfp_abm_ctrl_set_all_q_lvls(alink, ~0);
+	if (alink->parent == TC_H_ROOT) {
+		nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
+	} else {
+		nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
+		memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
+	}
 }
 
 static int
 nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
 		    struct tc_red_qopt_offload *opt)
 {
-	struct nfp_port *port = nfp_port_from_netdev(netdev);
-	int err;
+	bool existing;
+	int i, err;
+
+	i = nfp_abm_red_find(alink, opt);
+	existing = i >= 0;
 
 	if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
 		nfp_warn(alink->abm->app->cpp,
@@ -102,30 +136,62 @@ nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
 		err = -EINVAL;
 		goto err_destroy;
 	}
 
+	if (existing) {
+		if (alink->parent == TC_H_ROOT)
+			err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
+		else
+			err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
+		if (err)
+			goto err_destroy;
+		return 0;
+	}
+
-	/* Reset stats only on new qdisc */
-	if (alink->qdiscs[0].handle != opt->handle) {
-		err = nfp_abm_reset_stats(alink);
+	if (opt->parent == TC_H_ROOT) {
+		i = 0;
+		err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
+					   opt->set.min);
+	} else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
+		i = TC_H_MIN(opt->parent) - 1;
+		err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
+	} else {
+		return -EINVAL;
+	}
+	/* Set the handle to try full clean up, in case IO failed */
+	alink->qdiscs[i].handle = opt->handle;
 	if (err)
 		goto err_destroy;
 
+	if (opt->parent == TC_H_ROOT)
+		err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
+	else
+		err = nfp_abm_ctrl_read_q_stats(alink, i,
+						&alink->qdiscs[i].stats);
+	if (err)
+		goto err_destroy;
+
+	if (opt->parent == TC_H_ROOT)
+		err = nfp_abm_ctrl_read_xstats(alink,
+					       &alink->qdiscs[i].xstats);
+	else
+		err = nfp_abm_ctrl_read_q_xstats(alink, i,
+						 &alink->qdiscs[i].xstats);
+	if (err)
+		goto err_destroy;
-	}
 
-	alink->qdiscs[0].handle = opt->handle;
-	port->tc_offload_cnt = 1;
+	alink->qdiscs[i].stats.backlog_pkts = 0;
+	alink->qdiscs[i].stats.backlog_bytes = 0;
 
 	return 0;
 err_destroy:
 	/* If the qdisc keeps on living, but we can't offload undo changes */
-	if (alink->qdiscs[0].handle == opt->handle) {
-		opt->set.qstats->qlen -= alink->qdiscs[0].stats.backlog_pkts;
+	if (existing) {
+		opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
 		opt->set.qstats->backlog -=
-				alink->qdiscs[0].stats.backlog_bytes;
+				alink->qdiscs[i].stats.backlog_bytes;
 	}
-	if (alink->qdiscs[0].handle != TC_H_UNSPEC)
-		nfp_abm_red_destroy(netdev, alink, alink->qdiscs[0].handle);
+	nfp_abm_red_destroy(netdev, alink, opt->handle);
 
 	return err;
 }
@@ -146,13 +212,17 @@ nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
 {
 	struct nfp_alink_stats *prev_stats;
 	struct nfp_alink_stats stats;
-	int err;
+	int i, err;
 
-	if (alink->qdiscs[0].handle != opt->handle)
-		return -EOPNOTSUPP;
-	prev_stats = &alink->qdiscs[0].stats;
+	i = nfp_abm_red_find(alink, opt);
+	if (i < 0)
+		return i;
+	prev_stats = &alink->qdiscs[i].stats;
 
-	err = nfp_abm_ctrl_read_stats(alink, &stats);
+	if (alink->parent == TC_H_ROOT)
+		err = nfp_abm_ctrl_read_stats(alink, &stats);
+	else
+		err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
 	if (err)
 		return err;
@@ -168,13 +238,17 @@ nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
 {
 	struct nfp_alink_xstats *prev_xstats;
 	struct nfp_alink_xstats xstats;
-	int err;
+	int i, err;
 
-	if (alink->qdiscs[0].handle != opt->handle)
-		return -EOPNOTSUPP;
-	prev_xstats = &alink->qdiscs[0].xstats;
+	i = nfp_abm_red_find(alink, opt);
+	if (i < 0)
+		return i;
+	prev_xstats = &alink->qdiscs[i].xstats;
 
-	err = nfp_abm_ctrl_read_xstats(alink, &xstats);
+	if (alink->parent == TC_H_ROOT)
+		err = nfp_abm_ctrl_read_xstats(alink, &xstats);
+	else
+		err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
 	if (err)
 		return err;
@@ -190,9 +264,6 @@ static int
 nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
 		     struct tc_red_qopt_offload *opt)
 {
-	if (opt->parent != TC_H_ROOT)
-		return -EOPNOTSUPP;
-
 	switch (opt->command) {
 	case TC_RED_REPLACE:
 		return nfp_abm_red_replace(netdev, alink, opt);
@@ -208,6 +279,24 @@ nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
 	}
 }
 
+static int
+nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+		    struct tc_mq_qopt_offload *opt)
+{
+	switch (opt->command) {
+	case TC_MQ_CREATE:
+		nfp_abm_reset_root(netdev, alink, opt->handle,
+				   alink->total_queues);
+		return 0;
+	case TC_MQ_DESTROY:
+		if (opt->handle == alink->parent)
+			nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static int
 nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
 		 enum tc_setup_type type, void *type_data)
@@ -220,6 +309,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
 		return -EOPNOTSUPP;
 
 	switch (type) {
+	case TC_SETUP_QDISC_MQ:
+		return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
 	case TC_SETUP_QDISC_RED:
 		return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
 	default:
@@ -473,13 +564,21 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
 	alink->abm = abm;
 	alink->vnic = nn;
 	alink->id = id;
+	alink->parent = TC_H_ROOT;
+	alink->total_queues = alink->vnic->max_rx_rings;
+	alink->qdiscs = kvzalloc(sizeof(*alink->qdiscs) * alink->total_queues,
+				 GFP_KERNEL);
+	if (!alink->qdiscs) {
+		err = -ENOMEM;
+		goto err_free_alink;
+	}
 
 	/* This is a multi-host app, make sure MAC/PHY is up, but don't
 	 * make the MAC/PHY state follow the state of any of the ports.
 	 */
 	err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
 	if (err < 0)
-		goto err_free_alink;
+		goto err_free_qdiscs;
 
 	netif_keep_dst(nn->dp.netdev);
@@ -488,6 +587,8 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
 	return 0;
 
+err_free_qdiscs:
+	kvfree(alink->qdiscs);
 err_free_alink:
 	kfree(alink);
 	return err;
@@ -498,6 +599,7 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
 	struct nfp_abm_link *alink = nn->app_priv;
 
 	nfp_abm_kill_reprs(alink->abm, alink);
+	kvfree(alink->qdiscs);
 	kfree(alink);
 }
@@ -106,6 +106,9 @@ struct nfp_red_qdisc {
  * @vnic:	data vNIC
  * @id:		id of the data vNIC
  * @queue_base:	id of base to host queue within PCIe (not QC idx)
+ * @total_queues:	number of PF queues
+ * @parent:	handle of expected parent, i.e. handle of MQ, or TC_H_ROOT
+ * @num_qdiscs:	number of currently used qdiscs
  * @qdiscs:	array of qdiscs
  */
 struct nfp_abm_link {
@@ -113,16 +116,25 @@ struct nfp_abm_link {
 	struct nfp_net *vnic;
 	unsigned int id;
 	unsigned int queue_base;
-	struct nfp_red_qdisc qdiscs[1];
+	unsigned int total_queues;
+	u32 parent;
+	unsigned int num_qdiscs;
+	struct nfp_red_qdisc *qdiscs;
 };
 
 void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
 int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
 int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i,
+			   u32 val);
 int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
 			    struct nfp_alink_stats *stats);
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
+			      struct nfp_alink_stats *stats);
 int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
 			     struct nfp_alink_xstats *xstats);
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+			       struct nfp_alink_xstats *xstats);
 u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
 u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i);
 int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm);