Commit 530d9951 authored by John Hurley, committed by David S. Miller

net: sched: cls_u32: implement offload tcf_proto_op

Add the reoffload tcf_proto_op in cls_u32 to generate an offload message for
each filter and each hashtable in the given tcf_proto. Call the specified
callback with each new offload message. The function only returns an error
if the callback rejects adding a 'hardware only' rule.

A filter contains a flag to indicate whether it is in hardware or not. To
ensure the reoffload function properly maintains this flag, keep a reference
counter for the number of instances of the filter that are in hardware, and
only update the flag when this counter changes from or to 0.
Signed-off-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0efd1b3a
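The reference-count scheme described in the message above is what the tc_cls_offload_cnt_update() helper called from the new u32_reoffload_knode() in the diff below implements. As a minimal sketch of those semantics (assuming the tcf_block_offload_inc()/tcf_block_offload_dec() helpers that cls_u32 already uses to maintain the block's offload state), the helper behaves like this:

static inline void
tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
			  u32 *flags, bool add)
{
	if (add) {
		/* 0 -> 1: first hardware instance, set the in-hw flag */
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		(*cnt)++;
	} else {
		/* 1 -> 0: last hardware instance, clear the in-hw flag */
		(*cnt)--;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
}

This lets in_hw_count track one instance per offloading callback while the filter's flags still expose a single in-hardware bit.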
@@ -62,6 +62,7 @@ struct tc_u_knode {
 	struct tc_u32_pcnt __percpu *pf;
 #endif
 	u32 flags;
+	unsigned int in_hw_count;
 #ifdef CONFIG_CLS_U32_MARK
 	u32 val;
 	u32 mask;
@@ -571,6 +572,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 		u32_remove_hw_knode(tp, n, NULL);
 		return err;
 	} else if (err > 0) {
+		n->in_hw_count = err;
 		tcf_block_offload_inc(block, &n->flags);
 	}
 
@@ -1199,6 +1201,114 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 	}
 }
 
+static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
+			       bool add, tc_setup_cb_t *cb, void *cb_priv,
+			       struct netlink_ext_ack *extack)
+{
+	struct tc_cls_u32_offload cls_u32 = {};
+	int err;
+
+	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
+	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
+	cls_u32.hnode.divisor = ht->divisor;
+	cls_u32.hnode.handle = ht->handle;
+	cls_u32.hnode.prio = ht->prio;
+
+	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
+	if (err && add && tc_skip_sw(ht->flags))
+		return err;
+
+	return 0;
+}
+
+static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+			       bool add, tc_setup_cb_t *cb, void *cb_priv,
+			       struct netlink_ext_ack *extack)
+{
+	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
+	struct tcf_block *block = tp->chain->block;
+	struct tc_cls_u32_offload cls_u32 = {};
+	int err;
+
+	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
+	cls_u32.command = add ?
+		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
+	cls_u32.knode.handle = n->handle;
+
+	if (add) {
+		cls_u32.knode.fshift = n->fshift;
+#ifdef CONFIG_CLS_U32_MARK
+		cls_u32.knode.val = n->val;
+		cls_u32.knode.mask = n->mask;
+#else
+		cls_u32.knode.val = 0;
+		cls_u32.knode.mask = 0;
+#endif
+		cls_u32.knode.sel = &n->sel;
+		cls_u32.knode.exts = &n->exts;
+		if (n->ht_down)
+			cls_u32.knode.link_handle = ht->handle;
+	}
+
+	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
+	if (err) {
+		if (add && tc_skip_sw(n->flags))
+			return err;
+		return 0;
+	}
+
+	tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);
+
+	return 0;
+}
+
+static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+			 void *cb_priv, struct netlink_ext_ack *extack)
+{
+	struct tc_u_common *tp_c = tp->data;
+	struct tc_u_hnode *ht;
+	struct tc_u_knode *n;
+	unsigned int h;
+	int err;
+
+	for (ht = rtnl_dereference(tp_c->hlist);
+	     ht;
+	     ht = rtnl_dereference(ht->next)) {
+		if (ht->prio != tp->prio)
+			continue;
+
+		/* When adding filters to a new dev, try to offload the
+		 * hashtable first. When removing, do the filters before the
+		 * hashtable.
+		 */
+		if (add && !tc_skip_hw(ht->flags)) {
+			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
+			if (err)
+				return err;
+		}
+
+		for (h = 0; h <= ht->divisor; h++) {
+			for (n = rtnl_dereference(ht->ht[h]);
+			     n;
+			     n = rtnl_dereference(n->next)) {
+				if (tc_skip_hw(n->flags))
+					continue;
+
+				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
+				if (err)
+					return err;
+			}
+		}
+
+		if (!add && !tc_skip_hw(ht->flags))
+			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
+	}
+
+	return 0;
+}
+
 static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
 {
 	struct tc_u_knode *n = fh;
@@ -1336,6 +1446,7 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = {
 	.change = u32_change,
 	.delete = u32_delete,
 	.walk = u32_walk,
+	.reoffload = u32_reoffload,
 	.dump = u32_dump,
 	.bind_class = u32_bind_class,
 	.owner = THIS_MODULE,
...
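For context on the callback contract: cb is an ordinary tc_setup_cb_t that a driver registers on the block, and u32_reoffload() simply replays the existing hashtables and filters through it. Below is a hedged sketch of a driver-side callback consuming these messages; struct my_priv and the my_*() helpers are hypothetical stand-ins, while TC_SETUP_CLSU32, struct tc_cls_u32_offload, and the TC_CLSU32_* commands are the kernel definitions used in the diff above.

#include <net/pkt_cls.h>

struct my_priv;					/* hypothetical driver state */
static int my_add_hnode(struct my_priv *priv, struct tc_cls_u32_hnode *hnode);
static void my_del_hnode(struct my_priv *priv, struct tc_cls_u32_hnode *hnode);
static int my_add_knode(struct my_priv *priv, struct tc_cls_u32_knode *knode);
static void my_del_knode(struct my_priv *priv, struct tc_cls_u32_knode *knode);

/* Illustrative driver block callback; my_* names are made up. */
static int my_block_cb(enum tc_setup_type type, void *type_data,
		       void *cb_priv)
{
	struct my_priv *priv = cb_priv;
	struct tc_cls_u32_offload *cls_u32;

	if (type != TC_SETUP_CLSU32)
		return -EOPNOTSUPP;

	cls_u32 = type_data;
	switch (cls_u32->command) {
	case TC_CLSU32_NEW_HNODE:
		/* allocate hardware state for a hash table */
		return my_add_hnode(priv, &cls_u32->hnode);
	case TC_CLSU32_DELETE_HNODE:
		my_del_hnode(priv, &cls_u32->hnode);
		return 0;
	case TC_CLSU32_REPLACE_KNODE:
		/* install or update one filter in hardware */
		return my_add_knode(priv, &cls_u32->knode);
	case TC_CLSU32_DELETE_KNODE:
		my_del_knode(priv, &cls_u32->knode);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

A non-zero return from the callback only aborts the replay for 'hardware only' (skip_sw) entries; for other filters u32_reoffload_knode() returns 0 but skips tc_cls_offload_cnt_update(), so the filter is simply not counted as being in hardware.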