Commit 9bc64bd0 authored by Vlad Buslov, committed by Jakub Kicinski

net/sched: act_ct: Always fill offloading tuple iifidx

The referenced commit doesn't always set iifidx when offloading the flow to
hardware. Fix the following cases (both are sketched in condensed form just
before the diff below):

- nf_conn_act_ct_ext_fill() is called before the extension is created with
nf_conn_act_ct_ext_add() in tcf_ct_act(). This can cause a rule to be
offloaded with an unspecified iifidx when the connection is offloaded after
only a single original-direction packet has been processed by the tc data
path. Always fill the new nf_conn_act_ct_ext instance right after creating
it in nf_conn_act_ct_ext_add().

- Offloading of unidirectional UDP NEW connections is now supported, but the
ct flow iifidx field is not updated when the connection is promoted to
bidirectional, which can leave the reply-direction iifidx zero when the
connection is refreshed. Fill in the extension and update the flow iifidx
before calling flow_offload_refresh().

Fixes: 9795ded7 ("net/sched: act_ct: Fill offloading tuple iifidx")
Fixes: 6a9bad00 ("net/sched: act_ct: offload UDP NEW connections")
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/20231103151410.764271-1-vladbu@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent caf31008
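
The two changes, condensed from the diff below (CONFIG_NET_ACT_CT ifdefs and
surrounding context trimmed):

/* include/net/netfilter/nf_conntrack_act_ct.h
 * Creation path (tcf_ct_act(), ovs_ct_commit()): the extension is now
 * filled immediately after it is allocated, so the very first offload
 * already carries the original-direction ifindex.
 */
static inline struct
nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);

	if (act_ct)
		return act_ct;

	act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	return act_ct;
}

/* net/sched/act_ct.c, tcf_ct_flow_table_lookup(): before refreshing an
 * already-offloaded flow, refill the extension for the current packet's
 * direction and copy both directions' ifindex into the flow entry, so a
 * UDP NEW flow promoted to bidirectional gets a valid reply-direction
 * iifidx.
 */
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	tcf_ct_flow_ct_ext_ifidx_update(flow);
	flow_offload_refresh(nf_ft, flow, force_refresh);

The signature change is what forces ovs_ct_commit() and tcf_ct_act() to pass
the skb and ctinfo through, as shown in the full diff.
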
--- a/include/net/netfilter/nf_conntrack_act_ct.h
+++ b/include/net/netfilter/nf_conntrack_act_ct.h
@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
 #endif
 }
 
-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+					   enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+	struct nf_conn_act_ct_ext *act_ct_ext;
+
+	act_ct_ext = nf_conn_act_ct_ext_find(ct);
+	if (dev_net(skb->dev) == &init_net && act_ct_ext)
+		act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+#endif
+}
+
+static inline struct
+nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
+					   struct nf_conn *ct,
+					   enum ip_conntrack_info ctinfo)
 {
 #if IS_ENABLED(CONFIG_NET_ACT_CT)
 	struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
 		return act_ct;
 
 	act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
+	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
 	return act_ct;
 #else
 	return NULL;
 #endif
 }
 
-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
-					   enum ip_conntrack_info ctinfo)
-{
-#if IS_ENABLED(CONFIG_NET_ACT_CT)
-	struct nf_conn_act_ct_ext *act_ct_ext;
-
-	act_ct_ext = nf_conn_act_ct_ext_find(ct);
-	if (dev_net(skb->dev) == &init_net && act_ct_ext)
-		act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
-#endif
-}
-
 #endif /* _NF_CONNTRACK_ACT_CT_H */
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -985,7 +985,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
 		if (err)
 			return err;
 
-		nf_conn_act_ct_ext_add(ct);
+		nf_conn_act_ct_ext_add(skb, ct, ctinfo);
 	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
 		   labels_nonzero(&info->labels.mask)) {
 		err = ovs_ct_set_labels(ct, key, &info->labels.value,
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -376,6 +376,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
 	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
 }
 
+static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
+{
+	struct nf_conn_act_ct_ext *act_ct_ext;
+
+	act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
+	if (act_ct_ext) {
+		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
+		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
+	}
+}
+
 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
 				  struct nf_conn *ct,
 				  bool tcp, bool bidirectional)
@@ -671,6 +682,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
 	else
 		ctinfo = IP_CT_ESTABLISHED_REPLY;
 
+	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+	tcf_ct_flow_ct_ext_ifidx_update(flow);
 	flow_offload_refresh(nf_ft, flow, force_refresh);
 	if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
 		/* Process this flow in SW to allow promoting to ASSURED */
@@ -1034,7 +1047,7 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
 		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
 
 		if (!nf_ct_is_confirmed(ct))
-			nf_conn_act_ct_ext_add(ct);
+			nf_conn_act_ct_ext_add(skb, ct, ctinfo);
 
 		/* This will take care of sending queued events
 		 * even if the connection is already confirmed.