Commit 2738d9d9 authored by Ritaro Takenaka's avatar Ritaro Takenaka Committed by Pablo Neira Ayuso

netfilter: flowtable: move dst_check to packet path

Fixes sporadic IPv6 packet loss when flow offloading is enabled.

IPv6 route GC and flowtable GC are not synchronized.
When dst_cache becomes stale and a packet passes through the flow before
the flowtable GC tears it down, the packet can be dropped.
So the dst entry must be validated on every packet in the packet path.

Fixes: 227e1e4d ("netfilter: nf_flowtable: skip device lookup from interface index")
Signed-off-by: default avatarRitaro Takenaka <ritarot634@gmail.com>
Signed-off-by: default avatarPablo Neira Ayuso <pablo@netfilter.org>
parent e5eaac2b
...@@ -421,32 +421,11 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table, ...@@ -421,32 +421,11 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
return err; return err;
} }
/* Report whether the tuple's cached route has gone stale.
 *
 * Only NEIGH and XFRM xmit types carry a cached dst_entry; other xmit
 * types have no route to validate, so they are never considered stale.
 * Returns true when dst_check() rejects the cached entry for the stored
 * cookie.
 */
static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
{
	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
		return false;

	return !dst_check(tuple->dst_cache, tuple->dst_cookie);
}
/* Return true if either direction of the flow holds a stale cached dst.
 *
 * A flow with a stale route in one direction must be torn down as a
 * whole, so both the original and reply tuples are checked.
 */
static bool nf_flow_has_stale_dst(struct flow_offload *flow)
{
	struct flow_offload_tuple *orig, *reply;

	orig  = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
	reply = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;

	return flow_offload_stale_dst(orig) || flow_offload_stale_dst(reply);
}
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table, static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data) struct flow_offload *flow, void *data)
{ {
if (nf_flow_has_expired(flow) || if (nf_flow_has_expired(flow) ||
nf_ct_is_dying(flow->ct) || nf_ct_is_dying(flow->ct))
nf_flow_has_stale_dst(flow))
flow_offload_teardown(flow); flow_offload_teardown(flow);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) { if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
......
...@@ -248,6 +248,15 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) ...@@ -248,6 +248,15 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
return true; return true;
} }
/* Validate the tuple's cached route on the packet fast path.
 *
 * Returns true when the cached dst is still usable (or when the xmit
 * type carries no cached dst at all, in which case there is nothing to
 * validate). A false return tells the caller to tear the flow down and
 * let the packet take the slow path.
 */
static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
	switch (tuple->xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
	case FLOW_OFFLOAD_XMIT_XFRM:
		return dst_check(tuple->dst_cache, tuple->dst_cookie);
	default:
		/* No cached route for this xmit type; nothing can be stale. */
		return true;
	}
}
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb, static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
const struct nf_hook_state *state, const struct nf_hook_state *state,
struct dst_entry *dst) struct dst_entry *dst)
...@@ -367,6 +376,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, ...@@ -367,6 +376,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, iph->protocol, skb, thoff)) if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
return NF_ACCEPT; return NF_ACCEPT;
if (!nf_flow_dst_check(&tuplehash->tuple)) {
flow_offload_teardown(flow);
return NF_ACCEPT;
}
if (skb_try_make_writable(skb, thoff + hdrsize)) if (skb_try_make_writable(skb, thoff + hdrsize))
return NF_DROP; return NF_DROP;
...@@ -624,6 +638,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, ...@@ -624,6 +638,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff)) if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
return NF_ACCEPT; return NF_ACCEPT;
if (!nf_flow_dst_check(&tuplehash->tuple)) {
flow_offload_teardown(flow);
return NF_ACCEPT;
}
if (skb_try_make_writable(skb, thoff + hdrsize)) if (skb_try_make_writable(skb, thoff + hdrsize))
return NF_DROP; return NF_DROP;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment