Commit 602ae008 authored by Jakub Kicinski

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next

Pablo Neira Ayuso says:

====================
Netfilter/IPVS updates for net-next

The following patchset contains Netfilter/IPVS updates for net-next:

1) Simplify nf_ct_get_tuple(), from Jackie Liu.

2) Pass an explicit format string to request_module(), from Bill Wendling.

3) Add /proc/net/stat/nf_flowtable to monitor hardware offload objects
   still pending in the offload workqueues, from Vlad Buslov.

4) Add missing RCU annotations and accessors in the netfilter tree,
   from Florian Westphal.

5) Merge the h323 conntrack helper NAT hooks into a single object,
   also from Florian.

6) A batch of updates to fix sparse warnings treewide,
   from Florian Westphal.

7) Move nft_cmp_fast_mask() to where it is used, from Florian.

8) Add missing const to nf_nat_initialized(), from James Yonan.

9) Use bitmap API for Maglev IPVS scheduler, from Christophe Jaillet.

10) Use refcount_inc() instead of refcount_inc_not_zero() in the flowtable,
    from Florian Westphal.

11) Remove pr_debug calls in xt_TPROXY, from Nathan Chancellor.

* git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next:
  netfilter: xt_TPROXY: remove pr_debug invocations
  netfilter: flowtable: prefer refcount_inc
  netfilter: ipvs: Use the bitmap API to allocate bitmaps
  netfilter: nf_nat: in nf_nat_initialized(), use const struct nf_conn *
  netfilter: nf_tables: move nft_cmp_fast_mask to where its used
  netfilter: nf_tables: use correct integer types
  netfilter: nf_tables: add and use BE register load-store helpers
  netfilter: nf_tables: use the correct get/put helpers
  netfilter: x_tables: use correct integer types
  netfilter: nfnetlink: add missing __be16 cast
  netfilter: nft_set_bitmap: Fix spelling mistake
  netfilter: h323: merge nat hook pointers into one
  netfilter: nf_conntrack: use rcu accessors where needed
  netfilter: nf_conntrack: add missing __rcu annotations
  netfilter: nf_flow_table: count pending offload workqueue tasks
  net/sched: act_ct: set 'net' pointer when creating new nf_flow_table
  netfilter: conntrack: use correct format characters
  netfilter: conntrack: use fallthrough to cleanup
====================

Link: https://lore.kernel.org/r/20220720230754.209053-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 47f058ce aa8c7cdb
......@@ -38,60 +38,63 @@ void nf_conntrack_h245_expect(struct nf_conn *new,
struct nf_conntrack_expect *this);
void nf_conntrack_q931_expect(struct nf_conn *new,
struct nf_conntrack_expect *this);
extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr,
union nf_inet_addr *addr,
__be16 port);
extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
TransportAddress *taddr,
union nf_inet_addr *addr,
__be16 port);
extern int (*set_sig_addr_hook) (struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
TransportAddress *taddr, int count);
extern int (*set_ras_addr_hook) (struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
TransportAddress *taddr, int count);
extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
int dataoff,
H245_TransportAddress *taddr,
__be16 port, __be16 rtp_port,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp);
extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
struct nfct_h323_nat_hooks {
int (*set_h245_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp);
extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
H245_TransportAddress *taddr,
union nf_inet_addr *addr, __be16 port);
int (*set_h225_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp);
extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
TransportAddress *taddr,
__be16 port,
struct nf_conntrack_expect *exp);
extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, TransportAddress *taddr,
int idx, __be16 port,
struct nf_conntrack_expect *exp);
TransportAddress *taddr,
union nf_inet_addr *addr, __be16 port);
int (*set_sig_addr)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
TransportAddress *taddr, int count);
int (*set_ras_addr)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
TransportAddress *taddr, int count);
int (*nat_rtp_rtcp)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr,
__be16 port, __be16 rtp_port,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp);
int (*nat_t120)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp);
int (*nat_h245)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp);
int (*nat_callforwarding)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp);
int (*nat_q931)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, TransportAddress *taddr, int idx,
__be16 port, struct nf_conntrack_expect *exp);
};
extern const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook;
#endif
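
The conntrack side now needs only one RCU dereference of nfct_h323_nat_hook instead of one per function pointer. A minimal sketch of that caller pattern, assuming the usual packet-path context (rcu_read_lock() held); the function name example_call_nat_t120 is invented for illustration and is not part of this patchset:

#include <linux/netfilter/nf_conntrack_h323.h>

static int example_call_nat_t120(struct sk_buff *skb, struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo,
                                 unsigned int protoff, unsigned char **data,
                                 int dataoff, H245_TransportAddress *taddr,
                                 __be16 port, struct nf_conntrack_expect *exp)
{
        const struct nfct_h323_nat_hooks *nathook;

        /* One load of the consolidated hook object covers all callbacks. */
        nathook = rcu_dereference(nfct_h323_nat_hook);
        if (nathook && (ct->status & IPS_NAT_MASK))
                return nathook->nat_t120(skb, ct, ctinfo, protoff, data,
                                         dataoff, taddr, port, exp);
        return 0;
}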
......@@ -164,7 +164,7 @@ struct nf_nat_sip_hooks {
unsigned int medialen,
union nf_inet_addr *rtp_addr);
};
extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
extern const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks;
int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
unsigned int datalen, unsigned int *matchoff,
......
......@@ -26,6 +26,9 @@
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
#include <net/netns/flow_table.h>
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
......@@ -142,6 +145,9 @@ struct net {
#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
struct netns_nftables nft;
#endif
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
struct netns_ft ft;
#endif
#endif
#ifdef CONFIG_WEXT_CORE
struct sk_buff_head wext_nlevents;
......
......@@ -105,7 +105,7 @@ struct nf_ct_timeout_hooks {
void (*timeout_put)(struct nf_ct_timeout *timeout);
};
extern const struct nf_ct_timeout_hooks *nf_ct_timeout_hook;
extern const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook;
#endif
#endif /* _NF_CONNTRACK_TIMEOUT_H */
......@@ -335,4 +335,25 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
return 0;
}
#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count) \
this_cpu_dec((net)->ft.stat->count)
#ifdef CONFIG_NF_FLOW_TABLE_PROCFS
int nf_flow_table_init_proc(struct net *net);
void nf_flow_table_fini_proc(struct net *net);
#else
static inline int nf_flow_table_init_proc(struct net *net)
{
return 0;
}
static inline void nf_flow_table_fini_proc(struct net *net)
{
}
#endif /* CONFIG_NF_FLOW_TABLE_PROCFS */
#endif /* _NF_FLOW_TABLE_H */
......@@ -104,7 +104,7 @@ unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
static inline int nf_nat_initialized(struct nf_conn *ct,
static inline int nf_nat_initialized(const struct nf_conn *ct,
enum nf_nat_manip_type manip)
{
if (manip == NF_NAT_MANIP_SRC)
......
......@@ -157,11 +157,26 @@ static inline void nft_reg_store16(u32 *dreg, u16 val)
*(u16 *)dreg = val;
}
static inline void nft_reg_store_be16(u32 *dreg, __be16 val)
{
nft_reg_store16(dreg, (__force __u16)val);
}
static inline u16 nft_reg_load16(const u32 *sreg)
{
return *(u16 *)sreg;
}
static inline __be16 nft_reg_load_be16(const u32 *sreg)
{
return (__force __be16)nft_reg_load16(sreg);
}
static inline __be32 nft_reg_load_be32(const u32 *sreg)
{
return *(__force __be32 *)sreg;
}
static inline void nft_reg_store64(u32 *dreg, u64 val)
{
put_unaligned(val, (u64 *)dreg);
......
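
The be16/be32 helpers give sparse a single annotated conversion point instead of __force casts scattered over call sites. A small sketch of the round trip for a network-order port; the two wrapper names here are invented:

#include <net/netfilter/nf_tables.h>

static void example_store_port(u32 *dreg, __be16 port)
{
        nft_reg_store_be16(dreg, port);         /* no open-coded (__force u16) cast */
}

static __be16 example_load_port(const u32 *sreg)
{
        return nft_reg_load_be16(sreg);         /* value comes back typed as __be16 */
}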
......@@ -56,16 +56,6 @@ struct nft_immediate_expr {
u8 dlen;
};
/* Calculate the mask for the nft_cmp_fast expression. On big endian the
* mask needs to include the *upper* bytes when interpreting that data as
* something smaller than the full u32, therefore a cpu_to_le32 is done.
*/
static inline u32 nft_cmp_fast_mask(unsigned int len)
{
return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
data) * BITS_PER_BYTE - len));
}
extern const struct nft_expr_ops nft_cmp_fast_ops;
extern const struct nft_expr_ops nft_cmp16_fast_ops;
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NETNS_FLOW_TABLE_H
#define __NETNS_FLOW_TABLE_H
struct nf_flow_table_stat {
unsigned int count_wq_add;
unsigned int count_wq_del;
unsigned int count_wq_stats;
};
struct netns_ft {
struct nf_flow_table_stat __percpu *stat;
};
#endif
......@@ -53,7 +53,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
goto err;
br_vlan_get_proto(br_dev, &p_proto);
nft_reg_store16(dest, htons(p_proto));
nft_reg_store_be16(dest, htons(p_proto));
return;
}
default:
......
......@@ -579,28 +579,22 @@ static struct nf_ct_helper_expectfn callforwarding_nat = {
.expectfn = ip_nat_callforwarding_expect,
};
static const struct nfct_h323_nat_hooks nathooks = {
.set_h245_addr = set_h245_addr,
.set_h225_addr = set_h225_addr,
.set_sig_addr = set_sig_addr,
.set_ras_addr = set_ras_addr,
.nat_rtp_rtcp = nat_rtp_rtcp,
.nat_t120 = nat_t120,
.nat_h245 = nat_h245,
.nat_callforwarding = nat_callforwarding,
.nat_q931 = nat_q931,
};
/****************************************************************************/
static int __init nf_nat_h323_init(void)
{
BUG_ON(set_h245_addr_hook != NULL);
BUG_ON(set_h225_addr_hook != NULL);
BUG_ON(set_sig_addr_hook != NULL);
BUG_ON(set_ras_addr_hook != NULL);
BUG_ON(nat_rtp_rtcp_hook != NULL);
BUG_ON(nat_t120_hook != NULL);
BUG_ON(nat_h245_hook != NULL);
BUG_ON(nat_callforwarding_hook != NULL);
BUG_ON(nat_q931_hook != NULL);
RCU_INIT_POINTER(set_h245_addr_hook, set_h245_addr);
RCU_INIT_POINTER(set_h225_addr_hook, set_h225_addr);
RCU_INIT_POINTER(set_sig_addr_hook, set_sig_addr);
RCU_INIT_POINTER(set_ras_addr_hook, set_ras_addr);
RCU_INIT_POINTER(nat_rtp_rtcp_hook, nat_rtp_rtcp);
RCU_INIT_POINTER(nat_t120_hook, nat_t120);
RCU_INIT_POINTER(nat_h245_hook, nat_h245);
RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
RCU_INIT_POINTER(nat_q931_hook, nat_q931);
RCU_INIT_POINTER(nfct_h323_nat_hook, &nathooks);
nf_ct_helper_expectfn_register(&q931_nat);
nf_ct_helper_expectfn_register(&callforwarding_nat);
return 0;
......@@ -609,15 +603,7 @@ static int __init nf_nat_h323_init(void)
/****************************************************************************/
static void __exit nf_nat_h323_fini(void)
{
RCU_INIT_POINTER(set_h245_addr_hook, NULL);
RCU_INIT_POINTER(set_h225_addr_hook, NULL);
RCU_INIT_POINTER(set_sig_addr_hook, NULL);
RCU_INIT_POINTER(set_ras_addr_hook, NULL);
RCU_INIT_POINTER(nat_rtp_rtcp_hook, NULL);
RCU_INIT_POINTER(nat_t120_hook, NULL);
RCU_INIT_POINTER(nat_h245_hook, NULL);
RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
RCU_INIT_POINTER(nat_q931_hook, NULL);
RCU_INIT_POINTER(nfct_h323_nat_hook, NULL);
nf_ct_helper_expectfn_unregister(&q931_nat);
nf_ct_helper_expectfn_unregister(&callforwarding_nat);
synchronize_rcu();
......
......@@ -734,6 +734,15 @@ config NF_FLOW_TABLE
To compile it as a module, choose M here.
config NF_FLOW_TABLE_PROCFS
bool "Supply flow table statistics in procfs"
default y
depends on PROC_FS
depends on SYSCTL
help
This option enables for the flow table offload statistics
to be shown in procfs under net/netfilter/nf_flowtable.
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
default m if NETFILTER_ADVANCED=n
......
......@@ -128,6 +128,7 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \
nf_flow_table_offload.o
nf_flow_table-$(CONFIG_NF_FLOW_TABLE_PROCFS) += nf_flow_table_procfs.o
obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
......
......@@ -174,8 +174,7 @@ static int ip_vs_mh_populate(struct ip_vs_mh_state *s,
return 0;
}
table = kcalloc(BITS_TO_LONGS(IP_VS_MH_TAB_SIZE),
sizeof(unsigned long), GFP_KERNEL);
table = bitmap_zalloc(IP_VS_MH_TAB_SIZE, GFP_KERNEL);
if (!table)
return -ENOMEM;
......@@ -227,7 +226,7 @@ static int ip_vs_mh_populate(struct ip_vs_mh_state *s,
}
out:
kfree(table);
bitmap_free(table);
return 0;
}
......
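
bitmap_zalloc()/bitmap_free() replace the open-coded kcalloc(BITS_TO_LONGS(...), sizeof(unsigned long), ...) and kfree() pair, so the allocation is sized directly from the number of bits it needs. A simplified sketch of the pattern, not the actual ip_vs_mh_populate() logic:

#include <linux/bitmap.h>
#include <linux/gfp.h>

static bool example_mark_slot(unsigned long *table, unsigned int slot)
{
        if (test_bit(slot, table))
                return false;           /* slot already taken */
        __set_bit(slot, table);
        return true;
}

static int example_populate(unsigned int tab_size)
{
        unsigned long *table;

        table = bitmap_zalloc(tab_size, GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        example_mark_slot(table, 0);

        bitmap_free(table);
        return 0;
}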
......@@ -20,6 +20,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int timeout)
{
const struct nf_conntrack_helper *helper;
struct nf_conntrack_expect *exp;
struct iphdr *iph = ip_hdr(skb);
struct rtable *rt = skb_rtable(skb);
......@@ -58,7 +59,10 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
goto out;
exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port;
helper = rcu_dereference(help->helper);
if (helper)
exp->tuple.src.u.udp.port = helper->tuple.src.u.udp.port;
exp->mask.src.u3.ip = mask;
exp->mask.src.u.udp.port = htons(0xFFFF);
......
......@@ -329,20 +329,18 @@ nf_ct_get_tuple(const struct sk_buff *skb,
return gre_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
case IPPROTO_TCP:
case IPPROTO_UDP: /* fallthrough */
return nf_ct_get_tuple_ports(skb, dataoff, tuple);
case IPPROTO_UDP:
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
case IPPROTO_UDPLITE:
return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
case IPPROTO_SCTP:
return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
case IPPROTO_DCCP:
return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
/* fallthrough */
return nf_ct_get_tuple_ports(skb, dataoff, tuple);
default:
break;
}
......
......@@ -165,7 +165,7 @@ nf_nat_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
if (!nat) {
snprintf(mod_name, sizeof(mod_name), "%s", h->nat_mod_name);
rcu_read_unlock();
request_module(mod_name);
request_module("%s", mod_name);
rcu_read_lock();
nat = nf_conntrack_nat_helper_find(mod_name);
......@@ -249,7 +249,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
if (tmpl != NULL) {
help = nfct_help(tmpl);
if (help != NULL) {
helper = help->helper;
helper = rcu_dereference(help->helper);
set_bit(IPS_HELPER_BIT, &ct->status);
}
}
......
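
The explicit "%s" matters because the module name is assembled at run time: handing a non-constant string to a printf-style function trips -Wformat-security and would misinterpret any '%' the name happened to contain. A short sketch; the helper name is invented:

#include <linux/kmod.h>

static int example_request_nat_helper(const char *mod_name)
{
        /* request_module(mod_name) would treat mod_name as a format string;
         * with "%s" the name is passed through as plain data. */
        return request_module("%s", mod_name);
}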
......@@ -2005,7 +2005,7 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
}
if (help) {
if (help->helper == helper) {
if (rcu_access_pointer(help->helper) == helper) {
/* update private helper data if allowed. */
if (helper->from_nlattr)
helper->from_nlattr(helpinfo, ct);
......@@ -3413,12 +3413,17 @@ static int ctnetlink_get_expect(struct sk_buff *skb,
static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
{
struct nf_conntrack_helper *helper;
const struct nf_conn_help *m_help;
const char *name = data;
m_help = nfct_help(exp->master);
return strcmp(m_help->helper->name, name) == 0;
helper = rcu_dereference(m_help->helper);
if (!helper)
return false;
return strcmp(helper->name, name) == 0;
}
static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
......
......@@ -45,7 +45,7 @@ MODULE_ALIAS_NFCT_HELPER("pptp");
static DEFINE_SPINLOCK(nf_pptp_lock);
const struct nf_nat_pptp_hook *nf_nat_pptp_hook;
const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook;
EXPORT_SYMBOL_GPL(nf_nat_pptp_hook);
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
......
......@@ -60,7 +60,7 @@ module_param(sip_external_media, int, 0600);
MODULE_PARM_DESC(sip_external_media, "Expect Media streams between external "
"endpoints (default 0)");
const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks;
EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
static int string_len(const struct nf_conn *ct, const char *dptr,
......@@ -1229,6 +1229,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
struct nf_conntrack_expect *exp;
union nf_inet_addr *saddr, daddr;
const struct nf_nat_sip_hooks *hooks;
struct nf_conntrack_helper *helper;
__be16 port;
u8 proto;
unsigned int expires = 0;
......@@ -1289,10 +1290,14 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
if (sip_direct_signalling)
saddr = &ct->tuplehash[!dir].tuple.src.u3;
helper = rcu_dereference(nfct_help(ct)->helper);
if (!helper)
return NF_DROP;
nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
saddr, &daddr, proto, NULL, &port);
exp->timeout.expires = sip_timeout * HZ;
exp->helper = nfct_help(ct)->helper;
exp->helper = helper;
exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
hooks = rcu_dereference(nf_nat_sip_hooks);
......
......@@ -22,15 +22,21 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>
const struct nf_ct_timeout_hooks *nf_ct_timeout_hook __read_mostly;
const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_timeout_hook);
static int untimeout(struct nf_conn *ct, void *timeout)
{
struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
if (timeout_ext && (!timeout || timeout_ext->timeout == timeout))
RCU_INIT_POINTER(timeout_ext->timeout, NULL);
if (timeout_ext) {
const struct nf_ct_timeout *t;
t = rcu_access_pointer(timeout_ext->timeout);
if (!timeout || t == timeout)
RCU_INIT_POINTER(timeout_ext->timeout, NULL);
}
/* We are not intended to delete this conntrack. */
return 0;
......@@ -127,7 +133,11 @@ void nf_ct_destroy_timeout(struct nf_conn *ct)
if (h) {
timeout_ext = nf_ct_timeout_find(ct);
if (timeout_ext) {
h->timeout_put(timeout_ext->timeout);
struct nf_ct_timeout *t;
t = rcu_dereference(timeout_ext->timeout);
if (t)
h->timeout_put(t);
RCU_INIT_POINTER(timeout_ext->timeout, NULL);
}
}
......
......@@ -53,14 +53,14 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
struct flow_offload *flow;
if (unlikely(nf_ct_is_dying(ct) ||
!refcount_inc_not_zero(&ct->ct_general.use)))
if (unlikely(nf_ct_is_dying(ct)))
return NULL;
flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
if (!flow)
goto err_ct_refcnt;
return NULL;
refcount_inc(&ct->ct_general.use);
flow->ct = ct;
flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
......@@ -72,11 +72,6 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
__set_bit(NF_FLOW_DNAT, &flow->flags);
return flow;
err_ct_refcnt:
nf_ct_put(ct);
return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
......@@ -614,14 +609,74 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
static int nf_flow_table_init_net(struct net *net)
{
net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
return net->ft.stat ? 0 : -ENOMEM;
}
static void nf_flow_table_fini_net(struct net *net)
{
free_percpu(net->ft.stat);
}
static int nf_flow_table_pernet_init(struct net *net)
{
int ret;
ret = nf_flow_table_init_net(net);
if (ret < 0)
return ret;
ret = nf_flow_table_init_proc(net);
if (ret < 0)
goto out_proc;
return 0;
out_proc:
nf_flow_table_fini_net(net);
return ret;
}
static void nf_flow_table_pernet_exit(struct list_head *net_exit_list)
{
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list) {
nf_flow_table_fini_proc(net);
nf_flow_table_fini_net(net);
}
}
static struct pernet_operations nf_flow_table_net_ops = {
.init = nf_flow_table_pernet_init,
.exit_batch = nf_flow_table_pernet_exit,
};
static int __init nf_flow_table_module_init(void)
{
return nf_flow_table_offload_init();
int ret;
ret = register_pernet_subsys(&nf_flow_table_net_ops);
if (ret < 0)
return ret;
ret = nf_flow_table_offload_init();
if (ret)
goto out_offload;
return 0;
out_offload:
unregister_pernet_subsys(&nf_flow_table_net_ops);
return ret;
}
static void __exit nf_flow_table_module_exit(void)
{
nf_flow_table_offload_exit();
unregister_pernet_subsys(&nf_flow_table_net_ops);
}
module_init(nf_flow_table_module_init);
......
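
The switch away from refcount_inc_not_zero() rests on the reading that flow_offload_alloc() is only reached while its caller still holds a reference on the conntrack entry, so the count cannot be zero; refcount_inc() states that invariant (and still warns if it is ever broken), while the _not_zero variant is meant for lockless lookups racing with the final put. A generic sketch of the two patterns, not code from this series:

#include <net/netfilter/nf_conntrack.h>

/* Lockless lookup: the entry may be freed concurrently, so the
 * increment has to be conditional. */
static struct nf_conn *example_lookup_get(struct nf_conn *ct)
{
        if (!refcount_inc_not_zero(&ct->ct_general.use))
                return NULL;
        return ct;
}

/* Caller already owns a reference, as in flow_offload_alloc() after
 * this change: zero is impossible and a plain increment suffices. */
static struct nf_conn *example_extra_ref(struct nf_conn *ct)
{
        refcount_inc(&ct->ct_general.use);
        return ct;
}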
......@@ -967,17 +967,22 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
static void flow_offload_work_handler(struct work_struct *work)
{
struct flow_offload_work *offload;
struct net *net;
offload = container_of(work, struct flow_offload_work, work);
net = read_pnet(&offload->flowtable->net);
switch (offload->cmd) {
case FLOW_CLS_REPLACE:
flow_offload_work_add(offload);
NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_add);
break;
case FLOW_CLS_DESTROY:
flow_offload_work_del(offload);
NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_del);
break;
case FLOW_CLS_STATS:
flow_offload_work_stats(offload);
NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_stats);
break;
default:
WARN_ON_ONCE(1);
......@@ -989,12 +994,18 @@ static void flow_offload_work_handler(struct work_struct *work)
static void flow_offload_queue_work(struct flow_offload_work *offload)
{
if (offload->cmd == FLOW_CLS_REPLACE)
struct net *net = read_pnet(&offload->flowtable->net);
if (offload->cmd == FLOW_CLS_REPLACE) {
NF_FLOW_TABLE_STAT_INC(net, count_wq_add);
queue_work(nf_flow_offload_add_wq, &offload->work);
else if (offload->cmd == FLOW_CLS_DESTROY)
} else if (offload->cmd == FLOW_CLS_DESTROY) {
NF_FLOW_TABLE_STAT_INC(net, count_wq_del);
queue_work(nf_flow_offload_del_wq, &offload->work);
else
} else {
NF_FLOW_TABLE_STAT_INC(net, count_wq_stats);
queue_work(nf_flow_offload_stats_wq, &offload->work);
}
}
static struct flow_offload_work *
......
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <net/netfilter/nf_flow_table.h>
static void *nf_flow_table_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ft.stat, cpu);
}
return NULL;
}
static void *nf_flow_table_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ft.stat, cpu);
}
(*pos)++;
return NULL;
}
static void nf_flow_table_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int nf_flow_table_cpu_seq_show(struct seq_file *seq, void *v)
{
const struct nf_flow_table_stat *st = v;
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "wq_add wq_del wq_stats\n");
return 0;
}
seq_printf(seq, "%8d %8d %8d\n",
st->count_wq_add,
st->count_wq_del,
st->count_wq_stats
);
return 0;
}
static const struct seq_operations nf_flow_table_cpu_seq_ops = {
.start = nf_flow_table_cpu_seq_start,
.next = nf_flow_table_cpu_seq_next,
.stop = nf_flow_table_cpu_seq_stop,
.show = nf_flow_table_cpu_seq_show,
};
int nf_flow_table_init_proc(struct net *net)
{
struct proc_dir_entry *pde;
pde = proc_create_net("nf_flowtable", 0444, net->proc_net_stat,
&nf_flow_table_cpu_seq_ops,
sizeof(struct seq_net_private));
return pde ? 0 : -ENOMEM;
}
void nf_flow_table_fini_proc(struct net *net)
{
remove_proc_entry("nf_flowtable", net->proc_net_stat);
}
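
With CONFIG_NF_FLOW_TABLE_PROCFS=y this registers /proc/net/stat/nf_flowtable per network namespace; reading it yields the header above followed by one %8d-formatted row per possible CPU. The values below are purely illustrative:

wq_add wq_del wq_stats
       0        0        0
      12        7       31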
......@@ -626,7 +626,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
nfgenmsg = nlmsg_data(nlh);
skb_pull(skb, msglen);
/* Work around old nft using host byte order */
if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
if (nfgenmsg->res_id == (__force __be16)NFNL_SUBSYS_NFTABLES)
res_id = NFNL_SUBSYS_NFTABLES;
else
res_id = ntohs(nfgenmsg->res_id);
......
......@@ -96,11 +96,13 @@ static int
nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
{
struct nf_conn_help *help = nfct_help(ct);
const struct nf_conntrack_helper *helper;
if (attr == NULL)
return -EINVAL;
if (help->helper->data_len == 0)
helper = rcu_dereference(help->helper);
if (!helper || helper->data_len == 0)
return -EINVAL;
nla_memcpy(help->data, attr, sizeof(help->data));
......@@ -111,9 +113,11 @@ static int
nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct)
{
const struct nf_conn_help *help = nfct_help(ct);
const struct nf_conntrack_helper *helper;
if (help->helper->data_len &&
nla_put(skb, CTA_HELP_INFO, help->helper->data_len, &help->data))
helper = rcu_dereference(help->helper);
if (helper && helper->data_len &&
nla_put(skb, CTA_HELP_INFO, helper->data_len, &help->data))
goto nla_put_failure;
return 0;
......
......@@ -44,7 +44,8 @@ void nft_byteorder_eval(const struct nft_expr *expr,
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 8; i++) {
src64 = nft_reg_load64(&src[i]);
nft_reg_store64(&dst[i], be64_to_cpu(src64));
nft_reg_store64(&dst[i],
be64_to_cpu((__force __be64)src64));
}
break;
case NFT_BYTEORDER_HTON:
......
......@@ -125,13 +125,13 @@ static void nft_payload_n2h(union nft_cmp_offload_data *data,
{
switch (len) {
case 2:
data->val16 = ntohs(*((u16 *)val));
data->val16 = ntohs(*((__be16 *)val));
break;
case 4:
data->val32 = ntohl(*((u32 *)val));
data->val32 = ntohl(*((__be32 *)val));
break;
case 8:
data->val64 = be64_to_cpu(*((u64 *)val));
data->val64 = be64_to_cpu(*((__be64 *)val));
break;
default:
WARN_ON_ONCE(1);
......@@ -197,6 +197,18 @@ static const struct nft_expr_ops nft_cmp_ops = {
.offload = nft_cmp_offload,
};
/* Calculate the mask for the nft_cmp_fast expression. On big endian the
* mask needs to include the *upper* bytes when interpreting that data as
* something smaller than the full u32, therefore a cpu_to_le32 is done.
*/
static u32 nft_cmp_fast_mask(unsigned int len)
{
__le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
data) * BITS_PER_BYTE - len));
return (__force u32)mask;
}
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
......
......@@ -204,12 +204,12 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
case NFT_CT_SRC_IP:
if (nf_ct_l3num(ct) != NFPROTO_IPV4)
goto err;
*dest = tuple->src.u3.ip;
*dest = (__force __u32)tuple->src.u3.ip;
return;
case NFT_CT_DST_IP:
if (nf_ct_l3num(ct) != NFPROTO_IPV4)
goto err;
*dest = tuple->dst.u3.ip;
*dest = (__force __u32)tuple->dst.u3.ip;
return;
case NFT_CT_SRC_IP6:
if (nf_ct_l3num(ct) != NFPROTO_IPV6)
......
......@@ -266,7 +266,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
switch (priv->len) {
case 2:
old.v16 = get_unaligned((u16 *)(opt + offset));
old.v16 = (__force __be16)get_unaligned((u16 *)(opt + offset));
new.v16 = (__force __be16)nft_reg_load16(
&regs->data[priv->sreg]);
......@@ -281,18 +281,18 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
if (old.v16 == new.v16)
return;
put_unaligned(new.v16, (u16*)(opt + offset));
put_unaligned(new.v16, (__be16*)(opt + offset));
inet_proto_csum_replace2(&tcph->check, pkt->skb,
old.v16, new.v16, false);
break;
case 4:
new.v32 = regs->data[priv->sreg];
old.v32 = get_unaligned((u32 *)(opt + offset));
new.v32 = nft_reg_load_be32(&regs->data[priv->sreg]);
old.v32 = (__force __be32)get_unaligned((u32 *)(opt + offset));
if (old.v32 == new.v32)
return;
put_unaligned(new.v32, (u32*)(opt + offset));
put_unaligned(new.v32, (__be32*)(opt + offset));
inet_proto_csum_replace4(&tcph->check, pkt->skb,
old.v32, new.v32, false);
break;
......
......@@ -99,7 +99,7 @@ static int nft_osf_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (nla_put_u8(skb, NFTA_OSF_TTL, priv->ttl))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_OSF_FLAGS, ntohl(priv->flags)))
if (nla_put_u32(skb, NFTA_OSF_FLAGS, ntohl((__force __be32)priv->flags)))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg))
......
......@@ -21,7 +21,7 @@ struct nft_bitmap_elem {
* the element state in the current and the future generation.
*
* An element can be in three states. The generation cursor is represented using
* the ^ character, note that this cursor shifts on every succesful transaction.
* the ^ character, note that this cursor shifts on every successful transaction.
* If no transaction is going on, we observe all elements are in the following
* state:
*
......@@ -39,7 +39,7 @@ struct nft_bitmap_elem {
* 10 = this element is active in the current generation and it becomes inactive
* ^ in the next one. This happens when the element is deactivated but commit
* path has not yet been executed yet, so removal is still pending. On
* transation abortion, the next generation bit is reset to go back to
* transaction abortion, the next generation bit is reset to go back to
* restore its previous state.
*/
struct nft_bitmap {
......
......@@ -163,7 +163,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
priv->key = ntohl(nla_get_u32(tb[NFTA_SOCKET_KEY]));
priv->key = ntohl(nla_get_be32(tb[NFTA_SOCKET_KEY]));
switch(priv->key) {
case NFT_SOCKET_TRANSPARENT:
case NFT_SOCKET_WILDCARD:
......@@ -179,7 +179,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,
if (!tb[NFTA_SOCKET_LEVEL])
return -EINVAL;
level = ntohl(nla_get_u32(tb[NFTA_SOCKET_LEVEL]));
level = ntohl(nla_get_be32(tb[NFTA_SOCKET_LEVEL]));
if (level > 255)
return -EOPNOTSUPP;
......@@ -202,12 +202,12 @@ static int nft_socket_dump(struct sk_buff *skb,
{
const struct nft_socket *priv = nft_expr_priv(expr);
if (nla_put_u32(skb, NFTA_SOCKET_KEY, htonl(priv->key)))
if (nla_put_be32(skb, NFTA_SOCKET_KEY, htonl(priv->key)))
return -1;
if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg))
return -1;
if (priv->key == NFT_SOCKET_CGROUPV2 &&
nla_put_u32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
return -1;
return 0;
}
......
......@@ -52,11 +52,11 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
if (priv->sreg_addr)
taddr = regs->data[priv->sreg_addr];
taddr = nft_reg_load_be32(&regs->data[priv->sreg_addr]);
taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);
if (priv->sreg_port)
tport = nft_reg_load16(&regs->data[priv->sreg_port]);
tport = nft_reg_load_be16(&regs->data[priv->sreg_port]);
if (!tport)
tport = hp->dest;
......@@ -124,7 +124,7 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr);
if (priv->sreg_port)
tport = nft_reg_load16(&regs->data[priv->sreg_port]);
tport = nft_reg_load_be16(&regs->data[priv->sreg_port]);
if (!tport)
tport = hp->dest;
......
......@@ -383,8 +383,9 @@ static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
struct ip_tunnel_info *info,
struct nft_tunnel_opts *opts)
{
int err, rem, type = 0;
struct nlattr *nla;
__be16 type = 0;
int err, rem;
err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
nft_tunnel_opts_policy, NULL);
......
......@@ -51,7 +51,7 @@ static int nft_xfrm_get_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
priv->key = ntohl(nla_get_u32(tb[NFTA_XFRM_KEY]));
priv->key = ntohl(nla_get_be32(tb[NFTA_XFRM_KEY]));
switch (priv->key) {
case NFT_XFRM_KEY_REQID:
case NFT_XFRM_KEY_SPI:
......@@ -134,13 +134,13 @@ static void nft_xfrm_state_get_key(const struct nft_xfrm *priv,
WARN_ON_ONCE(1);
break;
case NFT_XFRM_KEY_DADDR_IP4:
*dest = state->id.daddr.a4;
*dest = (__force __u32)state->id.daddr.a4;
return;
case NFT_XFRM_KEY_DADDR_IP6:
memcpy(dest, &state->id.daddr.in6, sizeof(struct in6_addr));
return;
case NFT_XFRM_KEY_SADDR_IP4:
*dest = state->props.saddr.a4;
*dest = (__force __u32)state->props.saddr.a4;
return;
case NFT_XFRM_KEY_SADDR_IP6:
memcpy(dest, &state->props.saddr.in6, sizeof(struct in6_addr));
......@@ -149,7 +149,7 @@ static void nft_xfrm_state_get_key(const struct nft_xfrm *priv,
*dest = state->props.reqid;
return;
case NFT_XFRM_KEY_SPI:
*dest = state->id.spi;
*dest = (__force __u32)state->id.spi;
return;
}
......
......@@ -96,7 +96,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
return -ENOMEM;
}
help->helper = helper;
rcu_assign_pointer(help->helper, helper);
return 0;
}
......@@ -136,6 +136,21 @@ static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
}
}
static void xt_ct_put_helper(struct nf_conn_help *help)
{
struct nf_conntrack_helper *helper;
if (!help)
return;
/* not yet exposed to other cpus, or ruleset
* already detached (post-replacement).
*/
helper = rcu_dereference_raw(help->helper);
if (helper)
nf_conntrack_helper_put(helper);
}
static int xt_ct_tg_check(const struct xt_tgchk_param *par,
struct xt_ct_target_info_v1 *info)
{
......@@ -207,8 +222,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
err4:
help = nfct_help(ct);
if (help)
nf_conntrack_helper_put(help->helper);
xt_ct_put_helper(help);
err3:
nf_ct_tmpl_free(ct);
err2:
......@@ -270,8 +284,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
if (ct) {
help = nfct_help(ct);
if (help)
nf_conntrack_helper_put(help->helper);
xt_ct_put_helper(help);
nf_ct_netns_put(par->net, par->family);
......
......@@ -24,6 +24,8 @@ MODULE_ALIAS("ip6t_DSCP");
MODULE_ALIAS("ipt_TOS");
MODULE_ALIAS("ip6t_TOS");
#define XT_DSCP_ECN_MASK 3u
static unsigned int
dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
......@@ -34,8 +36,7 @@ dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
return NF_DROP;
ipv4_change_dsfield(ip_hdr(skb),
(__force __u8)(~XT_DSCP_MASK),
ipv4_change_dsfield(ip_hdr(skb), XT_DSCP_ECN_MASK,
dinfo->dscp << XT_DSCP_SHIFT);
}
......@@ -52,8 +53,7 @@ dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par)
if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
return NF_DROP;
ipv6_change_dsfield(ipv6_hdr(skb),
(__force __u8)(~XT_DSCP_MASK),
ipv6_change_dsfield(ipv6_hdr(skb), XT_DSCP_ECN_MASK,
dinfo->dscp << XT_DSCP_SHIFT);
}
return XT_CONTINUE;
......
......@@ -239,8 +239,8 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
oldlen = ipv6h->payload_len;
newlen = htons(ntohs(oldlen) + ret);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_add(csum_sub(skb->csum, oldlen),
newlen);
skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)oldlen),
(__force __wsum)newlen);
ipv6h->payload_len = newlen;
}
return XT_CONTINUE;
......
......@@ -74,18 +74,10 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
/* This should be in a separate target, but we don't do multiple
targets on the same rule yet */
skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
iph->protocol, &iph->daddr, ntohs(hp->dest),
&laddr, ntohs(lport), skb->mark);
nf_tproxy_assign_sock(skb, sk);
return NF_ACCEPT;
}
pr_debug("no socket, dropping: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
iph->protocol, &iph->saddr, ntohs(hp->source),
&iph->daddr, ntohs(hp->dest), skb->mark);
return NF_DROP;
}
......@@ -122,16 +114,12 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
int tproto;
tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
if (tproto < 0) {
pr_debug("unable to find transport header in IPv6 packet, dropping\n");
if (tproto < 0)
return NF_DROP;
}
hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
if (hp == NULL) {
pr_debug("unable to grab transport header contents in IPv6 packet, dropping\n");
if (!hp)
return NF_DROP;
}
/* check if there's an ongoing connection on the packet
* addresses, this happens if the redirect already happened
......@@ -168,19 +156,10 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
/* This should be in a separate target, but we don't do multiple
targets on the same rule yet */
skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
tproto, &iph->saddr, ntohs(hp->source),
laddr, ntohs(lport), skb->mark);
nf_tproxy_assign_sock(skb, sk);
return NF_ACCEPT;
}
pr_debug("no socket, dropping: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
tproto, &iph->saddr, ntohs(hp->source),
&iph->daddr, ntohs(hp->dest), skb->mark);
return NF_DROP;
}
......
......@@ -62,10 +62,10 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
key[4] = zone->id;
} else {
const struct iphdr *iph = ip_hdr(skb);
key[0] = (info->flags & XT_CONNLIMIT_DADDR) ?
iph->daddr : iph->saddr;
key[0] &= info->mask.ip;
key[0] = (info->flags & XT_CONNLIMIT_DADDR) ?
(__force __u32)iph->daddr : (__force __u32)iph->saddr;
key[0] &= (__force __u32)info->mask.ip;
key[1] = zone->id;
}
......
......@@ -277,7 +277,7 @@ static struct nf_flowtable_type flowtable_ct = {
.owner = THIS_MODULE,
};
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
struct tcf_ct_flow_table *ct_ft;
int err = -ENOMEM;
......@@ -303,6 +303,7 @@ static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
err = nf_flow_table_init(&ct_ft->nf_ft);
if (err)
goto err_init;
write_pnet(&ct_ft->nf_ft.net, net);
__module_get(THIS_MODULE);
out_unlock:
......@@ -1391,7 +1392,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
if (err)
goto cleanup;
err = tcf_ct_flow_table_get(params);
err = tcf_ct_flow_table_get(net, params);
if (err)
goto cleanup;
......