Commit 4165079b authored by Florian Westphal, committed by David S. Miller

net: switch secpath to use skb extension infrastructure

Remove skb->sp and allocate secpath storage via extension
infrastructure.  This also reduces sk_buff by 8 bytes on x86_64.

Total size of allyesconfig kernel is reduced slightly, as there is
less inlined code (one conditional atomic op instead of two on
skb_clone).

No differences in throughput in the following ipsec performance tests:
- transport mode with aes on 10GB link
- tunnel mode between two network namespaces with aes and null cipher
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a84e3f53
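
For driver authors, the visible change is the receive-path pattern for attaching offloaded state to the skb. A minimal before/after sketch, mirroring the xfrm_device documentation hunk below (the surrounding driver context and the xs state lookup are elided):

/* Old pattern: drivers dereferenced skb->sp directly. */
skb->sp = secpath_dup(skb->sp);
skb->sp->xvec[skb->sp->len++] = xs;
skb->sp->olen++;

/* New pattern: secpath storage lives in an skb extension. */
struct sec_path *sp = secpath_set(skb);

if (!sp)
        return;         /* extension allocation failed */
sp->xvec[sp->len++] = xs;
sp->olen++;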

@@ -111,9 +111,10 @@ the stack in xfrm_input().
         xfrm_state_hold(xs);
         store the state information into the skb
-        skb->sp = secpath_dup(skb->sp);
-        skb->sp->xvec[skb->sp->len++] = xs;
-        skb->sp->olen++;
+        sp = secpath_set(skb);
+        if (!sp) return;
+        sp->xvec[sp->len++] = xs;
+        sp->olen++;
         indicate the success and/or error status of the offload
         xo = xfrm_offload(skb);

@@ -714,9 +714,6 @@ struct sk_buff {
                 struct list_head tcp_tsorted_anchor;
         };
-#ifdef CONFIG_XFRM
-        struct sec_path *sp;
-#endif
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
         unsigned long _nfct;
 #endif

@@ -3907,6 +3904,9 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
 enum skb_ext_id {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
         SKB_EXT_BRIDGE_NF,
 #endif
+#ifdef CONFIG_XFRM
+        SKB_EXT_SEC_PATH,
+#endif
         SKB_EXT_NUM, /* must be last */
 };

@@ -4069,7 +4069,7 @@ static inline void skb_init_secmark(struct sk_buff *skb)
 static inline int secpath_exists(const struct sk_buff *skb)
 {
 #ifdef CONFIG_XFRM
-        return skb->sp != NULL;
+        return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
 #else
         return 0;
 #endif

@@ -4127,7 +4127,7 @@ static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
 static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
 {
 #ifdef CONFIG_XFRM
-        return skb->sp;
+        return skb_ext_find(skb, SKB_EXT_SEC_PATH);
 #else
         return NULL;
 #endif
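
Callers that used to test or dereference skb->sp directly are expected to go through the two accessors above. A minimal sketch of a consumer, assuming CONFIG_XFRM; the helper name is made up for illustration:

/* Hypothetical helper: how many xfrm states does this skb carry? */
static unsigned int secpath_state_count(const struct sk_buff *skb)
{
        const struct sec_path *sp = skb_sec_path(skb);

        /* NULL when no SKB_EXT_SEC_PATH extension is attached. */
        return sp ? sp->len : 0;
}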

@@ -1096,7 +1096,6 @@ struct xfrm_offload {
 };
 struct sec_path {
-        refcount_t refcnt;
         int len;
         int olen;

@@ -1104,32 +1103,13 @@ struct sec_path {
         struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH];
 };
-static inline struct sec_path *
-secpath_get(struct sec_path *sp)
-{
-        if (sp)
-                refcount_inc(&sp->refcnt);
-        return sp;
-}
-void __secpath_destroy(struct sec_path *sp);
-static inline void
-secpath_put(struct sec_path *sp)
-{
-        if (sp && refcount_dec_and_test(&sp->refcnt))
-                __secpath_destroy(sp);
-}
-struct sec_path *secpath_dup(struct sec_path *src);
 struct sec_path *secpath_set(struct sk_buff *skb);
 static inline void
 secpath_reset(struct sk_buff *skb)
 {
 #ifdef CONFIG_XFRM
-        secpath_put(skb->sp);
-        skb->sp = NULL;
+        skb_ext_del(skb, SKB_EXT_SEC_PATH);
 #endif
 }

@@ -609,7 +609,6 @@ static void kfree_skbmem(struct sk_buff *skb)
 void skb_release_head_state(struct sk_buff *skb)
 {
         skb_dst_drop(skb);
-        secpath_reset(skb);
         if (skb->destructor) {
                 WARN_ON(in_irq());
                 skb->destructor(skb);

@@ -798,9 +797,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
         memcpy(new->cb, old->cb, sizeof(old->cb));
         skb_dst_copy(new, old);
         __skb_ext_copy(new, old);
-#ifdef CONFIG_XFRM
-        new->sp = secpath_get(old->sp);
-#endif
         __nf_copy(new, old, false);
         /* Note : this field could be in headers_start/headers_end section

@@ -3912,6 +3908,9 @@ static const u8 skb_ext_type_len[] = {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
         [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
 #endif
+#ifdef CONFIG_XFRM
+        [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
+#endif
 };
 static __always_inline unsigned int skb_ext_total_length(void)

@@ -3919,6 +3918,9 @@ static __always_inline unsigned int skb_ext_total_length(void)
         return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                 skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
 #endif
+#ifdef CONFIG_XFRM
+                skb_ext_type_len[SKB_EXT_SEC_PATH] +
+#endif
                 0;
 }

@@ -5610,7 +5612,8 @@ static struct skb_ext *skb_ext_alloc(void)
         return new;
 }
-static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old)
+static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
+                                         unsigned int old_active)
 {
         struct skb_ext *new;

@@ -5624,6 +5627,15 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old)
         memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
         refcount_set(&new->refcnt, 1);
+#ifdef CONFIG_XFRM
+        if (old_active & (1 << SKB_EXT_SEC_PATH)) {
+                struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
+                unsigned int i;
+                for (i = 0; i < sp->len; i++)
+                        xfrm_state_hold(sp->xvec[i]);
+        }
+#endif
         __skb_ext_put(old);
         return new;
 }
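
The new old_active argument lets the copy-on-write path know whether a secpath lives in the old extension block: if it does, every xfrm_state it references must gain a reference, and every copy that is later torn down (skb_ext_put_sp() below) must drop one. A minimal userspace model of that invariant, with made-up toy_* types standing in for the kernel structures:

/* Userspace model (not kernel code) of the hold-on-copy rule: when the
 * extension block carrying a secpath is cloned, every state referenced
 * from xvec[] gains a reference, and every copy torn down drops one. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

struct toy_state   { int refcnt; };                     /* stands in for xfrm_state */
struct toy_secpath { struct toy_state *xvec[4]; int len; };

static void toy_state_hold(struct toy_state *x) { x->refcnt++; }
static void toy_state_put(struct toy_state *x)  { x->refcnt--; }

/* Analogue of the COW branch: copy the secpath, then hold each state. */
static void toy_secpath_cow(struct toy_secpath *dst, const struct toy_secpath *src)
{
        int i;

        memcpy(dst, src, sizeof(*dst));
        for (i = 0; i < dst->len; i++)
                toy_state_hold(dst->xvec[i]);
}

/* Analogue of skb_ext_put_sp(): drop one reference per secpath entry. */
static void toy_secpath_release(struct toy_secpath *sp)
{
        int i;

        for (i = 0; i < sp->len; i++)
                toy_state_put(sp->xvec[i]);
        sp->len = 0;
}

int main(void)
{
        struct toy_state x = { .refcnt = 1 };            /* creator's reference */
        struct toy_secpath a = { .xvec = { &x }, .len = 1 };
        struct toy_secpath b;

        toy_state_hold(&x);                              /* reference held via 'a' */
        toy_secpath_cow(&b, &a);                         /* clone: one more for 'b' */
        assert(x.refcnt == 3);

        toy_secpath_release(&b);
        toy_secpath_release(&a);
        assert(x.refcnt == 1);                           /* only the creator's ref left */
        puts("refcounts balanced");
        return 0;
}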

@@ -5650,7 +5662,7 @@ void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
         if (skb->active_extensions) {
                 old = skb->extensions;
-                new = skb_ext_maybe_cow(old);
+                new = skb_ext_maybe_cow(old, skb->active_extensions);
                 if (!new)
                         return NULL;

@@ -5679,6 +5691,16 @@ void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
 }
 EXPORT_SYMBOL(skb_ext_add);
+#ifdef CONFIG_XFRM
+static void skb_ext_put_sp(struct sec_path *sp)
+{
+        unsigned int i;
+        for (i = 0; i < sp->len; i++)
+                xfrm_state_put(sp->xvec[i]);
+}
+#endif
 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
 {
         struct skb_ext *ext = skb->extensions;

@@ -5687,6 +5709,14 @@ void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
         if (skb->active_extensions == 0) {
                 skb->extensions = NULL;
                 __skb_ext_put(ext);
+#ifdef CONFIG_XFRM
+        } else if (id == SKB_EXT_SEC_PATH &&
+                   refcount_read(&ext->refcnt) == 1) {
+                struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
+                skb_ext_put_sp(sp);
+                sp->len = 0;
+#endif
         }
 }
 EXPORT_SYMBOL(__skb_ext_del);

@@ -5702,6 +5732,11 @@ void __skb_ext_put(struct skb_ext *ext)
         if (!refcount_dec_and_test(&ext->refcnt))
                 return;
 free_now:
+#ifdef CONFIG_XFRM
+        if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
+                skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
+#endif
         kmem_cache_free(skbuff_ext_cache, ext);
 }
 EXPORT_SYMBOL(__skb_ext_put);

@@ -38,8 +38,6 @@ struct xfrm_trans_cb {
 #define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
-static struct kmem_cache *secpath_cachep __ro_after_init;
 static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
 static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];

@@ -111,54 +109,21 @@ static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
         return ret;
 }
-void __secpath_destroy(struct sec_path *sp)
-{
-        int i;
-        for (i = 0; i < sp->len; i++)
-                xfrm_state_put(sp->xvec[i]);
-        kmem_cache_free(secpath_cachep, sp);
-}
-EXPORT_SYMBOL(__secpath_destroy);
-struct sec_path *secpath_dup(struct sec_path *src)
+struct sec_path *secpath_set(struct sk_buff *skb)
 {
-        struct sec_path *sp;
+        struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
-        sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
+        sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
         if (!sp)
                 return NULL;
-        sp->len = 0;
-        sp->olen = 0;
+        if (tmp) /* reused existing one (was COW'd if needed) */
+                return sp;
+        /* allocated new secpath */
         memset(sp->ovec, 0, sizeof(sp->ovec));
-        if (src) {
-                int i;
-                memcpy(sp, src, sizeof(*sp));
-                for (i = 0; i < sp->len; i++)
-                        xfrm_state_hold(sp->xvec[i]);
-        }
-        refcount_set(&sp->refcnt, 1);
-        return sp;
-}
-EXPORT_SYMBOL(secpath_dup);
-struct sec_path *secpath_set(struct sk_buff *skb)
-{
-        struct sec_path *sp = skb->sp;
-        /* Allocate new secpath or COW existing one. */
-        if (!sp || refcount_read(&sp->refcnt) != 1) {
-                sp = secpath_dup(skb->sp);
-                if (!sp)
-                        return NULL;
-                if (skb->sp)
-                        secpath_put(skb->sp);
-                skb->sp = sp;
-        }
         sp->olen = 0;
         sp->len = 0;
         return sp;
 }

@@ -552,11 +517,6 @@ void __init xfrm_input_init(void)
         if (err)
                 gro_cells.cells = NULL;
-        secpath_cachep = kmem_cache_create("secpath_cache",
-                                           sizeof(struct sec_path),
-                                           0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-                                           NULL);
         for_each_possible_cpu(i) {
                 struct xfrm_trans_tasklet *trans;