Commit bbc20b70 authored by Eric Dumazet's avatar Eric Dumazet Committed by Jakub Kicinski

net: reduce indentation level in sk_clone_lock()

Rework initial test to jump over init code
if memory allocation has failed.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20210127152731.748663-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent d1f3bdd4
@@ -1876,123 +1876,120 @@ static void sk_init_common(struct sock *sk)
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{ {
struct proto *prot = READ_ONCE(sk->sk_prot); struct proto *prot = READ_ONCE(sk->sk_prot);
struct sock *newsk; struct sk_filter *filter;
bool is_charged = true; bool is_charged = true;
struct sock *newsk;
newsk = sk_prot_alloc(prot, priority, sk->sk_family); newsk = sk_prot_alloc(prot, priority, sk->sk_family);
if (newsk != NULL) { if (!newsk)
struct sk_filter *filter; goto out;
sock_copy(newsk, sk); sock_copy(newsk, sk);
newsk->sk_prot_creator = prot; newsk->sk_prot_creator = prot;
/* SANITY */ /* SANITY */
if (likely(newsk->sk_net_refcnt)) if (likely(newsk->sk_net_refcnt))
get_net(sock_net(newsk)); get_net(sock_net(newsk));
sk_node_init(&newsk->sk_node); sk_node_init(&newsk->sk_node);
sock_lock_init(newsk); sock_lock_init(newsk);
bh_lock_sock(newsk); bh_lock_sock(newsk);
newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
newsk->sk_backlog.len = 0; newsk->sk_backlog.len = 0;
atomic_set(&newsk->sk_rmem_alloc, 0); atomic_set(&newsk->sk_rmem_alloc, 0);
/*
* sk_wmem_alloc set to one (see sk_free() and sock_wfree())
*/
refcount_set(&newsk->sk_wmem_alloc, 1);
atomic_set(&newsk->sk_omem_alloc, 0);
sk_init_common(newsk);
newsk->sk_dst_cache = NULL; /* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
newsk->sk_dst_pending_confirm = 0; refcount_set(&newsk->sk_wmem_alloc, 1);
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
atomic_set(&newsk->sk_drops, 0);
newsk->sk_send_head = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
atomic_set(&newsk->sk_zckey, 0);
sock_reset_flag(newsk, SOCK_DONE); atomic_set(&newsk->sk_omem_alloc, 0);
sk_init_common(newsk);
/* sk->sk_memcg will be populated at accept() time */ newsk->sk_dst_cache = NULL;
newsk->sk_memcg = NULL; newsk->sk_dst_pending_confirm = 0;
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
atomic_set(&newsk->sk_drops, 0);
newsk->sk_send_head = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
atomic_set(&newsk->sk_zckey, 0);
cgroup_sk_clone(&newsk->sk_cgrp_data); sock_reset_flag(newsk, SOCK_DONE);
rcu_read_lock(); /* sk->sk_memcg will be populated at accept() time */
filter = rcu_dereference(sk->sk_filter); newsk->sk_memcg = NULL;
if (filter != NULL)
/* though it's an empty new sock, the charging may fail
* if sysctl_optmem_max was changed between creation of
* original socket and cloning
*/
is_charged = sk_filter_charge(newsk, filter);
RCU_INIT_POINTER(newsk->sk_filter, filter);
rcu_read_unlock();
if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { cgroup_sk_clone(&newsk->sk_cgrp_data);
/* We need to make sure that we don't uncharge the new
* socket if we couldn't charge it in the first place
* as otherwise we uncharge the parent's filter.
*/
if (!is_charged)
RCU_INIT_POINTER(newsk->sk_filter, NULL);
sk_free_unlock_clone(newsk);
newsk = NULL;
goto out;
}
RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
if (bpf_sk_storage_clone(sk, newsk)) { rcu_read_lock();
sk_free_unlock_clone(newsk); filter = rcu_dereference(sk->sk_filter);
newsk = NULL; if (filter != NULL)
goto out; /* though it's an empty new sock, the charging may fail
} * if sysctl_optmem_max was changed between creation of
* original socket and cloning
*/
is_charged = sk_filter_charge(newsk, filter);
RCU_INIT_POINTER(newsk->sk_filter, filter);
rcu_read_unlock();
/* Clear sk_user_data if parent had the pointer tagged if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
* as not suitable for copying when cloning. /* We need to make sure that we don't uncharge the new
* socket if we couldn't charge it in the first place
* as otherwise we uncharge the parent's filter.
*/ */
if (sk_user_data_is_nocopy(newsk)) if (!is_charged)
newsk->sk_user_data = NULL; RCU_INIT_POINTER(newsk->sk_filter, NULL);
sk_free_unlock_clone(newsk);
newsk = NULL;
goto out;
}
RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
newsk->sk_err = 0; if (bpf_sk_storage_clone(sk, newsk)) {
newsk->sk_err_soft = 0; sk_free_unlock_clone(newsk);
newsk->sk_priority = 0; newsk = NULL;
newsk->sk_incoming_cpu = raw_smp_processor_id(); goto out;
if (likely(newsk->sk_net_refcnt)) }
sock_inuse_add(sock_net(newsk), 1);
/* /* Clear sk_user_data if parent had the pointer tagged
* Before updating sk_refcnt, we must commit prior changes to memory * as not suitable for copying when cloning.
* (Documentation/RCU/rculist_nulls.rst for details) */
*/ if (sk_user_data_is_nocopy(newsk))
smp_wmb(); newsk->sk_user_data = NULL;
refcount_set(&newsk->sk_refcnt, 2);
/* newsk->sk_err = 0;
* Increment the counter in the same struct proto as the master newsk->sk_err_soft = 0;
* sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that newsk->sk_priority = 0;
* is the same as sk->sk_prot->socks, as this field was copied newsk->sk_incoming_cpu = raw_smp_processor_id();
* with memcpy). if (likely(newsk->sk_net_refcnt))
* sock_inuse_add(sock_net(newsk), 1);
* This _changes_ the previous behaviour, where
* tcp_create_openreq_child always was incrementing the
* equivalent to tcp_prot->socks (inet_sock_nr), so this have
* to be taken into account in all callers. -acme
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
sk_tx_queue_clear(newsk);
RCU_INIT_POINTER(newsk->sk_wq, NULL);
if (newsk->sk_prot->sockets_allocated) /* Before updating sk_refcnt, we must commit prior changes to memory
sk_sockets_allocated_inc(newsk); * (Documentation/RCU/rculist_nulls.rst for details)
*/
smp_wmb();
refcount_set(&newsk->sk_refcnt, 2);
if (sock_needs_netstamp(sk) && /* Increment the counter in the same struct proto as the master
newsk->sk_flags & SK_FLAGS_TIMESTAMP) * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
net_enable_timestamp(); * is the same as sk->sk_prot->socks, as this field was copied
} * with memcpy).
*
* This _changes_ the previous behaviour, where
* tcp_create_openreq_child always was incrementing the
* equivalent to tcp_prot->socks (inet_sock_nr), so this have
* to be taken into account in all callers. -acme
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
sk_tx_queue_clear(newsk);
RCU_INIT_POINTER(newsk->sk_wq, NULL);
if (newsk->sk_prot->sockets_allocated)
sk_sockets_allocated_inc(newsk);
if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
net_enable_timestamp();
out: out:
return newsk; return newsk;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment