Commit 2d758073 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: consolidate cgroup socket tracking

The cgroup core and the memory controller need to track socket ownership
for different purposes, but the tracking sites being entirely different
is kind of ugly.

Be a better citizen and rename the memory controller callbacks to match
the cgroup core callbacks, then move them to the same place.
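
The effect, as a minimal sketch of the paired call sites this patch creates (condensed from the net/core/sock.c hunks below; not the literal patched code):

	/* allocation side: sk_alloc() and sk_clone_lock() */
	mem_cgroup_sk_alloc(sk);		/* renamed from sock_update_memcg(), previously called from TCP code */
	cgroup_sk_alloc(&sk->sk_cgrp_data);	/* cgroup core tracking, unchanged */

	/* teardown side: sk_prot_free() */
	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);			/* renamed from sock_release_memcg(), previously called from tcp_v4_destroy_sock() */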

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20160914194846.11153-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 08ea8c07
include/linux/memcontrol.h
@@ -773,13 +773,13 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
 struct sock;
-void sock_update_memcg(struct sock *sk);
-void sock_release_memcg(struct sock *sk);
 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
 #ifdef CONFIG_MEMCG
 extern struct static_key_false memcg_sockets_enabled_key;
 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+void mem_cgroup_sk_alloc(struct sock *sk);
+void mem_cgroup_sk_free(struct sock *sk);
 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
@@ -792,6 +792,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 }
 #else
 #define mem_cgroup_sockets_enabled 0
+static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
+static inline void mem_cgroup_sk_free(struct sock *sk) { };
 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
 	return false;
...
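
Note that the !CONFIG_MEMCG branch gains empty inline stubs, so the generic socket code added below can call the hooks unconditionally, without an #ifdef at each call site. A sketch of the intended caller shape (illustrative, not part of this patch):

	mem_cgroup_sk_alloc(sk);	/* no-op stub when CONFIG_MEMCG is off */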
mm/memcontrol.c
@@ -2939,16 +2939,16 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
 		/*
 		 * The active flag needs to be written after the static_key
 		 * update. This is what guarantees that the socket activation
-		 * function is the last one to run. See sock_update_memcg() for
-		 * details, and note that we don't mark any socket as belonging
-		 * to this memcg until that flag is up.
+		 * function is the last one to run. See mem_cgroup_sk_alloc()
+		 * for details, and note that we don't mark any socket as
+		 * belonging to this memcg until that flag is up.
 		 *
 		 * We need to do this, because static_keys will span multiple
 		 * sites, but we can't control their order. If we mark a socket
 		 * as accounted, but the accounting functions are not patched in
 		 * yet, we'll lose accounting.
 		 *
-		 * We never race with the readers in sock_update_memcg(),
+		 * We never race with the readers in mem_cgroup_sk_alloc(),
 		 * because when this value change, the code to process it is not
 		 * patched in yet.
 		 */
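
Condensed, the ordering this comment documents looks like the following sketch (reconstructed from the surrounding memcg_update_tcp_limit() code, which this hunk does not show in full; tcpmem_active is the legacy TCP limit's active flag in this file):

	static_branch_inc(&memcg_sockets_enabled_key);	/* patch the accounting call sites in first */
	memcg->tcpmem_active = true;			/* only then may sockets be marked as accounted */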
@@ -5651,11 +5651,15 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
 EXPORT_SYMBOL(memcg_sockets_enabled_key);
 
-void sock_update_memcg(struct sock *sk)
+void mem_cgroup_sk_alloc(struct sock *sk)
 {
 	struct mem_cgroup *memcg;
 
-	/* Socket cloning can throw us here with sk_cgrp already
+	if (!mem_cgroup_sockets_enabled)
+		return;
+
+	/*
+	 * Socket cloning can throw us here with sk_memcg already
 	 * filled. It won't however, necessarily happen from
 	 * process context. So the test for root memcg given
 	 * the current task's memcg won't help us in this case.
@@ -5680,12 +5684,11 @@ void sock_update_memcg(struct sock *sk)
 out:
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(sock_update_memcg);
 
-void sock_release_memcg(struct sock *sk)
+void mem_cgroup_sk_free(struct sock *sk)
 {
-	WARN_ON(!sk->sk_memcg);
-	css_put(&sk->sk_memcg->css);
+	if (sk->sk_memcg)
+		css_put(&sk->sk_memcg->css);
 }
 
 /**
...
net/core/sock.c
@@ -1363,6 +1363,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
 	slab = prot->slab;
 
 	cgroup_sk_free(&sk->sk_cgrp_data);
+	mem_cgroup_sk_free(sk);
 	security_sk_free(sk);
 	if (slab != NULL)
 		kmem_cache_free(slab, sk);
@@ -1399,6 +1400,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		sock_net_set(sk, net);
 		atomic_set(&sk->sk_wmem_alloc, 1);
 
+		mem_cgroup_sk_alloc(sk);
 		cgroup_sk_alloc(&sk->sk_cgrp_data);
 		sock_update_classid(&sk->sk_cgrp_data);
 		sock_update_netprioidx(&sk->sk_cgrp_data);
@@ -1545,6 +1547,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		newsk->sk_incoming_cpu = raw_smp_processor_id();
 		atomic64_set(&newsk->sk_cookie, 0);
 
+		mem_cgroup_sk_alloc(newsk);
 		cgroup_sk_alloc(&newsk->sk_cgrp_data);
 
 		/*
@@ -1569,9 +1572,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		sk_set_socket(newsk, NULL);
 		newsk->sk_wq = NULL;
 
-		if (mem_cgroup_sockets_enabled && sk->sk_memcg)
-			sock_update_memcg(newsk);
-
 		if (newsk->sk_prot->sockets_allocated)
 			sk_sockets_allocated_inc(newsk);
...
net/ipv4/tcp.c
@@ -424,8 +424,6 @@ void tcp_init_sock(struct sock *sk)
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
 	local_bh_disable();
-	if (mem_cgroup_sockets_enabled)
-		sock_update_memcg(sk);
 	sk_sockets_allocated_inc(sk);
 	local_bh_enable();
 }
...
net/ipv4/tcp_ipv4.c
@@ -1871,9 +1871,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
 	local_bh_disable();
 	sk_sockets_allocated_dec(sk);
 	local_bh_enable();
-
-	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
-		sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
...