Commit 447cd7a0 authored by Kirill Tkhai, committed by David S. Miller

net: Allow pernet_operations to be executed in parallel

This adds a new pernet_operations::async flag to indicate operations
whose ->init(), ->exit() and ->exit_batch() methods are allowed
to be executed in parallel with the methods of any other pernet_operations.

When there are only asynchronous pernet_operations in the system,
net_mutex won't be taken for a net construction and destruction.

Also, remove BUG_ON(mutex_is_locked()) from net_assign_generic()
without replacing with the equivalent net_sem check, as there is
one more lockdep assert below.

v3: Add comment near net_mutex.
Suggested-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Andrei Vagin <avagin@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bcab1ddd
...@@ -313,6 +313,12 @@ struct pernet_operations { ...@@ -313,6 +313,12 @@ struct pernet_operations {
void (*exit_batch)(struct list_head *net_exit_list); void (*exit_batch)(struct list_head *net_exit_list);
unsigned int *id; unsigned int *id;
size_t size; size_t size;
/*
 * Indicates the above methods are allowed to be executed in parallel
 * with methods of any other pernet_operations, i.e. they do not
 * need synchronization via net_mutex.
 */
bool async;
}; };
/* /*
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
static LIST_HEAD(pernet_list); static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list; static struct list_head *first_device = &pernet_list;
/* Used only if there are !async pernet_operations registered */
DEFINE_MUTEX(net_mutex); DEFINE_MUTEX(net_mutex);
LIST_HEAD(net_namespace_list); LIST_HEAD(net_namespace_list);
...@@ -41,8 +42,9 @@ struct net init_net = { ...@@ -41,8 +42,9 @@ struct net init_net = {
EXPORT_SYMBOL(init_net); EXPORT_SYMBOL(init_net);
static bool init_net_initialized; static bool init_net_initialized;
static unsigned nr_sync_pernet_ops;
/* /*
* net_sem: protects: pernet_list, net_generic_ids, * net_sem: protects: pernet_list, net_generic_ids, nr_sync_pernet_ops,
* init_net_initialized and first_device pointer. * init_net_initialized and first_device pointer.
*/ */
DECLARE_RWSEM(net_sem); DECLARE_RWSEM(net_sem);
...@@ -70,11 +72,10 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data) ...@@ -70,11 +72,10 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data)
{ {
struct net_generic *ng, *old_ng; struct net_generic *ng, *old_ng;
BUG_ON(!mutex_is_locked(&net_mutex));
BUG_ON(id < MIN_PERNET_OPS_ID); BUG_ON(id < MIN_PERNET_OPS_ID);
old_ng = rcu_dereference_protected(net->gen, old_ng = rcu_dereference_protected(net->gen,
lockdep_is_held(&net_mutex)); lockdep_is_held(&net_sem));
if (old_ng->s.len > id) { if (old_ng->s.len > id) {
old_ng->ptr[id] = data; old_ng->ptr[id] = data;
return 0; return 0;
...@@ -426,11 +427,14 @@ struct net *copy_net_ns(unsigned long flags, ...@@ -426,11 +427,14 @@ struct net *copy_net_ns(unsigned long flags,
rv = down_read_killable(&net_sem); rv = down_read_killable(&net_sem);
if (rv < 0) if (rv < 0)
goto put_userns; goto put_userns;
rv = mutex_lock_killable(&net_mutex); if (nr_sync_pernet_ops) {
if (rv < 0) rv = mutex_lock_killable(&net_mutex);
goto up_read; if (rv < 0)
goto up_read;
}
rv = setup_net(net, user_ns); rv = setup_net(net, user_ns);
mutex_unlock(&net_mutex); if (nr_sync_pernet_ops)
mutex_unlock(&net_mutex);
up_read: up_read:
up_read(&net_sem); up_read(&net_sem);
if (rv < 0) { if (rv < 0) {
...@@ -487,7 +491,8 @@ static void cleanup_net(struct work_struct *work) ...@@ -487,7 +491,8 @@ static void cleanup_net(struct work_struct *work)
spin_unlock_irq(&cleanup_list_lock); spin_unlock_irq(&cleanup_list_lock);
down_read(&net_sem); down_read(&net_sem);
mutex_lock(&net_mutex); if (nr_sync_pernet_ops)
mutex_lock(&net_mutex);
/* Don't let anyone else find us. */ /* Don't let anyone else find us. */
rtnl_lock(); rtnl_lock();
...@@ -522,7 +527,8 @@ static void cleanup_net(struct work_struct *work) ...@@ -522,7 +527,8 @@ static void cleanup_net(struct work_struct *work)
list_for_each_entry_reverse(ops, &pernet_list, list) list_for_each_entry_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list); ops_exit_list(ops, &net_exit_list);
mutex_unlock(&net_mutex); if (nr_sync_pernet_ops)
mutex_unlock(&net_mutex);
/* Free the net generic variables */ /* Free the net generic variables */
list_for_each_entry_reverse(ops, &pernet_list, list) list_for_each_entry_reverse(ops, &pernet_list, list)
...@@ -994,6 +1000,9 @@ static int register_pernet_operations(struct list_head *list, ...@@ -994,6 +1000,9 @@ static int register_pernet_operations(struct list_head *list,
rcu_barrier(); rcu_barrier();
if (ops->id) if (ops->id)
ida_remove(&net_generic_ids, *ops->id); ida_remove(&net_generic_ids, *ops->id);
} else if (!ops->async) {
pr_info_once("Pernet operations %ps are sync.\n", ops);
nr_sync_pernet_ops++;
} }
return error; return error;
...@@ -1001,7 +1010,8 @@ static int register_pernet_operations(struct list_head *list, ...@@ -1001,7 +1010,8 @@ static int register_pernet_operations(struct list_head *list,
static void unregister_pernet_operations(struct pernet_operations *ops) static void unregister_pernet_operations(struct pernet_operations *ops)
{ {
if (!ops->async)
BUG_ON(nr_sync_pernet_ops-- == 0);
__unregister_pernet_operations(ops); __unregister_pernet_operations(ops);
rcu_barrier(); rcu_barrier();
if (ops->id) if (ops->id)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment