Commit 9630fe88 authored by Todd Kjos, committed by Greg Kroah-Hartman

binder: introduce locking helper functions

There are 3 main spinlocks which must be acquired in this
order:
1) proc->outer_lock : protects binder_ref
	binder_proc_lock() and binder_proc_unlock() are
	used to acq/rel.
2) node->lock : protects most fields of binder_node.
	binder_node_lock() and binder_node_unlock() are
	used to acq/rel
3) proc->inner_lock : protects the thread and node lists
	(proc->threads, proc->nodes) and all todo lists associated
	with the binder_proc (proc->todo, thread->todo,
	proc->delivered_death and node->async_todo).
	binder_inner_proc_lock() and binder_inner_proc_unlock()
	are used to acq/rel

Any lock under procA must never be nested under any lock at the same
level or below on procB.

Functions that require a lock held on entry indicate which lock
in the suffix of the function name:

foo_olocked() : requires proc->outer_lock
foo_nlocked() : requires node->lock
foo_ilocked() : requires proc->inner_lock
foo_oilocked(): requires proc->outer_lock and proc->inner_lock
foo_nilocked(): requires node->lock and proc->inner_lock
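
As a minimal illustration of this convention (the helper below is
hypothetical, not part of this patch), a function whose name ends in
_ilocked documents that the caller already holds proc->inner_lock:

	/* hypothetical example: caller must hold proc->inner_lock */
	static bool binder_worklist_empty_ilocked(struct list_head *list)
	{
		return list_empty(list);
	}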
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent adc18842
@@ -15,6 +15,39 @@
*
*/
/*
* Locking overview
*
* There are 3 main spinlocks which must be acquired in the
* order shown:
*
* 1) proc->outer_lock : protects binder_ref
* binder_proc_lock() and binder_proc_unlock() are
* used to acq/rel.
* 2) node->lock : protects most fields of binder_node.
* binder_node_lock() and binder_node_unlock() are
* used to acq/rel
* 3) proc->inner_lock : protects the thread and node lists
* (proc->threads, proc->nodes) and all todo lists associated
* with the binder_proc (proc->todo, thread->todo,
* proc->delivered_death and node->async_todo).
* binder_inner_proc_lock() and binder_inner_proc_unlock()
* are used to acq/rel
*
* Any lock under procA must never be nested under any lock at the same
* level or below on procB.
*
* Functions that require a lock held on entry indicate which lock
* in the suffix of the function name:
*
* foo_olocked() : requires proc->outer_lock
* foo_nlocked() : requires node->lock
* foo_ilocked() : requires proc->inner_lock
* foo_oilocked(): requires proc->outer_lock and proc->inner_lock
* foo_nilocked(): requires node->lock and proc->inner_lock
* ...
*/
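/*
* Illustration (hypothetical, not part of the patch): the cross-process
* rule above prevents AB/BA deadlocks. If thread 1 runs
* binder_inner_proc_lock(procA) then binder_inner_proc_lock(procB),
* while thread 2 runs binder_inner_proc_lock(procB) then
* binder_inner_proc_lock(procA), each thread can block forever on the
* lock the other already holds.
*/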
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
@@ -36,6 +69,7 @@
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
@@ -106,6 +140,7 @@ enum {
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
@@ -262,8 +297,43 @@ struct binder_error {
	uint32_t cmd;
};
/**
* struct binder_node - binder node bookkeeping
* @debug_id: unique ID for debugging
* (invariant after initialized)
* @lock: lock for node fields
* @work: worklist element for node work
* @rb_node: element for proc->nodes tree
* @dead_node: element for binder_dead_nodes list
* (protected by binder_dead_nodes_lock)
* @proc: binder_proc that owns this node
* (invariant after initialized)
* @refs: list of references on this node
* @internal_strong_refs: used to take strong references when
* initiating a transaction
* @local_weak_refs: weak user refs from local process
* @local_strong_refs: strong user refs from local process
* @tmp_refs: temporary kernel refs
* @ptr: userspace pointer for node
* (invariant, no lock needed)
* @cookie: userspace cookie for node
* (invariant, no lock needed)
* @has_strong_ref: userspace notified of strong ref
* @pending_strong_ref: userspace has acked notification of strong ref
* @has_weak_ref: userspace notified of weak ref
* @pending_weak_ref: userspace has acked notification of weak ref
* @has_async_transaction: async transaction to node in progress
* @accept_fds: file descriptor operations supported for node
* (invariant after initialized)
* @min_priority: minimum scheduling priority
* (invariant after initialized)
* @async_todo: list of async work items
*
* Bookkeeping structure for binder nodes.
*/
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
@@ -346,6 +416,51 @@ enum binder_deferred_state {
	BINDER_DEFERRED_RELEASE = 0x04,
};
/**
* struct binder_proc - binder process bookkeeping
* @proc_node: element for binder_procs list
* @threads: rbtree of binder_threads in this proc
* @nodes: rbtree of binder nodes associated with
* this proc ordered by node->ptr
* @refs_by_desc: rbtree of refs ordered by ref->desc
* @refs_by_node: rbtree of refs ordered by ref->node
* @pid: PID of group_leader of process
* (invariant after initialized)
* @tsk: task_struct for group_leader of process
* (invariant after initialized)
* @files: files_struct for process
* (invariant after initialized)
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
* (protected by binder_deferred_lock)
* @is_dead: process is dead and awaiting free
* when outstanding transactions are cleaned up
* @todo: list of work for this process
* @wait: wait queue head to wait for proc work
* (invariant after initialized)
* @stats: per-process binder statistics
* (atomics, no lock needed)
* @delivered_death: list of delivered death notifications
* @max_threads: cap on number of binder threads
* @requested_threads: number of binder threads requested but not
* yet started. In current implementation, can
* only be 0 or 1.
* @requested_threads_started: number of binder threads started
* @ready_threads: number of threads waiting for proc work
* @tmp_ref: temporary reference to indicate proc is in use
* @default_priority: default scheduler priority
* (invariant after initialized)
* @debugfs_entry: debugfs node
* @alloc: binder allocator bookkeeping
* @context: binder_context for this proc
* (invariant after initialized)
* @inner_lock: can nest under outer_lock and/or node lock
* @outer_lock: no nesting under inner or node lock
* Lock order: 1) outer, 2) node, 3) inner
*
* Bookkeeping structure for binder processes
*/
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
@@ -372,6 +487,8 @@ struct binder_proc {
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};
enum {
@@ -382,6 +499,33 @@ enum {
	BINDER_LOOPER_STATE_WAITING = 0x10,
};
/**
* struct binder_thread - binder thread bookkeeping
* @proc: binder process for this thread
* (invariant after initialization)
* @rb_node: element for proc->threads rbtree
* @pid: PID for this thread
* (invariant after initialization)
* @looper: bitmap of looping state
* (only accessed by this thread)
* @looper_needs_return: looping thread needs to exit driver
* (no lock needed)
* @transaction_stack: stack of in-progress transactions for this thread
* @todo: list of work to do for this thread
* @return_error: transaction errors reported by this thread
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
* @wait: wait queue for thread work
* @stats: per-thread statistics
* (atomics, no lock needed)
* @tmp_ref: temporary reference to indicate thread is in use
* (atomic since @proc->inner_lock cannot
* always be acquired)
* @is_dead: thread is dead and awaiting free
* when outstanding transactions are cleaned up
*
* Bookkeeping structure for binder threads.
*/
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
@@ -424,6 +568,97 @@ struct binder_transaction {
	spinlock_t lock;
};
/**
* binder_proc_lock() - Acquire outer lock for given binder_proc
* @proc: struct binder_proc to acquire
*
* Acquires proc->outer_lock. Used to protect binder_ref
* structures associated with the given proc.
*/
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
* binder_proc_unlock() - Release spinlock for given binder_proc
* @proc: struct binder_proc to release
*
* Release lock acquired via binder_proc_lock()
*/
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
* binder_inner_proc_lock() - Acquire inner lock for given binder_proc
* @proc: struct binder_proc to acquire
*
* Acquires proc->inner_lock. Used to protect todo lists
*/
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
* binder_inner_proc_unlock() - Release inner lock for given binder_proc
* @proc: struct binder_proc to release
*
* Release lock acquired via binder_inner_proc_lock()
*/
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
* binder_node_lock() - Acquire spinlock for given binder_node
* @node: struct binder_node to acquire
*
* Acquires node->lock. Used to protect binder_node fields
*/
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
* binder_node_unlock() - Release spinlock for given binder_node
* @node: struct binder_node to release
*
* Release lock acquired via binder_node_lock()
*/
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
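/*
* A minimal usage sketch (hypothetical function, not part of this
* patch): the three locks are taken in the documented order using the
* helpers above and released in reverse order. Because the helpers are
* wrapped in macros, each call logs its __LINE__ when
* BINDER_DEBUG_SPINLOCKS is enabled.
*/
static void binder_example_locked_op(struct binder_proc *proc,
				     struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */

	/* ... access fields protected by these locks ... */

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}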
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
@@ -568,6 +803,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
@@ -3599,6 +3835,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
...