Commit 148ade2c authored by Martijn Coenen, committed by Greg Kroah-Hartman

ANDROID: binder: Add thread->process_todo flag.

This flag determines whether the thread should currently
process the work in the thread->todo worklist.

The prime use case for this is improving the performance
of synchronous transactions: every synchronous transaction
posts a BR_TRANSACTION_COMPLETE to the calling thread,
but there is no reason to return that command to userspace
right away, since userspace has to wait for the reply anyway.

Likewise, a synchronous transaction that contains a binder
object can cause BR_ACQUIRE/BR_INCREFS commands to be
returned to userspace; since the caller must hold a
strong/weak ref for the duration of the call anyway,
postponing these commands until the reply comes in is
not a problem.

Note that this flag is not used to determine whether a
thread can handle process work; a thread should never pick
up process work when thread work is still pending.
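The mechanism is the same in both cases: work is parked on
thread->todo without setting process_todo, so the thread stays
asleep in read until later work (the reply, or an error) sets the
flag and everything is returned in one batch. As a minimal
userspace sketch of that gating idea (all types and names below
are illustrative stand-ins for this note, not the kernel's API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for struct binder_work / struct binder_thread. */
struct work {
	const char *tag;
	struct work *next;
};

struct thread_model {
	struct work *todo;	/* head of the thread's todo list */
	bool process_todo;	/* should the todo list be drained now? */
};

/* Deferred enqueue: park the work without making the thread runnable. */
static void enqueue_deferred(struct thread_model *t, struct work *w)
{
	w->next = t->todo;
	t->todo = w;
}

/* Normal enqueue: park the work and enable processing of the queue. */
static void enqueue(struct thread_model *t, struct work *w)
{
	enqueue_deferred(t, w);
	t->process_todo = true;
}

/* Mirrors the patched binder_has_work_ilocked(): a non-empty todo
 * list no longer counts as work by itself; only process_todo does. */
static bool has_thread_work(const struct thread_model *t)
{
	return t->process_todo;
}

int main(void)
{
	struct thread_model t = { NULL, false };
	struct work tcomplete = { "TRANSACTION_COMPLETE", NULL };
	struct work reply = { "REPLY", NULL };

	/* The completion is parked; the caller would stay asleep in read. */
	enqueue_deferred(&t, &tcomplete);
	printf("has work after deferred enqueue: %d\n", has_thread_work(&t));

	/* The reply arrives; both items would now be returned together. */
	enqueue(&t, &reply);
	printf("has work after reply enqueue:    %d\n", has_thread_work(&t));
	return 0;
}

The benchmarks below reflect the saving of one wakeup and
kernel/userspace round trip per synchronous call.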

Before patch:
------------------------------------------------------------------
Benchmark                           Time           CPU Iterations
------------------------------------------------------------------
BM_sendVec_binderize/4          45959 ns      20288 ns      34351
BM_sendVec_binderize/8          45603 ns      20080 ns      34909
BM_sendVec_binderize/16         45528 ns      20113 ns      34863
BM_sendVec_binderize/32         45551 ns      20122 ns      34881
BM_sendVec_binderize/64         45701 ns      20183 ns      34864
BM_sendVec_binderize/128        45824 ns      20250 ns      34576
BM_sendVec_binderize/256        45695 ns      20171 ns      34759
BM_sendVec_binderize/512        45743 ns      20211 ns      34489
BM_sendVec_binderize/1024       46169 ns      20430 ns      34081

After patch:
------------------------------------------------------------------
Benchmark                           Time           CPU Iterations
------------------------------------------------------------------
BM_sendVec_binderize/4          42939 ns      17262 ns      40653
BM_sendVec_binderize/8          42823 ns      17243 ns      40671
BM_sendVec_binderize/16         42898 ns      17243 ns      40594
BM_sendVec_binderize/32         42838 ns      17267 ns      40527
BM_sendVec_binderize/64         42854 ns      17249 ns      40379
BM_sendVec_binderize/128        42881 ns      17288 ns      40427
BM_sendVec_binderize/256        42917 ns      17297 ns      40429
BM_sendVec_binderize/512        43184 ns      17395 ns      40411
BM_sendVec_binderize/1024       43119 ns      17357 ns      40432

Signed-off-by: Martijn Coenen <maco@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 39b759ca

--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -577,6 +577,8 @@ enum {
  *                        (protected by @proc->inner_lock)
  * @todo:                 list of work to do for this thread
  *                        (protected by @proc->inner_lock)
+ * @process_todo:         whether work in @todo should be processed
+ *                        (protected by @proc->inner_lock)
  * @return_error:         transaction errors reported by this thread
  *                        (only accessed by this thread)
  * @reply_error:          transaction errors reported by target thread
@@ -602,6 +604,7 @@ struct binder_thread {
 	bool looper_need_return; /* can be written by other thread */
 	struct binder_transaction *transaction_stack;
 	struct list_head todo;
+	bool process_todo;
 	struct binder_error return_error;
 	struct binder_error reply_error;
 	wait_queue_head_t wait;
@@ -787,6 +790,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
 	return ret;
 }
 
+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work:         struct binder_work to add to list
+ * @target_list:  list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
 static void
 binder_enqueue_work_ilocked(struct binder_work *work,
 			    struct list_head *target_list)
@@ -797,22 +810,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
 }
 
 /**
- * binder_enqueue_work() - Add an item to the work list
- * @proc:         binder_proc associated with list
+ * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
+ * @thread:       thread to queue work to
  * @work:         struct binder_work to add to list
- * @target_list:  list to add work to
  *
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
  */
 static void
-binder_enqueue_work(struct binder_proc *proc,
-		    struct binder_work *work,
-		    struct list_head *target_list)
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
+					    struct binder_work *work)
 {
-	binder_inner_proc_lock(proc);
-	binder_enqueue_work_ilocked(work, target_list);
-	binder_inner_proc_unlock(proc);
+	binder_enqueue_work_ilocked(work, &thread->todo);
+}
+
+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+				   struct binder_work *work)
+{
+	binder_enqueue_work_ilocked(work, &thread->todo);
+	thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
+			   struct binder_work *work)
+{
+	binder_inner_proc_lock(thread->proc);
+	binder_enqueue_thread_work_ilocked(thread, work);
+	binder_inner_proc_unlock(thread->proc);
 }
 
 static void
@@ -927,7 +974,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 static bool binder_has_work_ilocked(struct binder_thread *thread,
 				    bool do_proc_work)
 {
-	return !binder_worklist_empty_ilocked(&thread->todo) ||
+	return thread->process_todo ||
 		thread->looper_need_return ||
 		(do_proc_work &&
 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1215,6 +1262,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 			node->local_strong_refs++;
 		if (!node->has_strong_ref && target_list) {
 			binder_dequeue_work_ilocked(&node->work);
+			/*
+			 * Note: this function is the only place where we queue
+			 * directly to a thread->todo without using the
+			 * corresponding binder_enqueue_thread_work() helper
+			 * functions; in this case it's ok to not set the
+			 * process_todo flag, since we know this node work will
+			 * always be followed by other work that starts queue
+			 * processing: in case of synchronous transactions, a
+			 * BR_REPLY or BR_ERROR; in case of oneway
+			 * transactions, a BR_TRANSACTION_COMPLETE.
+			 */
 			binder_enqueue_work_ilocked(&node->work, target_list);
 		}
 	} else {
@@ -1226,6 +1284,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 					node->debug_id);
 				return -EINVAL;
 			}
+			/*
+			 * See comment above
+			 */
 			binder_enqueue_work_ilocked(&node->work, target_list);
 		}
 	}
@@ -1915,9 +1976,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 			binder_pop_transaction_ilocked(target_thread, t);
 			if (target_thread->reply_error.cmd == BR_OK) {
 				target_thread->reply_error.cmd = error_code;
-				binder_enqueue_work_ilocked(
-					&target_thread->reply_error.work,
-					&target_thread->todo);
+				binder_enqueue_thread_work_ilocked(
+					target_thread,
+					&target_thread->reply_error.work);
 				wake_up_interruptible(&target_thread->wait);
 			} else {
 				WARN(1, "Unexpected reply error: %u\n",
@@ -2536,18 +2597,16 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 				    struct binder_proc *proc,
 				    struct binder_thread *thread)
 {
-	struct list_head *target_list = NULL;
 	struct binder_node *node = t->buffer->target_node;
 	bool oneway = !!(t->flags & TF_ONE_WAY);
-	bool wakeup = true;
+	bool pending_async = false;
 
 	BUG_ON(!node);
 	binder_node_lock(node);
 	if (oneway) {
 		BUG_ON(thread);
 		if (node->has_async_transaction) {
-			target_list = &node->async_todo;
-			wakeup = false;
+			pending_async = true;
 		} else {
 			node->has_async_transaction = 1;
 		}
@@ -2561,19 +2620,17 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 		return false;
 	}
 
-	if (!thread && !target_list)
+	if (!thread && !pending_async)
 		thread = binder_select_thread_ilocked(proc);
 
 	if (thread)
-		target_list = &thread->todo;
-	else if (!target_list)
-		target_list = &proc->todo;
+		binder_enqueue_thread_work_ilocked(thread, &t->work);
+	else if (!pending_async)
+		binder_enqueue_work_ilocked(&t->work, &proc->todo);
 	else
-		BUG_ON(target_list != &node->async_todo);
-
-	binder_enqueue_work_ilocked(&t->work, target_list);
+		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
 
-	if (wakeup)
+	if (!pending_async)
 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
 
 	binder_inner_proc_unlock(proc);
@@ -3068,10 +3125,10 @@ static void binder_transaction(struct binder_proc *proc,
 		}
 	}
 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
-	binder_enqueue_work(proc, tcomplete, &thread->todo);
 	t->work.type = BINDER_WORK_TRANSACTION;
 
 	if (reply) {
+		binder_enqueue_thread_work(thread, tcomplete);
 		binder_inner_proc_lock(target_proc);
 		if (target_thread->is_dead) {
 			binder_inner_proc_unlock(target_proc);
@@ -3079,13 +3136,21 @@ static void binder_transaction(struct binder_proc *proc,
 		}
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
-		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
 		binder_inner_proc_unlock(target_proc);
 		wake_up_interruptible_sync(&target_thread->wait);
 		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_inner_proc_lock(proc);
+		/*
+		 * Defer the TRANSACTION_COMPLETE, so we don't return to
+		 * userspace immediately; this allows the target process to
+		 * immediately start processing this transaction, reducing
+		 * latency. We will then return the TRANSACTION_COMPLETE when
+		 * the target replies (or there is an error).
+		 */
+		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
 		t->need_reply = 1;
 		t->from_parent = thread->transaction_stack;
 		thread->transaction_stack = t;
@@ -3099,6 +3164,7 @@ static void binder_transaction(struct binder_proc *proc,
 	} else {
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
+		binder_enqueue_thread_work(thread, tcomplete);
 		if (!binder_proc_transaction(t, target_proc, NULL))
 			goto err_dead_proc_or_thread;
 	}
@@ -3177,15 +3243,11 @@ static void binder_transaction(struct binder_proc *proc,
 	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
-		binder_enqueue_work(thread->proc,
-				    &thread->return_error.work,
-				    &thread->todo);
+		binder_enqueue_thread_work(thread, &thread->return_error.work);
 		binder_send_failed_reply(in_reply_to, return_error);
 	} else {
 		thread->return_error.cmd = return_error;
-		binder_enqueue_work(thread->proc,
-				    &thread->return_error.work,
-				    &thread->todo);
+		binder_enqueue_thread_work(thread, &thread->return_error.work);
 	}
 }
 
@@ -3489,10 +3551,9 @@ static int binder_thread_write(struct binder_proc *proc,
 					WARN_ON(thread->return_error.cmd !=
 						BR_OK);
 					thread->return_error.cmd = BR_ERROR;
-					binder_enqueue_work(
-						thread->proc,
-						&thread->return_error.work,
-						&thread->todo);
+					binder_enqueue_thread_work(
+						thread,
+						&thread->return_error.work);
 					binder_debug(
 						BINDER_DEBUG_FAILED_TRANSACTION,
 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3572,9 +3633,9 @@ static int binder_thread_write(struct binder_proc *proc,
 				if (thread->looper &
 				    (BINDER_LOOPER_STATE_REGISTERED |
 				     BINDER_LOOPER_STATE_ENTERED))
-					binder_enqueue_work_ilocked(
-						&death->work,
-						&thread->todo);
+					binder_enqueue_thread_work_ilocked(
+						thread,
+						&death->work);
 				else {
 					binder_enqueue_work_ilocked(
 						&death->work,
@@ -3629,8 +3690,8 @@ static int binder_thread_write(struct binder_proc *proc,
 			if (thread->looper &
 			    (BINDER_LOOPER_STATE_REGISTERED |
 			     BINDER_LOOPER_STATE_ENTERED))
-				binder_enqueue_work_ilocked(
-					&death->work, &thread->todo);
+				binder_enqueue_thread_work_ilocked(
+					thread, &death->work);
 			else {
 				binder_enqueue_work_ilocked(
 					&death->work,
@@ -3804,6 +3865,8 @@ static int binder_thread_read(struct binder_proc *proc,
 			break;
 		}
 		w = binder_dequeue_work_head_ilocked(list);
+		if (binder_worklist_empty_ilocked(&thread->todo))
+			thread->process_todo = false;
 
 		switch (w->type) {
 		case BINDER_WORK_TRANSACTION: {
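
For reference, the routing that the patched binder_proc_transaction()
performs can be condensed into a small standalone model. This is an
editor's illustration under stated assumptions (locking, thread
selection, and the dead-process path are omitted; all names are
stand-ins), not kernel code:

#include <stdbool.h>
#include <stdio.h>

enum dest { THREAD_TODO, PROC_TODO, NODE_ASYNC_TODO };

/*
 * Condenses the decision structure in the hunks above:
 * - if a target thread was given or selected, the work goes on that
 *   thread's todo list via the helper that also sets process_todo;
 * - otherwise, if no async transaction is already pending on the node,
 *   the work goes on the process-wide todo list;
 * - otherwise it is parked on node->async_todo and nobody is woken.
 */
static enum dest route(bool have_thread, bool pending_async, bool *wakeup)
{
	*wakeup = !pending_async;
	if (have_thread)
		return THREAD_TODO;	/* binder_enqueue_thread_work_ilocked() */
	if (!pending_async)
		return PROC_TODO;	/* binder_enqueue_work_ilocked(), proc->todo */
	return NODE_ASYNC_TODO;		/* binder_enqueue_work_ilocked(), node->async_todo */
}

int main(void)
{
	static const char *const names[] = {
		"thread->todo", "proc->todo", "node->async_todo",
	};
	bool wakeup;
	enum dest d;

	d = route(true, false, &wakeup);
	printf("sync, thread available: %s, wakeup=%d\n", names[d], wakeup);
	d = route(false, false, &wakeup);
	printf("oneway, none pending:   %s, wakeup=%d\n", names[d], wakeup);
	d = route(false, true, &wakeup);
	printf("oneway, one pending:    %s, wakeup=%d\n", names[d], wakeup);
	return 0;
}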