Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 77fdc9db authored by Martijn Coenen, committed by Kyle Yan
Browse files

ANDROID: binder: Add thread->process_todo flag.



This flag determines whether the thread should currently
process the work in the thread->todo worklist.

The prime use case for this is improving the performance
of synchronous transactions: all synchronous transactions
post a BR_TRANSACTION_COMPLETE to the calling thread,
but there's no reason to return that command to userspace
right away - userspace anyway needs to wait for the reply.

Likewise, a synchronous transaction that contains a binder
object can cause a BC_ACQUIRE/BC_INCREFS to be returned to
userspace; since the caller must anyway hold a strong/weak
ref for the duration of the call, postponing these commands
until the reply comes in is not a problem.

Note that this flag is not used to determine whether a
thread can handle process work; a thread should never pick
up process work when thread work is still pending.

Before patch:
------------------------------------------------------------------
Benchmark                           Time           CPU Iterations
------------------------------------------------------------------
BM_sendVec_binderize/4          45959 ns      20288 ns      34351
BM_sendVec_binderize/8          45603 ns      20080 ns      34909
BM_sendVec_binderize/16         45528 ns      20113 ns      34863
BM_sendVec_binderize/32         45551 ns      20122 ns      34881
BM_sendVec_binderize/64         45701 ns      20183 ns      34864
BM_sendVec_binderize/128        45824 ns      20250 ns      34576
BM_sendVec_binderize/256        45695 ns      20171 ns      34759
BM_sendVec_binderize/512        45743 ns      20211 ns      34489
BM_sendVec_binderize/1024       46169 ns      20430 ns      34081

After patch:
------------------------------------------------------------------
Benchmark                           Time           CPU Iterations
------------------------------------------------------------------
BM_sendVec_binderize/4          42939 ns      17262 ns      40653
BM_sendVec_binderize/8          42823 ns      17243 ns      40671
BM_sendVec_binderize/16         42898 ns      17243 ns      40594
BM_sendVec_binderize/32         42838 ns      17267 ns      40527
BM_sendVec_binderize/64         42854 ns      17249 ns      40379
BM_sendVec_binderize/128        42881 ns      17288 ns      40427
BM_sendVec_binderize/256        42917 ns      17297 ns      40429
BM_sendVec_binderize/512        43184 ns      17395 ns      40411
BM_sendVec_binderize/1024       43119 ns      17357 ns      40432

Change-Id: Ia70287066d62aba64e98ac44ff1214e37ca75693
Signed-off-by: Martijn Coenen <maco@android.com>
Git-commit: 1af6180b
Git-repo: https://android.googlesource.com/kernel/common


Signed-off-by: Kyle Yan <kyan@codeaurora.org>
parent efdd9934
Loading
Loading
Loading
Loading
+100 −44
Original line number Original line Diff line number Diff line
@@ -596,6 +596,8 @@ enum {
 *                        (protected by @proc->inner_lock)
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 * @reply_error:          transaction errors reported by target thread
@@ -622,6 +624,7 @@ struct binder_thread {
	bool looper_need_return; /* can be written by other thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error return_error;
	struct binder_error reply_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	wait_queue_head_t wait;
@@ -809,6 +812,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
	return ret;
	return ret;
}
}


/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
static void
binder_enqueue_work_ilocked(struct binder_work *work,
binder_enqueue_work_ilocked(struct binder_work *work,
			   struct list_head *target_list)
			   struct list_head *target_list)
@@ -819,22 +832,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
}
}


/**
/**
 * binder_enqueue_work() - Add an item to the work list
 * binder_enqueue_thread_work_ilocked_nowake() - Add thread work
 * @proc:         binder_proc associated with list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 *
 * Adds the work to the specified list. Asserts that work
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * is not already on a list.
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
 */
static void
static void
binder_enqueue_work(struct binder_proc *proc,
binder_enqueue_thread_work_ilocked_nowake(struct binder_thread *thread,
		    struct binder_work *work,
					  struct binder_work *work)
		    struct list_head *target_list)
{
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, &thread->todo);
	binder_enqueue_work_ilocked(work, target_list);
}
	binder_inner_proc_unlock(proc);

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
}


static void
static void
@@ -967,7 +1014,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
static bool binder_has_work_ilocked(struct binder_thread *thread,
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
				    bool do_proc_work)
{
{
	return !binder_worklist_empty_ilocked(&thread->todo) ||
	return thread->process_todo ||
		thread->looper_need_return ||
		thread->looper_need_return ||
		(do_proc_work &&
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1384,6 +1431,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
			node->local_strong_refs++;
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
		}
	} else {
	} else {
@@ -1395,6 +1453,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
					node->debug_id);
					node->debug_id);
				return -EINVAL;
				return -EINVAL;
			}
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
		}
	}
	}
@@ -2084,9 +2145,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
			binder_pop_transaction_ilocked(target_thread, t);
			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_work_ilocked(
				binder_enqueue_thread_work_ilocked(
					&target_thread->reply_error.work,
					target_thread,
					&target_thread->todo);
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
				wake_up_interruptible(&target_thread->wait);
			} else {
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				WARN(1, "Unexpected reply error: %u\n",
@@ -2725,11 +2786,10 @@ static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
				    struct binder_thread *thread)
{
{
	struct list_head *target_list = NULL;
	struct binder_node *node = t->buffer->target_node;
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool wakeup = true;
	bool pending_async = false;


	BUG_ON(!node);
	BUG_ON(!node);
	binder_node_lock(node);
	binder_node_lock(node);
@@ -2739,8 +2799,7 @@ static bool binder_proc_transaction(struct binder_transaction *t,
	if (oneway) {
	if (oneway) {
		BUG_ON(thread);
		BUG_ON(thread);
		if (node->has_async_transaction) {
		if (node->has_async_transaction) {
			target_list = &node->async_todo;
			pending_async = true;
			wakeup = false;
		} else {
		} else {
			node->has_async_transaction = 1;
			node->has_async_transaction = 1;
		}
		}
@@ -2754,22 +2813,20 @@ static bool binder_proc_transaction(struct binder_transaction *t,
		return false;
		return false;
	}
	}


	if (!thread && !target_list)
	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);
		thread = binder_select_thread_ilocked(proc);


	if (thread) {
	if (thread) {
		target_list = &thread->todo;
		binder_transaction_priority(thread->task, t, node_prio,
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
					    node->inherit_rt);
	} else if (!target_list) {
		binder_enqueue_thread_work_ilocked(thread, &t->work);
		target_list = &proc->todo;
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
	} else {
		BUG_ON(target_list != &node->async_todo);
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}
	}


	binder_enqueue_work_ilocked(&t->work, target_list);
	if (!pending_async)

	if (wakeup)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);


	binder_inner_proc_unlock(proc);
	binder_inner_proc_unlock(proc);
@@ -3271,10 +3328,10 @@ static void binder_transaction(struct binder_proc *proc,
		}
		}
	}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	binder_enqueue_work(proc, tcomplete, &thread->todo);
	t->work.type = BINDER_WORK_TRANSACTION;
	t->work.type = BINDER_WORK_TRANSACTION;


	if (reply) {
	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			binder_inner_proc_unlock(target_proc);
@@ -3282,7 +3339,7 @@ static void binder_transaction(struct binder_proc *proc,
		}
		}
		BUG_ON(t->buffer->async_transaction != 0);
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_restore_priority(current, in_reply_to->saved_priority);
@@ -3290,6 +3347,7 @@ static void binder_transaction(struct binder_proc *proc,
	} else if (!(t->flags & TF_ONE_WAY)) {
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		binder_inner_proc_lock(proc);
		binder_enqueue_thread_work_ilocked_nowake(thread, tcomplete);
		t->need_reply = 1;
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		thread->transaction_stack = t;
@@ -3303,6 +3361,7 @@ static void binder_transaction(struct binder_proc *proc,
	} else {
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
			goto err_dead_proc_or_thread;
	}
	}
@@ -3382,15 +3441,11 @@ static void binder_transaction(struct binder_proc *proc,
	if (in_reply_to) {
	if (in_reply_to) {
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_work(thread->proc,
		binder_enqueue_thread_work(thread, &thread->return_error.work);
				    &thread->return_error.work,
				    &thread->todo);
		binder_send_failed_reply(in_reply_to, return_error);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
	} else {
		thread->return_error.cmd = return_error;
		thread->return_error.cmd = return_error;
		binder_enqueue_work(thread->proc,
		binder_enqueue_thread_work(thread, &thread->return_error.work);
				    &thread->return_error.work,
				    &thread->todo);
	}
	}
}
}


@@ -3694,10 +3749,9 @@ static int binder_thread_write(struct binder_proc *proc,
					WARN_ON(thread->return_error.cmd !=
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_work(
					binder_enqueue_thread_work(
						thread->proc,
						thread,
						&thread->return_error.work,
						&thread->return_error.work);
						&thread->todo);
					binder_debug(
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3777,9 +3831,9 @@ static int binder_thread_write(struct binder_proc *proc,
					if (thread->looper &
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_work_ilocked(
						binder_enqueue_thread_work_ilocked(
								&death->work,
								thread,
								&thread->todo);
								&death->work);
					else {
					else {
						binder_enqueue_work_ilocked(
						binder_enqueue_work_ilocked(
								&death->work,
								&death->work,
@@ -3834,8 +3888,8 @@ static int binder_thread_write(struct binder_proc *proc,
				if (thread->looper &
				if (thread->looper &
					(BINDER_LOOPER_STATE_REGISTERED |
					(BINDER_LOOPER_STATE_REGISTERED |
					 BINDER_LOOPER_STATE_ENTERED))
					 BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_work_ilocked(
					binder_enqueue_thread_work_ilocked(
						&death->work, &thread->todo);
						thread, &death->work);
				else {
				else {
					binder_enqueue_work_ilocked(
					binder_enqueue_work_ilocked(
							&death->work,
							&death->work,
@@ -4009,6 +4063,8 @@ static int binder_thread_read(struct binder_proc *proc,
			break;
			break;
		}
		}
		w = binder_dequeue_work_head_ilocked(list);
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;


		switch (w->type) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
		case BINDER_WORK_TRANSACTION: {