Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 220bdae2 authored by Blagovest Kolenichev
Browse files

android: binder: Revert fixes for preemption and locking issues



  These were hacks and no longer needed as next binder from
  android-4.4@59ff2e1 (v4.4.78) has been reworked to use
  fine-grained locking.

  This is a preparation for clean merge of binder changes from
  android-4.4@59ff2e1 (v4.4.78), which follows right after this
  commit.

Reverted changes:
=================

  773fc2f1 android: binder: use copy_from_user_preempt_disabled

  To keep the driver consistent, and until we have
  fine-grained locking in place.

  Change-Id: I90fe02cdedb8a5677b900a68528fb443b9204322
  Signed-off-by: Riley Andrews <riandrews@google.com>
  Git-repo: https://source.codeaurora.org/quic/la/kernel/msm-4.4


  Git-commit: 5c9ce54ca3a66a57e4ebfe3ae71c5733b6bcc579
  Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>

  821e02f2 CHROMIUM: android: binder: Fix potential scheduling-while-atomic

  Commit f1e7f0a724f6 ("android: binder: Disable preemption while holding
  the global binder lock.") re-enabled preemption around most of the sites
  where calls to potentially sleeping functions were made, but missed
  __alloc_fd(), which can sleep if the fdtable needs to be resized.
  Re-enable preemption around __alloc_fd() as well as __fd_install() which
  can now sleep in upstream kernels as of commit 8a81252b ("fs/file.c:
  don't acquire files->file_lock in fd_install()").

  BUG=chrome-os-partner:44012
  TEST=Build and boot on Smaug.

  Change-Id: I9819c4b95876f697e75b1b84810b6c520d9c33ec
  Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
  Reviewed-on: https://chromium-review.googlesource.com/308582


  Reviewed-by: Stephen Barber <smbarber@chromium.org>
  Reviewed-by: Riley Andrews <riandrews@google.com>
  Git-repo: https://source.codeaurora.org/quic/la/kernel/msm-4.4


  Git-commit: c267ff1d548ed1bdad6a08f1c70776c5e60d569e
  Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>

  e4045d60 android: binder: Disable preemption while holding the global binder lock.

  Change-Id: I90fe02cdedb8a5677b900a68528fb443b9204322
  Signed-off-by: Riley Andrews <riandrews@google.com>
  Git-repo: https://source.codeaurora.org/quic/la/kernel/msm-4.4


  Git-commit: 5c9ce54ca3a66a57e4ebfe3ae71c5733b6bcc579
  Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>

Change-Id: Id97e04f3694d76c2b6a1e7c0dc39da15d83c2867
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
parent b1b3c5a6
Loading
Loading
Loading
Loading
+49 −130
Original line number Diff line number Diff line
@@ -417,7 +417,6 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	if (files == NULL)
		return -ESRCH;
@@ -428,11 +427,7 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	preempt_enable_no_resched();
	ret = __alloc_fd(files, 0, rlim_cur, flags);
	preempt_disable();

	return ret;
	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
@@ -441,11 +436,8 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files) {
		preempt_enable_no_resched();
	if (proc->files)
		__fd_install(proc->files, fd, file);
		preempt_disable();
	}
}

/*
@@ -473,7 +465,6 @@ static inline void binder_lock(struct binder_context *context, const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&context->binder_main_lock);
	preempt_disable();
	trace_binder_locked(tag);
}

@@ -482,62 +473,8 @@ static inline void binder_unlock(struct binder_context *context,
{
	trace_binder_unlock(tag);
	mutex_unlock(&context->binder_main_lock);
	preempt_enable();
}

static inline void *kzalloc_preempt_disabled(size_t size)
{
	void *ptr;

	ptr = kzalloc(size, GFP_NOWAIT);
	if (ptr)
		return ptr;

	preempt_enable_no_resched();
	ptr = kzalloc(size, GFP_KERNEL);
	preempt_disable();

	return ptr;
}

static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
{
	long ret;

	preempt_enable_no_resched();
	ret = copy_to_user(to, from, n);
	preempt_disable();
	return ret;
}

static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
{
	long ret;

	preempt_enable_no_resched();
	ret = copy_from_user(to, from, n);
	preempt_disable();
	return ret;
}

#define get_user_preempt_disabled(x, ptr)	\
({						\
	int __ret;				\
	preempt_enable_no_resched();		\
	__ret = get_user(x, ptr);		\
	preempt_disable();			\
	__ret;					\
})

#define put_user_preempt_disabled(x, ptr)	\
({						\
	int __ret;				\
	preempt_enable_no_resched();		\
	__ret = put_user(x, ptr);		\
	preempt_disable();			\
	__ret;					\
})

static void binder_set_nice(long nice)
{
	long min_nice;
@@ -670,8 +607,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
	else
		mm = get_task_mm(proc->tsk);

	preempt_enable_no_resched();

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
@@ -726,9 +661,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	preempt_disable();

	return 0;

free_range:
@@ -751,9 +683,6 @@ err_no_vma:
		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	preempt_disable();
	
	return -ENOMEM;
}

@@ -1022,7 +951,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
			return NULL;
	}

	node = kzalloc_preempt_disabled(sizeof(*node));
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
@@ -1166,7 +1095,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
		else
			return ref;
	}
	new_ref = kzalloc_preempt_disabled(sizeof(*ref));
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
@@ -2039,14 +1968,14 @@ static void binder_transaction(struct binder_proc *proc,
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc_preempt_disabled(sizeof(*t));
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
@@ -2107,14 +2036,14 @@ static void binder_transaction(struct binder_proc *proc,
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
@@ -2232,8 +2161,7 @@ static void binder_transaction(struct binder_proc *proc,
				return_error = BR_FAILED_REPLY;
				goto err_bad_offset;
			}
			if (copy_from_user_preempt_disabled(
					sg_bufp,
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
@@ -2351,7 +2279,7 @@ static int binder_thread_write(struct binder_proc *proc,
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
@@ -2369,7 +2297,7 @@ static int binder_thread_write(struct binder_proc *proc,
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (target == 0 && context->binder_context_mgr_node &&
@@ -2421,10 +2349,10 @@ static int binder_thread_write(struct binder_proc *proc,
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
@@ -2482,7 +2410,7 @@ static int binder_thread_write(struct binder_proc *proc,
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

@@ -2524,8 +2452,7 @@ static int binder_thread_write(struct binder_proc *proc,
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user_preempt_disabled(&tr, ptr,
							    sizeof(tr)))
			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
@@ -2536,7 +2463,7 @@ static int binder_thread_write(struct binder_proc *proc,
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
@@ -2587,10 +2514,10 @@ static int binder_thread_write(struct binder_proc *proc,
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target, false);
@@ -2619,7 +2546,7 @@ static int binder_thread_write(struct binder_proc *proc,
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc_preempt_disabled(sizeof(*death));
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2673,7 +2600,8 @@ static int binder_thread_write(struct binder_proc *proc,
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;
			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
@@ -2705,8 +2633,7 @@ static int binder_thread_write(struct binder_proc *proc,
					wake_up_interruptible(&proc->wait);
				}
			}
		}
		break;
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
@@ -2755,7 +2682,7 @@ static int binder_thread_read(struct binder_proc *proc,
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
@@ -2766,7 +2693,7 @@ retry:

	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
@@ -2774,7 +2701,7 @@ retry:
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
@@ -2852,7 +2779,7 @@ retry:
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
				if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

@@ -2894,14 +2821,14 @@ retry:
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
					if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
					if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *)
				if (put_user(node->ptr,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
					if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *)
				if (put_user(node->cookie,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
@@ -2945,10 +2872,11 @@ retry:
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
				if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
@@ -3015,10 +2943,10 @@ retry:
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

@@ -3060,7 +2988,7 @@ done:
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer))
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
@@ -3135,7 +3063,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
			break;
	}
	if (*p == NULL) {
		thread = kzalloc_preempt_disabled(sizeof(*thread));
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
@@ -3239,7 +3167,7 @@ static int binder_ioctl_write_read(struct file *filp,
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
@@ -3257,7 +3185,7 @@ static int binder_ioctl_write_read(struct file *filp,
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
@@ -3271,7 +3199,7 @@ static int binder_ioctl_write_read(struct file *filp,
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
@@ -3281,7 +3209,7 @@ static int binder_ioctl_write_read(struct file *filp,
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
@@ -3362,7 +3290,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
@@ -3385,7 +3313,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
			ret = -EINVAL;
			goto err;
		}
			if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
@@ -3447,7 +3376,6 @@ static const struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
@@ -3508,11 +3436,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* binder_update_page_range assumes preemption is disabled */
	preempt_disable();
	ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
	preempt_enable_no_resched();
	if (ret) {
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
@@ -3803,12 +3727,9 @@ static void binder_deferred_func(struct work_struct *work)
	int defer;

	do {
		trace_binder_lock(__func__);
		mutex_lock(&context->binder_main_lock);
		trace_binder_locked(__func__);
		binder_lock(context, __func__);

		mutex_lock(&context->binder_deferred_lock);
		preempt_disable();
		if (!hlist_empty(&context->binder_deferred_list)) {
			proc = hlist_entry(context->binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
@@ -3834,9 +3755,7 @@ static void binder_deferred_func(struct work_struct *work)
		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		trace_binder_unlock(__func__);
		mutex_unlock(&context->binder_main_lock);
		preempt_enable_no_resched();
		binder_unlock(context, __func__);
		if (files)
			put_files_struct(files);
	} while (proc);