Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8d24e2a3 authored by Todd Kjos, committed by Todd Kjos
Browse files

BACKPORT: binder: remove user_buffer_offset



Remove user_buffer_offset since there is no kernel
buffer pointer anymore.

(cherry picked from commit c41358a5f5217abd7c051e8d42397e5b80f3b3ed)
Bug: 67668716
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Change-Id: I399219867704dc5013453a7738193c742fc970ad
parent 46dc639b
Loading
Loading
Loading
Loading
+7 −32
Original line number Diff line number Diff line
@@ -2500,7 +2500,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;
@@ -2516,14 +2515,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
@@ -2537,7 +2528,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			fd_array = (u32 *)(uintptr_t)
				(parent->buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
@@ -2749,7 +2741,6 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
@@ -2767,13 +2758,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	fd_array = (u32 *)(uintptr_t)(parent->buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
@@ -2827,7 +2812,6 @@ static int binder_fixup_parent(struct binder_transaction *t,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
@@ -2863,11 +2847,8 @@ static int binder_fixup_parent(struct binder_transaction *t,
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent_buffer - (uintptr_t)b->data;
			(uintptr_t)parent->buffer - (uintptr_t)b->data;
	binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
				    &bp->buffer, sizeof(bp->buffer));

@@ -3270,10 +3251,8 @@ static void binder_transaction(struct binder_proc *proc,
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));
		char *kptr = t->buffer->data + buf_offset;

		t->security_ctx = (uintptr_t)kptr +
		    binder_alloc_get_user_buffer_offset(&target_proc->alloc);
		t->security_ctx = (uintptr_t)t->buffer->data + buf_offset;
		binder_alloc_copy_to_buffer(&target_proc->alloc,
					    t->buffer, buf_offset,
					    secctx, secctx_sz);
@@ -3490,9 +3469,7 @@ static void binder_transaction(struct binder_proc *proc,
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			bp->buffer = (uintptr_t)sg_bufp;
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp,
@@ -4476,9 +4453,7 @@ static int binder_thread_read(struct binder_proc *proc,

		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		trd->data.ptr.buffer = (uintptr_t)t->buffer->data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));
+6 −11
Original line number Diff line number Diff line
@@ -136,17 +136,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;
	void *uptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
	uptr = (void *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
		if (uptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
		else if (uptr > buffer->data)
			n = n->rb_right;
		else {
			/*
@@ -262,8 +262,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -658,7 +657,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
	}

	alloc->buffer = (void *)vma->vm_start;
	alloc->user_buffer_offset = 0;
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
@@ -905,10 +903,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr +
			       alloc->user_buffer_offset,
			       PAGE_SIZE, NULL);
		zap_page_range(vma, page_addr, PAGE_SIZE, NULL);

		trace_binder_unmap_user_end(alloc, index);

+0 −23
Original line number Diff line number Diff line
@@ -82,7 +82,6 @@ struct binder_lru_page {
 *                      (invariant after init)
 * @vma_vm_mm:          copy of vma->vm_mm (invarient after mmap)
 * @buffer:             base of per-proc address space mapped via mmap
 * @user_buffer_offset: offset between user and kernel VAs for buffer
 * @buffers:            list of all buffers for this proc
 * @free_buffers:       rb tree of buffers available for allocation
 *                      sorted by size
@@ -104,7 +103,6 @@ struct binder_alloc {
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	void *buffer;
	ptrdiff_t user_buffer_offset;
	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
@@ -163,27 +161,6 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
	return free_async_space;
}

/**
 * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
 * @alloc:	binder_alloc for this proc
 *
 * Return:	the offset between kernel and user-space addresses to use for
 * virtual address conversion
 */
static inline ptrdiff_t
binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
{
	/*
	 * user_buffer_offset is constant if vma is set and
	 * undefined if vma is not set. It is possible to
	 * get here with !alloc->vma if the target process
	 * is dying while a transaction is being initiated.
	 * Returning the old value is ok in this case and
	 * the transaction will fail.
	 */
	return alloc->user_buffer_offset;
}

unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,