drivers/android/binder.c  +0 −1

@@ -3251,7 +3251,6 @@ static void binder_transaction(struct binder_proc *proc,
 	t->buffer->debug_id = t->debug_id;
 	t->buffer->transaction = t;
 	t->buffer->target_node = target_node;
-	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
 	trace_binder_transaction_alloc_buf(t->buffer);
 	off_start = (binder_size_t *)(t->buffer->data +
 				      ALIGN(tr->data_size, sizeof(void *)));
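For reference, the removed line folds the sender's TF_CLEAR_BUF request into the buffer's one-bit clear_on_free field; the double negation collapses the masked value (0x20 or 0) to 1 or 0 so it fits the bitfield. A minimal user-space sketch of that idiom follows; demo_buffer and main() are illustrative only, not part of the driver.

/*
 * Standalone illustration (not kernel code) of the !!(flags & MASK)
 * idiom used by the removed line: a masked flag test yields 0x20 or 0,
 * and !! collapses it to 1 or 0 so it fits a one-bit bitfield.
 */
#include <assert.h>
#include <stdio.h>

#define TF_CLEAR_BUF 0x20	/* value from the uapi enum removed below */

struct demo_buffer {
	unsigned clear_on_free:1;	/* one-bit field, can only hold 0 or 1 */
};

int main(void)
{
	struct demo_buffer buf = { 0 };
	unsigned int flags = TF_CLEAR_BUF | 0x10;

	buf.clear_on_free = !!(flags & TF_CLEAR_BUF);	/* 0x20 -> 1 */
	assert(buf.clear_on_free == 1);
	printf("clear_on_free = %u\n", buf.clear_on_free);
	return 0;
}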
drivers/android/binder_alloc.c  +0 −159

@@ -642,8 +642,6 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	binder_insert_free_buffer(alloc, buffer);
 }
 
-static void binder_alloc_clear_buf(struct binder_alloc *alloc,
-				   struct binder_buffer *buffer);
 /**
  * binder_alloc_free_buf() - free a binder buffer
  * @alloc:	binder_alloc for this proc
@@ -654,18 +652,6 @@ static void binder_alloc_clear_buf(struct binder_alloc *alloc,
 void binder_alloc_free_buf(struct binder_alloc *alloc,
 			   struct binder_buffer *buffer)
 {
-	/*
-	 * We could eliminate the call to binder_alloc_clear_buf()
-	 * from binder_alloc_deferred_release() by moving this to
-	 * binder_alloc_free_buf_locked(). However, that could
-	 * increase contention for the alloc mutex if clear_on_free
-	 * is used frequently for large buffers. The mutex is not
-	 * needed for correctness here.
-	 */
-	if (buffer->clear_on_free) {
-		binder_alloc_clear_buf(alloc, buffer);
-		buffer->clear_on_free = false;
-	}
 	mutex_lock(&alloc->mutex);
 	binder_free_buf_locked(alloc, buffer);
 	mutex_unlock(&alloc->mutex);
@@ -773,10 +759,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		/* Transaction should already have been freed */
 		BUG_ON(buffer->transaction);
 
-		if (buffer->clear_on_free) {
-			binder_alloc_clear_buf(alloc, buffer);
-			buffer->clear_on_free = false;
-		}
 		binder_free_buf_locked(alloc, buffer);
 		buffers++;
 	}
@@ -897,144 +879,3 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	INIT_LIST_HEAD(&alloc->buffers);
 }
 
-/**
- * check_buffer() - verify that buffer/offset is safe to access
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be accessed
- * @offset: offset into @buffer data
- * @bytes: bytes to access from offset
- *
- * Check that the @offset/@bytes are within the size of the given
- * @buffer and that the buffer is currently active and not freeable.
- * Offsets must also be multiples of sizeof(u32). The kernel is
- * allowed to touch the buffer in two cases:
- *
- * 1) when the buffer is being created:
- *    (buffer->free == 0 && buffer->allow_user_free == 0)
- * 2) when the buffer is being torn down:
- *    (buffer->free == 0 && buffer->transaction == NULL).
- *
- * Return: true if the buffer is safe to access
- */
-static inline bool check_buffer(struct binder_alloc *alloc,
-				struct binder_buffer *buffer,
-				binder_size_t offset, size_t bytes)
-{
-	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
-
-	return buffer_size >= bytes &&
-		offset <= buffer_size - bytes &&
-		IS_ALIGNED(offset, sizeof(u32)) &&
-		!buffer->free &&
-		(!buffer->allow_user_free || !buffer->transaction);
-}
-
-/**
- * binder_alloc_get_page() - get kernel pointer for given buffer offset
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be accessed
- * @buffer_offset: offset into @buffer data
- * @pgoffp: address to copy final page offset to
- *
- * Lookup the struct page corresponding to the address
- * at @buffer_offset into @buffer->data. If @pgoffp is not
- * NULL, the byte-offset into the page is written there.
- *
- * The caller is responsible to ensure that the offset points
- * to a valid address within the @buffer and that @buffer is
- * not freeable by the user. Since it can't be freed, we are
- * guaranteed that the corresponding elements of @alloc->pages[]
- * cannot change.
- *
- * Return: struct page
- */
-static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
-					  struct binder_buffer *buffer,
-					  binder_size_t buffer_offset,
-					  pgoff_t *pgoffp)
-{
-	binder_size_t buffer_space_offset = buffer_offset +
-		(buffer->data - alloc->buffer);
-	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
-	size_t index = buffer_space_offset >> PAGE_SHIFT;
-	struct binder_lru_page *lru_page;
-
-	lru_page = &alloc->pages[index];
-	*pgoffp = pgoff;
-	return lru_page->page_ptr;
-}
-
-/**
- * binder_alloc_clear_buf() - zero out buffer
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be cleared
- *
- * memset the given buffer to 0
- */
-static void binder_alloc_clear_buf(struct binder_alloc *alloc,
-				   struct binder_buffer *buffer)
-{
-	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
-	binder_size_t buffer_offset = 0;
-
-	while (bytes) {
-		unsigned long size;
-		struct page *page;
-		pgoff_t pgoff;
-		void *kptr;
-
-		page = binder_alloc_get_page(alloc, buffer,
-					     buffer_offset, &pgoff);
-		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
-		kptr = kmap(page) + pgoff;
-		memset(kptr, 0, size);
-		kunmap(page);
-		bytes -= size;
-		buffer_offset += size;
-	}
-}
-
-/**
- * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be accessed
- * @buffer_offset: offset into @buffer data
- * @from: userspace pointer to source buffer
- * @bytes: bytes to copy
- *
- * Copy bytes from source userspace to target buffer.
- *
- * Return: bytes remaining to be copied
- */
-unsigned long
-binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
-				 struct binder_buffer *buffer,
-				 binder_size_t buffer_offset,
-				 const void __user *from,
-				 size_t bytes)
-{
-	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
-		return bytes;
-
-	while (bytes) {
-		unsigned long size;
-		unsigned long ret;
-		struct page *page;
-		pgoff_t pgoff;
-		void *kptr;
-
-		page = binder_alloc_get_page(alloc, buffer,
-					     buffer_offset, &pgoff);
-		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
-		kptr = kmap(page) + pgoff;
-		ret = copy_from_user(kptr, from, size);
-		kunmap(page);
-		if (ret)
-			return bytes - size + ret;
-		bytes -= size;
-		from += size;
-		buffer_offset += size;
-	}
-	return 0;
-}
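The bulk of the removal is the page-walk helpers: binder_alloc_clear_buf() and binder_alloc_copy_user_to_buffer() resolve each buffer offset to a page index plus an in-page offset via binder_alloc_get_page(), kmap() that page, and handle at most PAGE_SIZE - pgoff bytes per pass. Below is a minimal user-space sketch of the same chunking arithmetic; the flat pages[] array, clear_range(), and the 4096-byte page size are stand-ins for illustration, not binder APIs.

/*
 * User-space sketch of the page-walk used by the removed helpers
 * (binder_alloc_clear_buf / binder_alloc_copy_user_to_buffer).
 * A flat array stands in for the per-process page table
 * (alloc->pages[]); PAGE_SIZE and clear_range() are demo names only.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define NPAGES    4

static unsigned char pages[NPAGES][PAGE_SIZE];	/* stand-in for alloc->pages[] */

/* Zero `bytes` bytes starting at `offset` into the buffer space. */
static void clear_range(size_t offset, size_t bytes)
{
	while (bytes) {
		size_t index = offset / PAGE_SIZE;	/* which page */
		size_t pgoff = offset % PAGE_SIZE;	/* offset within it */
		size_t size  = bytes < PAGE_SIZE - pgoff ?
			       bytes : PAGE_SIZE - pgoff;	/* stop at page end */

		memset(&pages[index][pgoff], 0, size);
		bytes  -= size;
		offset += size;
	}
}

int main(void)
{
	memset(pages, 0xff, sizeof(pages));
	clear_range(100, 2 * PAGE_SIZE);	/* range spans three pages */
	printf("pages[0][99]=%u pages[0][100]=%u pages[2][99]=%u pages[2][100]=%u\n",
	       pages[0][99], pages[0][100], pages[2][99], pages[2][100]);
	return 0;
}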
drivers/android/binder_alloc.h  +1 −3

@@ -30,7 +30,6 @@ struct binder_transaction;
  * @entry:              entry alloc->buffers
  * @rb_node:            node for allocated_buffers/free_buffers rb trees
  * @free:               %true if buffer is free
- * @clear_on_free:      %true if buffer must be zeroed after use
  * @allow_user_free:    %true if user is allowed to free buffer
  * @async_transaction:  %true if buffer is in use for an async txn
  * @debug_id:           unique ID for debugging
@@ -49,10 +48,9 @@ struct binder_buffer {
 	struct rb_node rb_node; /* free entry by size or allocated entry */
 				/* by address */
 	unsigned free:1;
-	unsigned clear_on_free:1;
 	unsigned allow_user_free:1;
 	unsigned async_transaction:1;
-	unsigned debug_id:28;
+	unsigned debug_id:29;
 
 	struct binder_transaction *transaction;
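A side effect of dropping the bit: debug_id widens from 28 to 29 bits, so the remaining one-bit flags plus debug_id still fill exactly one 32-bit unsigned and struct binder_buffer does not change size. A standalone sketch of that packing follows; demo_old/demo_new are illustrative types, and exact bitfield layout is implementation-defined, though the sizes match on common ABIs.

/*
 * Standalone check of the bitfield packing behind the debug_id width
 * change: 4 one-bit flags + 28 bits and 3 one-bit flags + 29 bits both
 * occupy a single 32-bit unsigned on typical compilers.
 */
#include <assert.h>
#include <stdio.h>

struct demo_old {
	unsigned free:1;
	unsigned clear_on_free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:28;		/* 4 + 28 = 32 bits */
};

struct demo_new {
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;		/* 3 + 29 = 32 bits */
};

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct demo_old), sizeof(struct demo_new));
	assert(sizeof(struct demo_old) == sizeof(struct demo_new));
	return 0;
}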
include/uapi/linux/android/binder.h  +0 −1

@@ -309,7 +309,6 @@ enum transaction_flags {
 	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
 	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
 	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
-	TF_CLEAR_BUF	= 0x20,	/* clear buffer on txn complete */
 };
 
 struct binder_transaction_data {
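For completeness, transaction_flags are set by userspace in the flags field of struct binder_transaction_data when queuing a transaction; before this removal a client could OR in TF_CLEAR_BUF to ask the driver to zero the buffer once the transaction completed. A hedged sketch of that pre-change usage follows; the method code and payload are made up, and the BINDER_WRITE_READ ioctl plumbing is omitted.

/*
 * Sketch (not from the patch) of how a client would have requested
 * clear-on-free before this removal. TF_CLEAR_BUF is defined locally
 * in case the installed uapi header no longer carries it.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/android/binder.h>

#ifndef TF_CLEAR_BUF
#define TF_CLEAR_BUF 0x20	/* value of the enum entry removed above */
#endif

int main(void)
{
	static const char payload[] = "hello";
	struct binder_transaction_data tr;

	memset(&tr, 0, sizeof(tr));
	tr.code = 1;					/* method code, illustrative */
	tr.flags = TF_ACCEPT_FDS | TF_CLEAR_BUF;	/* request zeroing on completion */
	tr.data_size = sizeof(payload);
	tr.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)payload;

	printf("flags = 0x%x\n", tr.flags);
	return 0;
}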