Commit f5c99a55 authored by Dyneteve's avatar Dyneteve

staging: Update binder driver

Change-Id: I948b327cc7c40cd2bea739f3dae66e93c7233655
parent 65cef798
......@@ -59,6 +59,4 @@ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_ZCACHE) += zcache/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_PRIMA_WLAN) += prima/
obj-$(CONFIG_PRONTO_WLAN) += prima/
obj-$(CONFIG_PRONTO_WLAN) += prima/
......@@ -38,6 +38,16 @@ config ANDROID_BINDER_DEVICES
created. Each binder device has its own context manager, and is
therefore logically separated from the other devices.
config ANDROID_BINDER_IPC_SELFTEST
bool "Android Binder IPC Driver Selftest"
depends on ANDROID_BINDER_IPC
---help---
This feature allows binder selftest to run.
Binder selftest checks the allocation and free of binder buffers
exhaustively with combinations of various buffer sizes and
alignments.
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
......
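The help text above describes what the selftest exercises; the code itself is only built when this option is set, and the stub declared later in binder_alloc.h lets the call site compile away otherwise. The binder.c diff that wires the test in is collapsed below, so the following is only a sketch of a plausible hook point (example_binder_mmap is a made-up name, not code from this commit):

/* Hypothetical call site; not taken from the collapsed binder.c diff. */
#include "binder_alloc.h"

static int example_binder_mmap(struct binder_alloc *alloc,
                               struct vm_area_struct *vma)
{
        int ret;

        ret = binder_alloc_mmap_handler(alloc, vma);
        if (ret)
                return ret;

        /*
         * With CONFIG_ANDROID_BINDER_IPC_SELFTEST=y this runs the
         * exhaustive alloc/free test once; otherwise it is an empty
         * static inline and compiles to nothing.
         */
        binder_selftest_alloc(alloc);
        return 0;
}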
......@@ -3,7 +3,8 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger/
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
......
......@@ -340,6 +340,7 @@ static int alarm_release(struct inode *inode, struct file *file)
if (file->private_data) {
for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
uint32_t alarm_type_mask = 1U << i;
if (alarm_enabled & alarm_type_mask) {
alarm_dbg(INFO,
"%s: clear alarm, pending %d\n",
......
......@@ -375,7 +375,7 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
loff_t start = range->pgstart * PAGE_SIZE;
loff_t end = (range->pgend + 1) * PAGE_SIZE;
range->asma->file->f_op->fallocate(range->asma->file,
do_fallocate(range->asma->file,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start, end - start);
range->purged = ASHMEM_WAS_PURGED;
......
[Two large file diffs are collapsed by the viewer and not shown here.]
/*
* Copyright (C) 2017 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_BINDER_ALLOC_H
#define _LINUX_BINDER_ALLOC_H
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
struct binder_transaction;
/**
* struct binder_buffer - buffer used for binder transactions
* @entry: entry in alloc->buffers list
* @rb_node: node for allocated_buffers/free_buffers rb trees
* @free: true if buffer is free
* @allow_user_free: true if userspace is allowed to free this buffer
* @async_transaction: true if the buffer is in use for an async transaction
* @free_in_progress: true while a userspace-initiated free of this buffer is pending
* @debug_id: unique ID for debugging
* @transaction: pointer to the associated struct binder_transaction
* @target_node: struct binder_node associated with this buffer
* @data_size: size of the transaction data
* @offsets_size: size of the array of object offsets in the data
* @extra_buffers_size: size of space reserved for other objects (e.g. sg lists)
* @data: pointer to the base of the buffer's data area
*
* Bookkeeping structure for binder transaction buffers
*/
struct binder_buffer {
struct list_head entry; /* free and allocated entries by address */
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1;
unsigned allow_user_free:1;
unsigned async_transaction:1;
unsigned free_in_progress:1;
unsigned debug_id:28;
struct binder_transaction *transaction;
struct binder_node *target_node;
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
void *data;
};
/**
* struct binder_alloc - per-binder proc state for binder allocator
* @mutex: protects the fields of this structure
* @vma: vm_area_struct passed to mmap_handler
* (invariant after mmap)
* @tsk: task_struct of the task that called init for this proc
* (invariant after init)
* @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
* @buffer: base of per-proc address space mapped via mmap
* @user_buffer_offset: offset between user and kernel VAs for buffer
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
* @pages: array of physical page addresses for each
* page of mmap'd space
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
*
* Bookkeeping structure for per-proc address space management for binder
* buffers. It is normally initialized during binder_init() and binder_mmap()
* calls. The address space is used for both user-visible buffers and for
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
struct mutex mutex;
struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
ptrdiff_t user_buffer_offset;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
struct page **pages;
size_t buffer_size;
uint32_t buffer_free;
int pid;
};
#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
void binder_selftest_alloc(struct binder_alloc *alloc);
#else
static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
uintptr_t user_ptr);
extern void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer);
extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct vm_area_struct *vma);
extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc);
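The prototypes above are the allocator's entire external API. As orientation only (a sketch under the assumption that callers follow the usual open/mmap/transaction/release ordering; example_proc_lifetime is a made-up name), the intended call pattern over a binder proc's lifetime looks roughly like this:

/* Illustrative only; error handling trimmed to the essentials. */
#include <linux/err.h>
#include "binder_alloc.h"

static void example_proc_lifetime(struct binder_alloc *alloc,
                                  struct vm_area_struct *vma)
{
        struct binder_buffer *buf;

        binder_alloc_init(alloc);                   /* at open() time */
        if (binder_alloc_mmap_handler(alloc, vma))  /* at mmap() time */
                return;

        /* per transaction: carve a buffer out of the mapped space */
        buf = binder_alloc_new_buf(alloc, 128, 8, 0, 0 /* !is_async */);
        if (!IS_ERR(buf))
                binder_alloc_free_buf(alloc, buf);  /* when the txn completes */

        binder_alloc_vma_close(alloc);              /* when the vma goes away */
        binder_alloc_deferred_release(alloc);       /* on final proc release */
}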
/**
* binder_alloc_get_free_async_space() - get free space available for async
* @alloc: binder_alloc for this proc
*
* Return: the bytes remaining in the address-space for async transactions
*/
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
size_t free_async_space;
mutex_lock(&alloc->mutex);
free_async_space = alloc->free_async_space;
mutex_unlock(&alloc->mutex);
return free_async_space;
}
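The getter above takes and drops the allocator mutex so callers can sample the async budget without holding the lock across other work. A hypothetical consumer (the real callers live in the collapsed binder.c/binder_alloc.c diffs; example_log_async_space is made up) might use it like this:

/* Hypothetical debugging helper; not part of this commit. */
#include <linux/printk.h>
#include "binder_alloc.h"

static void example_log_async_space(struct binder_alloc *alloc)
{
        size_t avail = binder_alloc_get_free_async_space(alloc);

        /* async transactions may only consume half of the mapped space */
        if (avail < PAGE_SIZE)
                pr_warn("binder: proc %d low on async space (%zu bytes)\n",
                        alloc->pid, avail);
}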
/**
* binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
* @alloc: binder_alloc for this proc
*
* Return: the offset between kernel and user-space addresses to use for
* virtual address conversion
*/
static inline ptrdiff_t
binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
{
/*
* user_buffer_offset is constant if vma is set and
* undefined if vma is not set. It is possible to
* get here with !alloc->vma if the target process
* is dying while a transaction is being initiated.
* Returning the old value is ok in this case and
* the transaction will fail.
*/
return alloc->user_buffer_offset;
}
#endif /* _LINUX_BINDER_ALLOC_H */
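The offset returned by binder_alloc_get_user_buffer_offset() is what the driver uses to translate a kernel address inside the mmap'd region into the address the target process sees. A minimal sketch of that conversion, with a made-up helper name (the real conversion sits in the collapsed binder.c diff):

/* Sketch of the address translation the offset enables. */
#include <linux/types.h>
#include "binder_alloc.h"

static uintptr_t example_user_addr(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        /*
         * buffer->data is a kernel virtual address inside the mmap'd
         * region; adding the constant offset yields the corresponding
         * user-space address in the target process.
         */
        return (uintptr_t)buffer->data +
               binder_alloc_get_user_buffer_offset(alloc);
}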
/* binder_alloc_selftest.c
*
* Android IPC Subsystem
*
* Copyright (C) 2017 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm_types.h>
#include <linux/err.h>
#include "binder_alloc.h"
#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
static bool binder_selftest_run = true;
static int binder_selftest_failures;
static DEFINE_MUTEX(binder_selftest_lock);
/**
* enum buf_end_align_type - Page alignment of a buffer
* end with regard to the end of the previous buffer.
*
* In the pictures below, buf2 refers to the buffer we
* are aligning. buf1 refers to previous buffer by addr.
* Symbol [ means the start of a buffer, ] means the end
* of a buffer, and | means page boundaries.
*/
enum buf_end_align_type {
/**
* @SAME_PAGE_UNALIGNED: The end of this buffer is on
* the same page as the end of the previous buffer and
* is not page aligned. Examples:
* buf1 ][ buf2 ][ ...
* buf1 ]|[ buf2 ][ ...
*/
SAME_PAGE_UNALIGNED = 0,
/**
* @SAME_PAGE_ALIGNED: When the end of the previous buffer
* is not page aligned, the end of this buffer is on the
* same page as the end of the previous buffer and is page
* aligned. When the previous buffer is page aligned, the
* end of this buffer is aligned to the next page boundary.
* Examples:
* buf1 ][ buf2 ]| ...
* buf1 ]|[ buf2 ]| ...
*/
SAME_PAGE_ALIGNED,
/**
* @NEXT_PAGE_UNALIGNED: The end of this buffer is on
* the page next to the end of the previous buffer and
* is not page aligned. Examples:
* buf1 ][ buf2 | buf2 ][ ...
* buf1 ]|[ buf2 | buf2 ][ ...
*/
NEXT_PAGE_UNALIGNED,
/**
* @NEXT_PAGE_ALIGNED: The end of this buffer is on
* the page next to the end of the previous buffer and
* is page aligned. Examples:
* buf1 ][ buf2 | buf2 ]| ...
* buf1 ]|[ buf2 | buf2 ]| ...
*/
NEXT_PAGE_ALIGNED,
/**
* @NEXT_NEXT_UNALIGNED: The end of this buffer is on
* the page that follows the page after the end of the
* previous buffer and is not page aligned. Examples:
* buf1 ][ buf2 | buf2 | buf2 ][ ...
* buf1 ]|[ buf2 | buf2 | buf2 ][ ...
*/
NEXT_NEXT_UNALIGNED,
LOOP_END,
};
static void pr_err_size_seq(size_t *sizes, int *seq)
{
int i;
pr_err("alloc sizes: ");
for (i = 0; i < BUFFER_NUM; i++)
pr_cont("[%zu]", sizes[i]);
pr_cont("\n");
pr_err("free seq: ");
for (i = 0; i < BUFFER_NUM; i++)
pr_cont("[%d]", seq[i]);
pr_cont("\n");
}
static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
void *page_addr, *end;
int page_index;
end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
page_addr = buffer->data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
if (!alloc->pages[page_index]) {
pr_err("incorrect alloc state at page index %d\n",
page_index);
return false;
}
}
return true;
}
static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
struct binder_buffer *buffers[],
size_t *sizes, int *seq)
{
int i;
for (i = 0; i < BUFFER_NUM; i++) {
buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
if (IS_ERR(buffers[i]) ||
!check_buffer_pages_allocated(alloc, buffers[i],
sizes[i])) {
pr_err_size_seq(sizes, seq);
binder_selftest_failures++;
}
}
}
static void binder_selftest_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffers[],
size_t *sizes, int *seq)
{
int i;
for (i = 0; i < BUFFER_NUM; i++)
binder_alloc_free_buf(alloc, buffers[seq[i]]);
for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
if ((!alloc->pages[i]) == (i == 0)) {
pr_err("incorrect free state at page index %d\n", i);
binder_selftest_failures++;
}
}
}
static void binder_selftest_alloc_free(struct binder_alloc *alloc,
size_t *sizes, int *seq)
{
struct binder_buffer *buffers[BUFFER_NUM];
binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
binder_selftest_free_buf(alloc, buffers, sizes, seq);
}
static bool is_dup(int *seq, int index, int val)
{
int i;
for (i = 0; i < index; i++) {
if (seq[i] == val)
return true;
}
return false;
}
/* Generate BUFFER_NUM factorial free orders. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
size_t *sizes, int *seq, int index)
{
int i;
if (index == BUFFER_NUM) {
binder_selftest_alloc_free(alloc, sizes, seq);
return;
}
for (i = 0; i < BUFFER_NUM; i++) {
if (is_dup(seq, index, i))
continue;
seq[index] = i;
binder_selftest_free_seq(alloc, sizes, seq, index + 1);
}
}
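binder_selftest_free_seq() above builds every permutation of the free order by recursion, using is_dup() to skip indices already placed. The same pattern in isolation, as a small stand-alone user-space sketch (not kernel code) that simply prints all N! orders:

/* Stand-alone illustration of the permutation recursion.
 * Build with an ordinary C compiler, e.g.: cc -o perms perms.c
 */
#include <stdio.h>

#define N 3

static int is_dup(const int *seq, int index, int val)
{
        for (int i = 0; i < index; i++)
                if (seq[i] == val)
                        return 1;
        return 0;
}

static void gen(int *seq, int index)
{
        if (index == N) {
                for (int i = 0; i < N; i++)
                        printf("%d ", seq[i]);
                printf("\n");           /* one complete free order */
                return;
        }
        for (int i = 0; i < N; i++) {
                if (is_dup(seq, index, i))
                        continue;
                seq[index] = i;
                gen(seq, index + 1);    /* recurse with i fixed in this slot */
        }
}

int main(void)
{
        int seq[N];

        gen(seq, 0);    /* prints all 3! = 6 orders */
        return 0;
}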
static void binder_selftest_alloc_size(struct binder_alloc *alloc,
size_t *end_offset)
{
int i;
int seq[BUFFER_NUM] = {0};
size_t front_sizes[BUFFER_NUM];
size_t back_sizes[BUFFER_NUM];
size_t last_offset, offset = 0;
for (i = 0; i < BUFFER_NUM; i++) {
last_offset = offset;
offset = end_offset[i];
front_sizes[i] = offset - last_offset;
back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
}
/*
* Buffers share the first or last few pages.
* Only BUFFER_NUM - 1 buffer sizes are adjustable since
* we need one giant buffer before getting to the last page.
*/
back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
binder_selftest_free_seq(alloc, front_sizes, seq, 0);
binder_selftest_free_seq(alloc, back_sizes, seq, 0);
}
static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
size_t *end_offset, int index)
{
int align;
size_t end, prev;
if (index == BUFFER_NUM) {
binder_selftest_alloc_size(alloc, end_offset);
return;
}
prev = index == 0 ? 0 : end_offset[index - 1];
end = prev;
BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
if (align % 2)
end = ALIGN(end, PAGE_SIZE);
else
end += BUFFER_MIN_SIZE;
end_offset[index] = end;
binder_selftest_alloc_offset(alloc, end_offset, index + 1);
}
}
/**
* binder_selftest_alloc() - Test alloc and free of buffer pages.
* @alloc: Pointer to alloc struct.
*
* Allocate BUFFER_NUM buffers to cover all page alignment cases,
* then free them in all orders possible. Check that pages are
* allocated after buffer alloc and freed after freeing buffer.
*/
void binder_selftest_alloc(struct binder_alloc *alloc)
{
size_t end_offset[BUFFER_NUM];
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
if (!binder_selftest_run || !alloc->vma)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
binder_selftest_run = false;
if (binder_selftest_failures > 0)
pr_info("%d tests FAILED\n", binder_selftest_failures);
else
pr_info("PASSED\n");
done:
mutex_unlock(&binder_selftest_lock);
}
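For a sense of scale, based on the constants in this file: binder_selftest_alloc_offset() picks one of five alignment cases for each of the BUFFER_NUM = 5 buffers, giving 5^5 = 3125 end-offset layouts; each layout is exercised with two size vectors (front_sizes and back_sizes), and each size vector with all 5! = 120 free orders, so a single run performs 3125 x 2 x 120 = 750,000 alloc/free sequences of five buffers each, which is why the binder_selftest_run flag makes it a one-shot test.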
......@@ -23,7 +23,8 @@
struct binder_buffer;
struct binder_node;
struct binder_proc;
struct binder_ref;
struct binder_alloc;
struct binder_ref_data;
struct binder_thread;
struct binder_transaction;
......@@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received,
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
struct binder_ref *ref),
TP_ARGS(t, node, ref),
struct binder_ref_data *rdata),
TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
......@@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
__entry->ref_debug_id = rdata->debug_id;
__entry->ref_desc = rdata->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
......@@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
);
TRACE_EVENT(binder_transaction_ref_to_node,
TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
TP_ARGS(t, ref),
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
struct binder_ref_data *rdata),
TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
......@@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
__entry->node_debug_id = ref->node->debug_id;
__entry->node_ptr = ref->node->ptr;
__entry->ref_debug_id = rdata->debug_id;
__entry->ref_desc = rdata->desc;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
......@@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
);
TRACE_EVENT(binder_transaction_ref_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
struct binder_ref *dest_ref),
TP_ARGS(t, src_ref, dest_ref),
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
struct binder_ref_data *src_ref,
struct binder_ref_data *dest_ref),
TP_ARGS(t, node, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
......@@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->node_debug_id = src_ref->node->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
......@@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
TP_PROTO(struct binder_proc *proc, bool allocate,
TP_PROTO(struct binder_alloc *alloc, bool allocate,
void *start, void *end),
TP_ARGS(proc, allocate, start, end),
TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
......@@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range,
__field(size_t, size)
),
TP_fast_assign(
__entry->proc = proc->pid;
__entry->proc = alloc->pid;
__entry->allocate = allocate;
__entry->offset = start - proc->buffer;
__entry->offset = start - alloc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
......
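With the TP_PROTO change above, callers pass the struct binder_alloc rather than the binder_proc. TRACE_EVENT(binder_update_page_range, ...) generates a trace_binder_update_page_range() helper, so the call sites (in the collapsed binder_alloc.c diff) are expected to look roughly like this sketch; example_update_pages is a made-up wrapper, not a line from this commit:

/* Hypothetical call site inside the allocator's page-range handler. */
#include "binder_alloc.h"
#include "binder_trace.h"

static void example_update_pages(struct binder_alloc *alloc, bool allocate,
                                 void *start, void *end)
{
        trace_binder_update_page_range(alloc, allocate, start, end);
        /* ... then map or unmap the pages between start and end ... */
}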
......@@ -485,6 +485,7 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client,
while (n) {
struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
if (buffer < entry->buffer)
n = n->rb_left;
else if (buffer > entry->buffer)
......@@ -915,9 +916,11 @@ static int ion_get_client_serial(const struct rb_root *root,
{
int serial = -1;
struct rb_node *node;
for (node = rb_first(root); node; node = rb_next(node)) {
struct ion_client *client = rb_entry(node, struct ion_client,
node);
if (strcmp(client->name, name))
continue;
serial = max(serial, client->display_serial);
......@@ -1310,12 +1313,14 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
ion_buffer_put(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
struct ion_buffer *buffer = dmabuf->priv;
return buffer->vaddr + offset * PAGE_SIZE;
}
......@@ -1577,6 +1582,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case ION_IOC_IMPORT:
{
struct ion_handle *handle;
handle = ion_import_dma_buf(client, data.fd.fd);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
......@@ -1799,6 +1805,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
struct ion_client *client = rb_entry(n, struct ion_client,
node);
size_t size = ion_debug_heap_total(client, heap->id);
if (!size)
continue;
if (client->task) {
......@@ -1949,6 +1956,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to created heap debugfs at %s/%s\n",
path, heap->name);
......@@ -1964,6 +1972,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
&debug_shrink_fops);
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to created heap shrinker debugfs at %s/%s\n",
path, debug_name);
......@@ -2064,6 +2073,7 @@ void __init ion_reserve(struct ion_platform_data *data)
if (data->heaps[i].base == 0) {
phys_addr_t paddr;
paddr = memblock_alloc_base(data->heaps[i].size,
data->heaps[i].align,
MEMBLOCK_ALLOC_ANYWHERE);
......
......@@ -52,6 +52,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
for_each_sg(table->sgl, sg, table->nents, i) {
int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
struct page *page = sg_page(sg);
BUG_ON(i >= npages);
for (j = 0; j < npages_this_entry; j++)
*(tmp++) = page++;
......@@ -109,6 +110,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
void *addr = vm_map_ram(pages, num, -1, pgprot);
if (!addr)
return -ENOMEM;
memset(addr, 0, PAGE_SIZE * num);
......
......@@ -193,6 +193,7 @@ struct ion_heap {
spinlock_t free_lock;
wait_queue_head_t waitqueue;
struct task_struct *task;
int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
atomic_t total_allocated;
atomic_t total_handles;
......
......@@ -44,6 +44,7 @@ static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
{
int i;
for (i = 0; i < num_orders; i++)
if (order == orders[i])
return i;
......@@ -97,6 +98,7 @@ static void free_buffer_page(struct ion_system_heap *heap,
if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
struct ion_page_pool *pool;
if (cached)
pool = heap->cached_pools[order_to_index(order)];
else
......@@ -360,6 +362,7 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
for (i = 0; i < num_orders; i++) {
struct ion_page_pool *pool = sys_heap->uncached_pools[i];
nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);