
Commit ef02a3d3 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "ion : Merge ion changes for msm-kona kernel upgrade"

parents afa8139d ac04f073
+11 −0
@@ -30,4 +30,15 @@ config SW_SYNC
	  WARNING: improper use of this can result in deadlocking kernel
	  drivers from userspace. Intended for test and debug only.

config DEBUG_DMA_BUF_REF
	bool "DEBUG Reference Count"
	depends on STACKDEPOT
	depends on DMA_SHARED_BUFFER
	default n
	help
	  Save stack traces for every call to dma_buf_get and dma_buf_put, to
	  help debug memory leaks. Potential leaks may be found by manually
	  matching the get/put call stacks.  This feature consumes extra memory
	  in order to save the stack traces using STACKDEPOT.

endmenu
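
Since the help text above describes finding leaks by matching get/put call stacks, here is a minimal sketch of the kind of bug this option is meant to surface (a hypothetical importer, not part of this change): a reference is taken and never dropped, so the buffer's debugfs entry keeps an unmatched dma_buf_get() stack.

#include <linux/dma-buf.h>
#include <linux/err.h>

/* Hypothetical importer, for illustration only. */
static int leaky_import(int fd)
{
	struct dma_buf *buf;

	buf = dma_buf_get(fd);	/* +1 reference; call stack is recorded */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... map and use the buffer ... */

	return 0;	/* bug: missing dma_buf_put(buf), stacks never balance */
}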
+1 −0
obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
obj-$(CONFIG_DEBUG_DMA_BUF_REF)	+= dma-buf-ref.o
+112 −0
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/seq_file.h>

#define DMA_BUF_STACK_DEPTH (16)

struct dma_buf_ref {
	struct list_head list;
	depot_stack_handle_t handle;
	int count;
};

void dma_buf_ref_init(struct dma_buf *dmabuf)
{
	INIT_LIST_HEAD(&dmabuf->refs);
}

void dma_buf_ref_destroy(struct dma_buf *dmabuf)
{
	struct dma_buf_ref *r, *n;

	mutex_lock(&dmabuf->lock);
	list_for_each_entry_safe(r, n, &dmabuf->refs, list) {
		list_del(&r->list);
		kfree(r);
	}
	mutex_unlock(&dmabuf->lock);
}

static void dma_buf_ref_insert_handle(struct dma_buf *dmabuf,
				      depot_stack_handle_t handle,
				      int count)
{
	struct dma_buf_ref *r;

	mutex_lock(&dmabuf->lock);
	list_for_each_entry(r, &dmabuf->refs, list) {
		if (r->handle == handle) {
			r->count += count;
			goto out;
		}
	}

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		goto out;

	INIT_LIST_HEAD(&r->list);
	r->handle = handle;
	r->count = count;
	list_add(&r->list, &dmabuf->refs);

out:
	mutex_unlock(&dmabuf->lock);
}

void dma_buf_ref_mod(struct dma_buf *dmabuf, int nr)
{
	unsigned long entries[DMA_BUF_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = DMA_BUF_STACK_DEPTH,
		.skip = 1
	};
	depot_stack_handle_t handle;

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	handle = depot_save_stack(&trace, GFP_KERNEL);
	if (!handle)
		return;

	dma_buf_ref_insert_handle(dmabuf, handle, nr);
}

/**
 * Called with dmabuf->lock held
 */
int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf)
{
	char *buf;
	struct dma_buf_ref *ref;
	int count = 0;
	struct stack_trace trace;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	list_for_each_entry(ref, &dmabuf->refs, list) {
		count += ref->count;

		seq_printf(s, "References: %d\n", ref->count);
		depot_fetch_stack(ref->handle, &trace);
		snprint_stack_trace(buf, PAGE_SIZE, &trace, 0);
		seq_puts(s, buf);
		seq_putc(s, '\n');
	}

	seq_printf(s, "Total references: %d\n\n\n", count);
	free_page((unsigned long)buf);

	return 0;
}
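
Given the format strings in dma_buf_ref_show() above, a buffer's entry in the dmabuf debugfs file would read roughly as follows; symbol names, offsets, and counts are illustrative only. A stack recorded via dma_buf_put() carries a negative count, so a leak shows up as a positive total alongside an unmatched get stack:

References: 2
 dma_buf_ref_mod+0x48/0xb0
 dma_buf_get+0x54/0x90
 ...

References: -1
 dma_buf_ref_mod+0x48/0xb0
 dma_buf_put+0x28/0x40
 ...

Total references: 1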
+190 −2
@@ -36,6 +36,9 @@
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched/signal.h>
#include <linux/fdtable.h>
#include <linux/list_sort.h>

#include <uapi/linux/dma-buf.h>

@@ -48,6 +51,19 @@ struct dma_buf_list {
	struct mutex lock;
};

struct dma_info {
	struct dma_buf *dmabuf;
	struct list_head head;
};

struct dma_proc {
	char name[TASK_COMM_LEN];
	pid_t pid;
	size_t size;
	struct list_head dma_bufs;
	struct list_head head;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
@@ -71,12 +87,14 @@ static int dma_buf_release(struct inode *inode, struct file *file)
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

-	dmabuf->ops->release(dmabuf);
-
	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

+	dmabuf->ops->release(dmabuf);
+
+	dma_buf_ref_destroy(dmabuf);
+
	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

@@ -457,6 +475,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
	dmabuf->name = bufname;
	dmabuf->ktime = ktime_get();

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
@@ -477,6 +496,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	dma_buf_ref_init(dmabuf);
	dma_buf_ref_mod(dmabuf, 1);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);
@@ -538,6 +560,7 @@ struct dma_buf *dma_buf_get(int fd)
		fput(file);
		return ERR_PTR(-EINVAL);
	}
	dma_buf_ref_mod(file->private_data, 1);

	return file->private_data;
}
@@ -558,6 +581,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	dma_buf_ref_mod(dmabuf, -1);
	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
@@ -1203,6 +1227,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		dma_buf_ref_show(s, buf_obj);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
@@ -1226,6 +1252,157 @@ static const struct file_operations dma_buf_debug_fops = {
	.release        = single_release,
};

static bool list_contains(struct list_head *list, struct dma_buf *info)
{
	struct dma_info *curr;

	list_for_each_entry(curr, list, head)
		if (curr->dmabuf == info)
			return true;

	return false;
}

static int get_dma_info(const void *data, struct file *file, unsigned int n)
{
	struct dma_proc *dma_proc;
	struct dma_info *dma_info;

	dma_proc = (struct dma_proc *)data;
	if (!is_dma_buf_file(file))
		return 0;

	if (list_contains(&dma_proc->dma_bufs, file->private_data))
		return 0;

	dma_info = kzalloc(sizeof(*dma_info), GFP_ATOMIC);
	if (!dma_info)
		return -ENOMEM;

	get_file(file);
	dma_info->dmabuf = file->private_data;
	dma_proc->size += dma_info->dmabuf->size / SZ_1K;
	list_add(&dma_info->head, &dma_proc->dma_bufs);
	return 0;
}

static void write_proc(struct seq_file *s, struct dma_proc *proc)
{
	struct dma_info *tmp;

	seq_printf(s, "\n%s (PID %ld) size: %ld\nDMA Buffers:\n",
		proc->name, proc->pid, proc->size);
	seq_printf(s, "%-8s\t%-8s\t%-8s\n",
		"Name", "Size (KB)", "Time Alive (sec)");

	list_for_each_entry(tmp, &proc->dma_bufs, head) {
		struct dma_buf *dmabuf = tmp->dmabuf;
		ktime_t elapmstime = ktime_ms_delta(ktime_get(), dmabuf->ktime);

		elapmstime = ktime_divns(elapmstime, MSEC_PER_SEC);
		seq_printf(s, "%-8s\t%-8ld\t%-8ld\n",
				dmabuf->name,
				dmabuf->size / SZ_1K,
				elapmstime);
	}
}

static void free_proc(struct dma_proc *proc)
{
	struct dma_info *tmp, *n;

	list_for_each_entry_safe(tmp, n, &proc->dma_bufs, head) {
		dma_buf_put(tmp->dmabuf);
		list_del(&tmp->head);
		kfree(tmp);
	}
	kfree(proc);
}

static int dmacmp(void *unused, struct list_head *a, struct list_head *b)
{
	struct dma_info *a_buf, *b_buf;

	a_buf = list_entry(a, struct dma_info, head);
	b_buf = list_entry(b, struct dma_info, head);
	/* Compare rather than subtract: a size_t difference truncated to
	 * int can report the wrong order for large buffers. */
	if (b_buf->dmabuf->size > a_buf->dmabuf->size)
		return 1;
	if (b_buf->dmabuf->size < a_buf->dmabuf->size)
		return -1;
	return 0;
}

static int proccmp(void *unused, struct list_head *a, struct list_head *b)
{
	struct dma_proc *a_proc, *b_proc;

	a_proc = list_entry(a, struct dma_proc, head);
	b_proc = list_entry(b, struct dma_proc, head);
	/* Same comparison form as dmacmp, to avoid size_t truncation. */
	if (b_proc->size > a_proc->size)
		return 1;
	if (b_proc->size < a_proc->size)
		return -1;
	return 0;
}

static int dma_procs_debug_show(struct seq_file *s, void *unused)
{
	struct task_struct *task, *thread;
	struct files_struct *files;
	int ret = 0;
	struct dma_proc *tmp, *n;
	LIST_HEAD(plist);

	read_lock(&tasklist_lock);
	for_each_process(task) {
		struct files_struct *group_leader_files = NULL;

		tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
		if (!tmp) {
			ret = -ENOMEM;
			read_unlock(&tasklist_lock);
			goto mem_err;
		}
		INIT_LIST_HEAD(&tmp->dma_bufs);
		for_each_thread(task, thread) {
			task_lock(thread);
			if (unlikely(!group_leader_files))
				group_leader_files = task->group_leader->files;
			files = thread->files;
			if (files && (group_leader_files != files ||
				      thread == task->group_leader))
				ret = iterate_fd(files, 0, get_dma_info, tmp);
			task_unlock(thread);
		}
		if (ret || list_empty(&tmp->dma_bufs))
			goto skip;
		list_sort(NULL, &tmp->dma_bufs, dmacmp);
		get_task_comm(tmp->name, task);
		tmp->pid = task->tgid;
		list_add(&tmp->head, &plist);
		continue;
skip:
		free_proc(tmp);
	}
	read_unlock(&tasklist_lock);

	list_sort(NULL, &plist, proccmp);
	list_for_each_entry(tmp, &plist, head)
		write_proc(s, tmp);

	ret = 0;
mem_err:
	list_for_each_entry_safe(tmp, n, &plist, head) {
		list_del(&tmp->head);
		free_proc(tmp);
	}
	return ret;
}

static int dma_procs_debug_open(struct inode *f_inode, struct file *file)
{
	return single_open(file, dma_procs_debug_show, NULL);
}

static const struct file_operations dma_procs_debug_fops = {
	.open           = dma_procs_debug_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
@@ -1246,6 +1423,17 @@ static int dma_buf_init_debugfs(void)
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
		return err;
	}

	d = debugfs_create_file("dmaprocs", 0444, dma_buf_debugfs_dir,
				NULL, &dma_procs_debug_fops);

	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node dmaprocs\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
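
write_proc() above emits one header per process followed by a row per buffer, so the new dmaprocs debugfs node would print something along these lines (process names, PIDs, buffer names, and values are illustrative only):

example_app (PID 1234) size: 2048 KB
DMA Buffers:
Name    	Size (KB)	Time Alive (sec)
dmabuf1 	1024    	12
dmabuf2 	1024    	7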
+44 −5
@@ -238,10 +238,17 @@ static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32

-int hyp_assign_table(struct sg_table *table,
+/*
+ *  When -EAGAIN is returned it is safe for the caller to try to call
+ *  __hyp_assign_table again.
+ *
+ *  When -EADDRNOTAVAIL is returned the memory may no longer be in
+ *  a usable state and should no longer be accessed by the HLOS.
+ */
+static int __hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
-			int dest_nelems)
+			int dest_nelems, bool try_lock)
{
	int ret = 0;
	struct scm_desc desc = {0};
@@ -271,10 +278,17 @@ int hyp_assign_table(struct sg_table *table,
					  &dest_vm_copy_size);
	if (!dest_vm_copy) {
		ret = -ENOMEM;
-		goto out_free;
+		goto out_free_src;
	}

	if (try_lock) {
		if (!mutex_trylock(&secure_buffer_mutex)) {
			ret = -EAGAIN;
			goto out_free_dest;
		}
	} else {
		mutex_lock(&secure_buffer_mutex);
	}

	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
	if (!sg_table_copy) {
@@ -330,6 +344,12 @@ int hyp_assign_table(struct sg_table *table,
		if (ret) {
			pr_info("%s: Failed to assign memory protection, ret = %d\n",
				__func__, ret);

			/*
			 * Make it clear to clients that the memory may no
			 * longer be in a usable state.
			 */
			ret = -EADDRNOTAVAIL;
			break;
		}
		batch_start = batch_end;
@@ -337,12 +357,31 @@ int hyp_assign_table(struct sg_table *table,

out_unlock:
	mutex_unlock(&secure_buffer_mutex);
+out_free_dest:
	kfree(dest_vm_copy);
-out_free:
+out_free_src:
	kfree(source_vm_copy);
	return ret;
}

int hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	return __hyp_assign_table(table, source_vm_list, source_nelems,
				  dest_vmids, dest_perms, dest_nelems, false);
}

int try_hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	return __hyp_assign_table(table, source_vm_list, source_nelems,
				  dest_vmids, dest_perms, dest_nelems, true);
}

int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
			int source_nelems, int *dest_vmids,
			int *dest_perms, int dest_nelems)
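
The comment added above __hyp_assign_table() spells out the retry contract: -EAGAIN means the secure-buffer mutex was contended and the call may simply be retried, while -EADDRNOTAVAIL means the memory may already be unusable by the HLOS. A minimal caller sketch under that contract, assuming a sleepable context (the VMID and permission values below are placeholders, not taken from this change):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <soc/qcom/secure_buffer.h>

/* Hypothetical caller, for illustration only. */
static int example_assign(struct sg_table *table)
{
	u32 src_vm[] = { VMID_HLOS };
	int dst_vm[] = { VMID_CP_PIXEL };
	int dst_perm[] = { PERM_READ | PERM_WRITE };
	int ret;

	do {
		ret = try_hyp_assign_table(table, src_vm, ARRAY_SIZE(src_vm),
					   dst_vm, dst_perm,
					   ARRAY_SIZE(dst_vm));
		if (ret == -EAGAIN)
			usleep_range(50, 100);	/* lock contended; retry */
	} while (ret == -EAGAIN);

	if (ret == -EADDRNOTAVAIL)
		pr_err("buffer may no longer be usable by HLOS\n");

	return ret;
}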