Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f4686dcc authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Add sparse memory support"

parents 9dbc6e03 aa29f626
Loading
Loading
Loading
Loading
+584 −18
Original line number Diff line number Diff line
@@ -78,6 +78,16 @@ struct kgsl_dma_buf_meta {
	struct sg_table *table;
};

/*
 * Select the pagetable an entry's memdesc should be mapped into:
 * secure buffers always go to the MMU's global secure pagetable,
 * everything else stays in the pagetable passed by the caller.
 */
static inline struct kgsl_pagetable *_get_memdesc_pagetable(
		struct kgsl_pagetable *pt, struct kgsl_mem_entry *entry)
{
	bool secured = kgsl_memdesc_is_secured(&entry->memdesc);

	return secured ? pt->mmu->securepagetable : pt;
}

static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);

static const struct file_operations kgsl_fops;
@@ -445,13 +455,16 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,

	/* map the memory after unlocking if gpuaddr has been assigned */
	if (entry->memdesc.gpuaddr) {
		/* if a secured buffer map it to secure global pagetable */
		if (kgsl_memdesc_is_secured(&entry->memdesc))
			pagetable = process->pagetable->mmu->securepagetable;
		else
		pagetable = process->pagetable;
		if (kgsl_memdesc_is_secured(&entry->memdesc))
			pagetable = pagetable->mmu->securepagetable;

		entry->memdesc.pagetable = pagetable;

		if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT)
			ret = kgsl_mmu_sparse_dummy_map(pagetable,
				&entry->memdesc, 0, entry->memdesc.size);
		else if (entry->memdesc.gpuaddr)
			ret = kgsl_mmu_map(pagetable, &entry->memdesc);
		if (ret)
			kgsl_mem_entry_detach_process(entry);
@@ -1270,6 +1283,24 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
}
EXPORT_SYMBOL(kgsl_sharedmem_find);

/**
 * kgsl_sharedmem_find_id_flags - find a memory entry by id and flags
 * @process: the owning process
 * @id: id to find
 * @flags: flags that must all be set on the entry's memdesc
 *
 * Takes a reference on the entry when found. Returns NULL if the id is
 * unknown, the entry is pending free, or the flags do not all match.
 */
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id_flags(struct kgsl_process_private *process,
		unsigned int id, uint64_t flags)
{
	struct kgsl_mem_entry *entry;
	int refs = 0;

	spin_lock(&process->mem_lock);
	entry = idr_find(&process->mem_idr, id);
	if (entry && !entry->pending_free &&
			(flags & entry->memdesc.flags) == flags)
		refs = kgsl_mem_entry_get(entry);
	spin_unlock(&process->mem_lock);

	return refs ? entry : NULL;
}

/**
 * kgsl_sharedmem_find_id() - find a memory entry by id
 * @process: the owning process
@@ -1283,19 +1314,7 @@ EXPORT_SYMBOL(kgsl_sharedmem_find);
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id)
{
	/*
	 * Delegate to the flags-aware lookup; flags == 0 matches any
	 * entry. The old open-coded body (including its drain_workqueue
	 * call and second, unreachable return) is superseded by this.
	 */
	return kgsl_sharedmem_find_id_flags(process, id, 0);
}

/**
@@ -3121,6 +3140,546 @@ long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
	return result;
}

/*
 * Validate a user-supplied sparse allocation request: size and pagesize
 * must be non-zero, pagesize must be exactly 4K or 64K, and size must be
 * a multiple of pagesize at least one page long.
 *
 * Returns 0 when the parameters are acceptable, -EINVAL otherwise.
 */
static inline int _sparse_alloc_param_sanity_check(uint64_t size,
		uint64_t pagesize)
{
	bool valid = size != 0 && pagesize != 0 &&
		(pagesize == PAGE_SIZE || pagesize == SZ_64K) &&
		pagesize <= size &&
		IS_ALIGNED(size, pagesize);

	return valid ? 0 : -EINVAL;
}

/*
 * IOCTL_KGSL_SPARSE_PHYS_ALLOC: allocate physical backing pages for
 * sparse bindings. An idr slot is reserved up front with a NULL pointer
 * so concurrent lookups cannot see the entry until it is committed; the
 * pages are then allocated and their scatterlist layout is verified
 * against the requested pagesize before the entry is published.
 *
 * Returns 0 and fills @param->id / @param->flags on success, or a
 * negative errno.
 */
long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	struct kgsl_process_private *process = dev_priv->process_priv;
	struct kgsl_sparse_phys_alloc *param = data;
	struct kgsl_mem_entry *entry;
	int ret;
	int id;

	ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
	if (ret)
		return ret;

	entry = kgsl_mem_entry_create();
	if (entry == NULL)
		return -ENOMEM;

	/* Hold the process across the allocation; 0 means it is going away */
	ret = kgsl_process_private_get(process);
	if (!ret) {
		ret = -EBADF;
		goto err_free_entry;
	}

	idr_preload(GFP_KERNEL);
	spin_lock(&process->mem_lock);
	/* Allocate the ID but don't attach the pointer just yet */
	id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&process->mem_lock);
	idr_preload_end();

	if (id < 0) {
		ret = id;
		goto err_put_proc_priv;
	}

	entry->id = id;
	entry->priv = process;

	entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_PHYS;
	kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));

	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
			process->pagetable, param->size, entry->memdesc.flags);
	if (ret)
		goto err_remove_idr;

	/* Sanity check to verify we got correct pagesize */
	if (param->pagesize != PAGE_SIZE && entry->memdesc.sgt != NULL) {
		struct scatterlist *s;
		int i;

		for_each_sg(entry->memdesc.sgt->sgl, s,
				entry->memdesc.sgt->nents, i) {
			if (!IS_ALIGNED(s->length, param->pagesize)) {
				/*
				 * Fix: ret was still 0 here, so this error
				 * path previously returned success to
				 * userspace while freeing the entry and
				 * removing its id behind the caller's back.
				 */
				ret = -EINVAL;
				goto err_invalid_pages;
			}
		}
	}

	param->id = entry->id;
	param->flags = entry->memdesc.flags;

	trace_sparse_phys_alloc(entry->id, param->size, param->pagesize);
	kgsl_mem_entry_commit_process(entry);

	return 0;

err_invalid_pages:
	kgsl_sharedmem_free(&entry->memdesc);
err_remove_idr:
	spin_lock(&process->mem_lock);
	idr_remove(&process->mem_idr, entry->id);
	spin_unlock(&process->mem_lock);
err_put_proc_priv:
	kgsl_process_private_put(process);
err_free_entry:
	kfree(entry);

	return ret;
}

/*
 * IOCTL_KGSL_SPARSE_PHYS_FREE: free a sparse physical allocation.
 * Fails if @param->id does not name a sparse-phys entry owned by the
 * caller, or if any of its pages are still bound to a virtual range.
 */
long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	struct kgsl_sparse_phys_free *param = data;
	struct kgsl_process_private *process = dev_priv->process_priv;
	struct kgsl_mem_entry *entry;

	entry = kgsl_sharedmem_find_id_flags(process, param->id,
			KGSL_MEMFLAGS_SPARSE_PHYS);
	if (!entry)
		return -EINVAL;

	if (entry->memdesc.cur_bindings) {
		/* Pages still bound somewhere - refuse to free */
		kgsl_mem_entry_put(entry);
		return -EINVAL;
	}

	trace_sparse_phys_free(entry->id);

	/* Drop the find_id() reference and the creation reference */
	kgsl_mem_entry_put(entry);
	kgsl_mem_entry_put(entry);

	return 0;
}

/*
 * IOCTL_KGSL_SPARSE_VIRT_ALLOC: create a sparse virtual allocation of
 * @param->size bytes - a GPU address range with no physical backing.
 * The range is populated later via IOCTL_KGSL_SPARSE_BIND. On success
 * the new entry's id, gpuaddr and flags are returned through @param.
 */
long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	struct kgsl_sparse_virt_alloc *param = data;
	struct kgsl_mem_entry *entry;
	int ret;

	ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
	if (ret)
		return ret;

	entry = kgsl_mem_entry_create();
	if (entry == NULL)
		return -ENOMEM;

	/* Virtual-only entry: the flag routes map/free paths appropriately */
	entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_VIRT;
	entry->memdesc.size = param->size;
	entry->memdesc.cur_bindings = 0;
	kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));

	/* Binding bookkeeping: rb tree of sparse_bind_object under bind_lock */
	spin_lock_init(&entry->bind_lock);
	entry->bind_tree = RB_ROOT;

	ret = kgsl_mem_entry_attach_process(entry, dev_priv);
	if (ret) {
		/* Entry never became visible to the process; plain kfree */
		kfree(entry);
		return ret;
	}

	param->id = entry->id;
	param->gpuaddr = entry->memdesc.gpuaddr;
	param->flags = entry->memdesc.flags;

	trace_sparse_virt_alloc(entry->id, param->size, param->pagesize);
	kgsl_mem_entry_commit_process(entry);

	return 0;
}

/*
 * IOCTL_KGSL_SPARSE_VIRT_FREE: free a sparse virtual allocation.
 * Fails if @param->id does not name a sparse-virt entry owned by the
 * caller, or if its bind tree still contains any bindings.
 */
long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	struct kgsl_sparse_virt_free *param = data;
	struct kgsl_process_private *process = dev_priv->process_priv;
	struct kgsl_mem_entry *entry;

	entry = kgsl_sharedmem_find_id_flags(process, param->id,
			KGSL_MEMFLAGS_SPARSE_VIRT);
	if (!entry)
		return -EINVAL;

	if (entry->bind_tree.rb_node) {
		/* Ranges still bound - refuse to free */
		kgsl_mem_entry_put(entry);
		return -EINVAL;
	}

	trace_sparse_virt_free(entry->id);

	/* Drop the find_id() reference and the creation reference */
	kgsl_mem_entry_put(entry);
	kgsl_mem_entry_put(entry);

	return 0;
}

/*
 * Record a binding of [v_offset, v_offset + size) of the virtual entry,
 * backed by @memdesc starting at @p_offset, in @entry's rb tree (keyed
 * by virtual offset).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EEXIST if a
 * node with the same virtual offset is already present.
 */
static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry,
		uint64_t v_offset,
		struct kgsl_memdesc *memdesc,
		uint64_t p_offset,
		uint64_t size,
		uint64_t flags)
{
	struct sparse_bind_object *new;
	struct rb_node **node, *parent = NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL)
		return -ENOMEM;

	new->v_off = v_offset;
	new->p_off = p_offset;
	new->p_memdesc = memdesc;
	new->size = size;
	new->flags = flags;

	node = &entry->bind_tree.rb_node;

	while (*node != NULL) {
		struct sparse_bind_object *this;

		parent = *node;
		this = rb_entry(parent, struct sparse_bind_object, node);

		if (new->v_off < this->v_off) {
			node = &parent->rb_left;
		} else if (new->v_off > this->v_off) {
			node = &parent->rb_right;
		} else {
			/*
			 * Fix: equal keys previously left *node untouched,
			 * so this loop spun forever. Callers always unbind
			 * the range first, so a duplicate is an error.
			 */
			kfree(new);
			return -EEXIST;
		}
	}

	rb_link_node(&new->node, parent, node);
	rb_insert_color(&new->node, &entry->bind_tree);

	return 0;
}

/*
 * Shrink or remove @obj from @entry's bind tree to reflect an unbind of
 * [v_offset, v_offset + size). Four cases: remove the whole node, trim
 * its front, trim its back, or split it in two when the unbound range
 * is interior.
 *
 * Takes entry->bind_lock internally; the caller must not hold it.
 */
static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry,
		struct sparse_bind_object *obj,
		uint64_t v_offset, uint64_t size)
{
	spin_lock(&entry->bind_lock);
	if (v_offset == obj->v_off && size >= obj->size) {
		/*
		 * We are all encompassing, remove the entry and free
		 * things up
		 */
		rb_erase(&obj->node, &entry->bind_tree);
		kfree(obj);
	} else if (v_offset == obj->v_off) {
		/*
		 * We are the front of the node, adjust the front of
		 * the node
		 */
		obj->v_off += size;
		obj->p_off += size;
		obj->size -= size;
	} else if ((v_offset + size) == (obj->v_off + obj->size)) {
		/*
		 * We are at the end of the obj, adjust the beginning
		 * points
		 */
		obj->size -= size;
	} else {
		/*
		 * We are in the middle of a node, split it up and
		 * create a new mini node. Adjust this node's bounds
		 * and add the new node to the list.
		 */
		uint64_t tmp_size = obj->size;
		int ret;

		obj->size = v_offset - obj->v_off;

		/*
		 * NOTE(review): bind_lock is dropped before the second half
		 * is inserted (the insert allocates with GFP_KERNEL, which
		 * may sleep); confirm no concurrent tree walker can observe
		 * the half-split state in that window.
		 */
		spin_unlock(&entry->bind_lock);
		ret = _sparse_add_to_bind_tree(entry, v_offset + size,
				obj->p_memdesc,
				obj->p_off + (v_offset - obj->v_off) + size,
				tmp_size - (v_offset - obj->v_off) - size,
				obj->flags);

		return ret;
	}

	spin_unlock(&entry->bind_lock);

	return 0;
}

/*
 * Find a bind object in @entry's tree whose range overlaps
 * [offset, offset + size), or return NULL if nothing overlaps.
 *
 * NOTE(review): the pointer is returned after bind_lock is dropped;
 * callers rely on higher-level serialization to keep it valid.
 */
static struct sparse_bind_object *_find_containing_bind_obj(
		struct kgsl_mem_entry *entry,
		uint64_t offset, uint64_t size)
{
	struct sparse_bind_object *obj = NULL;
	struct rb_node *node = entry->bind_tree.rb_node;

	spin_lock(&entry->bind_lock);

	while (node != NULL) {
		obj = rb_entry(node, struct sparse_bind_object, node);

		if (offset == obj->v_off) {
			/* Exact start match */
			break;
		} else if (offset < obj->v_off) {
			/* Request starts before this node but may reach into it */
			if (offset + size > obj->v_off)
				break;
			node = node->rb_left;
			obj = NULL;
		} else if (offset > obj->v_off) {
			/* Request starts after this node but may be inside it */
			if (offset < obj->v_off + obj->size)
				break;
			node = node->rb_right;
			obj = NULL;
		}
	}

	spin_unlock(&entry->bind_lock);

	return obj;
}

/*
 * Unbind [offset, offset + size) of the sparse virtual @entry currently
 * backed by @bind_obj's physical memdesc: drop the real mapping, map
 * the dummy sparse page in its place, and trim the bind tree.
 *
 * @offset and @size must describe a range contained in @bind_obj.
 */
static int _sparse_unbind(struct kgsl_mem_entry *entry,
		struct sparse_bind_object *bind_obj,
		uint64_t offset, uint64_t size)
{
	struct kgsl_memdesc *memdesc = bind_obj->p_memdesc;
	struct kgsl_pagetable *pt = memdesc->pagetable;
	int ret;

	/* Guard the physical entry's bind counter against underflow */
	if (memdesc->cur_bindings < (size / PAGE_SIZE))
		return -EINVAL;

	memdesc->cur_bindings -= size / PAGE_SIZE;

	ret = kgsl_mmu_unmap_offset(pt, memdesc,
			entry->memdesc.gpuaddr, offset, size);
	if (ret)
		return ret;

	/* Re-point the hole at the dummy mapping so the range stays valid */
	ret = kgsl_mmu_sparse_dummy_map(pt, &entry->memdesc, offset, size);
	if (ret)
		return ret;

	ret = _sparse_rm_from_bind_tree(entry, bind_obj, offset, size);
	if (ret == 0) {
		atomic_long_sub(size, &kgsl_driver.stats.mapped);
		trace_sparse_unbind(entry->id, offset, size);
	}

	return ret;
}

/*
 * Unbind everything overlapping [obj->virtoffset, +obj->size) from
 * @virt_entry, walking each overlapping bind object in turn. Returns 0
 * when the range is fully processed (or nothing was bound there), or
 * the first error from an individual unbind.
 */
static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj,
	struct kgsl_mem_entry *virt_entry)
{
	struct sparse_bind_object *bind_obj;
	int ret = 0;
	uint64_t size = obj->size;
	uint64_t tmp_size = obj->size;
	uint64_t offset = obj->virtoffset;

	while (size > 0 && ret == 0) {
		tmp_size = size;
		bind_obj = _find_containing_bind_obj(virt_entry, offset, size);
		if (bind_obj == NULL)
			return 0;

		if (bind_obj->v_off > offset) {
			/*
			 * Request starts in an unbound gap; skip to the node
			 * and unbind only what is left of the request.
			 * Fix: was "size - bind_obj->v_off - offset", which
			 * mis-parenthesized the gap subtraction and computed
			 * a bogus (possibly wrapped) length.
			 */
			tmp_size = size - (bind_obj->v_off - offset);
			if (tmp_size > bind_obj->size)
				tmp_size = bind_obj->size;
			offset = bind_obj->v_off;
		} else if (bind_obj->v_off < offset) {
			/* Request starts inside the node; clamp to its end */
			uint64_t diff = offset - bind_obj->v_off;

			if (diff + size > bind_obj->size)
				tmp_size = bind_obj->size - diff;
		} else {
			/* Starts exactly at the node; clamp to the node size */
			if (tmp_size > bind_obj->size)
				tmp_size = bind_obj->size;
		}

		ret = _sparse_unbind(virt_entry, bind_obj, offset, tmp_size);
		if (ret == 0) {
			offset += tmp_size;
			size -= tmp_size;
		}
	}

	return ret;
}

/*
 * Check whether [offset, offset + size) of @phys_entry can be bound:
 * both offset and size must be aligned to the entry's pagesize, and
 * unless KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS is set the range must lie
 * entirely inside the physical allocation.
 */
static inline bool _is_phys_bindable(struct kgsl_mem_entry *phys_entry,
		uint64_t offset, uint64_t size, uint64_t flags)
{
	struct kgsl_memdesc *memdesc = &phys_entry->memdesc;

	if (!IS_ALIGNED(offset | size, kgsl_memdesc_get_pagesize(memdesc)))
		return false;

	/*
	 * Bounds-check without computing offset + size: these are
	 * untrusted ioctl values and the addition can wrap a u64,
	 * letting a huge offset slip past the check.
	 */
	if (!(flags & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
			(offset >= memdesc->size ||
			 size > memdesc->size - offset))
		return false;

	return true;
}

/*
 * Map [p_offset, p_offset + size) of @phys_entry into @virt_entry at
 * virtual offset @v_offset and record the binding in the virtual
 * entry's bind tree.
 *
 * @process is currently unused in this function; it is kept so the
 * signature matches the calling convention of sparse_bind_range().
 */
static int _sparse_bind(struct kgsl_process_private *process,
		struct kgsl_mem_entry *virt_entry, uint64_t v_offset,
		struct kgsl_mem_entry *phys_entry, uint64_t p_offset,
		uint64_t size, uint64_t flags)
{
	int ret;
	struct kgsl_pagetable *pagetable;
	struct kgsl_memdesc *memdesc = &phys_entry->memdesc;

	/* A phys entry that is already GPU-mapped cannot be sparse-bound */
	if (memdesc->gpuaddr)
		return -EINVAL;

	/* Nor one that is mapped into a CPU address space */
	if (memdesc->useraddr != 0)
		return -EINVAL;

	pagetable = memdesc->pagetable;

	/* Clear out any mappings */
	ret = kgsl_mmu_unmap_offset(pagetable, &virt_entry->memdesc,
			virt_entry->memdesc.gpuaddr, v_offset, size);
	if (ret)
		return ret;

	ret = kgsl_mmu_map_offset(pagetable, virt_entry->memdesc.gpuaddr,
			v_offset, memdesc, p_offset, size, flags);
	if (ret) {
		/* Try to clean up, but not the end of the world */
		kgsl_mmu_sparse_dummy_map(pagetable, &virt_entry->memdesc,
				v_offset, size);
		return ret;
	}

	ret = _sparse_add_to_bind_tree(virt_entry, v_offset, memdesc,
			p_offset, size, flags);
	if (ret == 0)
		memdesc->cur_bindings += size / PAGE_SIZE;

	return ret;
}

/*
 * Handle one KGSL_SPARSE_BIND operation: look up the physical entry by
 * id, validate the request, unbind anything already covering the target
 * virtual range, then bind the physical pages and account the mapping.
 */
static long sparse_bind_range(struct kgsl_process_private *private,
		struct kgsl_sparse_binding_object *obj,
		struct kgsl_mem_entry *virt_entry)
{
	struct kgsl_mem_entry *phys_entry;
	int ret;

	phys_entry = kgsl_sharedmem_find_id_flags(private, obj->id,
			KGSL_MEMFLAGS_SPARSE_PHYS);
	if (phys_entry == NULL)
		return -EINVAL;

	if (!_is_phys_bindable(phys_entry, obj->physoffset, obj->size,
				obj->flags)) {
		kgsl_mem_entry_put(phys_entry);
		return -EINVAL;
	}

	/* Virtual and physical entries must use the same page size */
	if (kgsl_memdesc_get_align(&virt_entry->memdesc) !=
			kgsl_memdesc_get_align(&phys_entry->memdesc)) {
		kgsl_mem_entry_put(phys_entry);
		return -EINVAL;
	}

	/* Replace any existing bindings in the target range */
	ret = sparse_unbind_range(obj, virt_entry);
	if (ret) {
		/* NOTE(review): the unbind error code is flattened to -EINVAL */
		kgsl_mem_entry_put(phys_entry);
		return -EINVAL;
	}

	/* Only the MULTIPLE_TO_PHYS bit is forwarded as a mapping flag */
	ret = _sparse_bind(private, virt_entry, obj->virtoffset,
			phys_entry, obj->physoffset, obj->size,
			obj->flags & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS);
	if (ret == 0) {
		KGSL_STATS_ADD(obj->size, &kgsl_driver.stats.mapped,
				&kgsl_driver.stats.mapped_max);

		trace_sparse_bind(virt_entry->id, obj->virtoffset,
				phys_entry->id, obj->physoffset,
				obj->size, obj->flags);
	}

	kgsl_mem_entry_put(phys_entry);

	return ret;
}

/*
 * IOCTL_KGSL_SPARSE_BIND: process a user-supplied list of bind/unbind
 * operations against a sparse virtual allocation.
 *
 * @param->list points to @param->count elements of @param->size bytes
 * each (param->size may be smaller than the kernel struct to support
 * older userspace). Processing stops at the first failing operation
 * and that operation's error is returned.
 */
long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_sparse_bind *param = data;
	struct kgsl_sparse_binding_object obj;
	struct kgsl_mem_entry *virt_entry;
	int pg_sz;
	void __user *ptr;
	int ret = 0;
	int i = 0;

	ptr = (void __user *) (uintptr_t) param->list;

	/* Fix: also reject a zero element size, not just oversized ones */
	if (param->size > sizeof(struct kgsl_sparse_binding_object) ||
		param->size == 0 || param->count == 0 || ptr == NULL)
		return -EINVAL;

	virt_entry = kgsl_sharedmem_find_id_flags(private, param->id,
			KGSL_MEMFLAGS_SPARSE_VIRT);
	if (virt_entry == NULL)
		return -EINVAL;

	pg_sz = kgsl_memdesc_get_pagesize(&virt_entry->memdesc);

	for (i = 0; i < param->count; i++) {
		memset(&obj, 0, sizeof(obj));
		ret = _copy_from_user(&obj, ptr, sizeof(obj), param->size);
		if (ret)
			break;

		/* Sanity check initial range */
		if (obj.size == 0 ||
			obj.virtoffset + obj.size > virt_entry->memdesc.size ||
			!(IS_ALIGNED(obj.virtoffset | obj.size, pg_sz))) {
			ret = -EINVAL;
			break;
		}

		if (obj.flags & KGSL_SPARSE_BIND)
			ret = sparse_bind_range(private, &obj, virt_entry);
		else if (obj.flags & KGSL_SPARSE_UNBIND)
			ret = sparse_unbind_range(&obj, virt_entry);
		else
			ret = -EINVAL;
		if (ret)
			break;

		/*
		 * Fix: advance by the userspace element size. The user
		 * array is packed at param->size-byte stride (that is what
		 * _copy_from_user consumes); stepping by sizeof(obj) read
		 * from the wrong offsets whenever param->size was smaller.
		 */
		ptr += param->size;
	}

	kgsl_mem_entry_put(virt_entry);

	return ret;
}

long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
@@ -3356,6 +3915,13 @@ get_mmap_entry(struct kgsl_process_private *private,
		goto err_put;
	}

	if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_PHYS) {
		if (len != entry->memdesc.size) {
			ret = -EINVAL;
			goto err_put;
		}
	}

	if (entry->memdesc.useraddr != 0) {
		ret = -EBUSY;
		goto err_put;
+37 −0
Original line number Diff line number Diff line
@@ -184,6 +184,7 @@ struct kgsl_memdesc_ops {
 * @attrs: dma attributes for this memory
 * @pages: An array of pointers to allocated pages
 * @page_count: Total number of pages allocated
 * @cur_bindings: Number of sparse pages actively bound
 */
struct kgsl_memdesc {
	struct kgsl_pagetable *pagetable;
@@ -202,6 +203,7 @@ struct kgsl_memdesc {
	struct dma_attrs attrs;
	struct page **pages;
	unsigned int page_count;
	unsigned int cur_bindings;
};

/*
@@ -235,6 +237,8 @@ struct kgsl_memdesc {
 * @dev_priv: back pointer to the device file that created this entry.
 * @metadata: String containing user specified metadata for the entry
 * @work: Work struct used to schedule a kgsl_mem_entry_put in atomic contexts
 * @bind_lock: Lock for sparse memory bindings
 * @bind_tree: RB Tree for sparse memory bindings
 */
struct kgsl_mem_entry {
	struct kref refcount;
@@ -246,6 +250,8 @@ struct kgsl_mem_entry {
	int pending_free;
	char metadata[KGSL_GPUOBJ_ALLOC_METADATA_MAX + 1];
	struct work_struct work;
	spinlock_t bind_lock;
	struct rb_root bind_tree;
};

struct kgsl_device_private;
@@ -315,6 +321,24 @@ struct kgsl_protected_registers {
	int range;
};

/**
 * struct sparse_bind_object - Bind metadata
 * @node: Node for the rb tree, keyed by @v_off
 * @p_memdesc: Physical memdesc bound to
 * @v_off: Offset of bind in the virtual entry
 * @p_off: Offset of bind in the physical memdesc
 * @size: Size of the bind
 * @flags: Flags for the bind
 */
struct sparse_bind_object {
	struct rb_node node;
	struct kgsl_memdesc *p_memdesc;
	uint64_t v_off;
	uint64_t p_off;
	uint64_t size;
	uint64_t flags;
};

long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
					  unsigned int cmd, void *data);
long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
@@ -377,6 +401,19 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);

long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);

void kgsl_mem_entry_destroy(struct kref *kref);

struct kgsl_mem_entry * __must_check
+11 −1
Original line number Diff line number Diff line
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -372,6 +372,16 @@ static const struct kgsl_ioctl kgsl_compat_ioctl_funcs[] = {
			kgsl_ioctl_gpu_command),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUOBJ_SET_INFO,
			kgsl_ioctl_gpuobj_set_info),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_PHYS_ALLOC,
			kgsl_ioctl_sparse_phys_alloc),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_PHYS_FREE,
			kgsl_ioctl_sparse_phys_free),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_VIRT_ALLOC,
			kgsl_ioctl_sparse_virt_alloc),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_VIRT_FREE,
			kgsl_ioctl_sparse_virt_free),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
			kgsl_ioctl_sparse_bind),
};

long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+79 −2
Original line number Diff line number Diff line
@@ -129,10 +129,13 @@ static int print_mem_entry(int id, void *ptr, void *data)
{
	struct seq_file *s = data;
	struct kgsl_mem_entry *entry = ptr;
	char flags[9];
	char flags[10];
	char usage[16];
	struct kgsl_memdesc *m = &entry->memdesc;

	if (m->flags & KGSL_MEMFLAGS_SPARSE_VIRT)
		return 0;

	flags[0] = kgsl_memdesc_is_global(m) ?  'g' : '-';
	flags[1] = '-';
	flags[2] = !(m->flags & KGSL_MEMFLAGS_GPUREADONLY) ? 'w' : '-';
@@ -141,7 +144,8 @@ static int print_mem_entry(int id, void *ptr, void *data)
	flags[5] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-';
	flags[6] = (m->useraddr) ? 'Y' : 'N';
	flags[7] = kgsl_memdesc_is_secured(m) ?  's' : '-';
	flags[8] = '\0';
	flags[8] = m->flags & KGSL_MEMFLAGS_SPARSE_PHYS ? 'P' : '-';
	flags[9] = '\0';

	kgsl_get_memory_usage(usage, sizeof(usage), m->flags);

@@ -211,6 +215,70 @@ static const struct file_operations process_mem_fops = {
	.release = process_mem_release,
};

/*
 * idr_for_each() callback: print every binding of one sparse virtual
 * entry to the seq_file passed via @data. Non-sparse-virt entries are
 * skipped.
 */
static int print_sparse_mem_entry(int id, void *ptr, void *data)
{
	struct seq_file *s = data;
	struct kgsl_mem_entry *entry = ptr;
	struct kgsl_memdesc *m = &entry->memdesc;
	struct rb_node *node;

	/* Only sparse virtual entries carry a bind tree */
	if (!(m->flags & KGSL_MEMFLAGS_SPARSE_VIRT))
		return 0;

	/*
	 * NOTE(review): the tree is walked under the caller's mem_lock but
	 * without entry->bind_lock - confirm bind/unbind cannot race with
	 * this debugfs read.
	 */
	node = rb_first(&entry->bind_tree);

	while (node != NULL) {
		struct sparse_bind_object *obj = rb_entry(node,
				struct sparse_bind_object, node);
		seq_printf(s, "%5d %16llx %16llx %16llx %16llx\n",
				entry->id, entry->memdesc.gpuaddr,
				obj->v_off, obj->size, obj->p_off);
		node = rb_next(node);
	}

	seq_putc(s, '\n');

	return 0;
}

/* seq_file show handler: dump all sparse bindings of one process */
static int process_sparse_mem_print(struct seq_file *s, void *unused)
{
	struct kgsl_process_private *priv = s->private;

	seq_printf(s, "%5s %16s %16s %16s %16s\n",
		   "v_id", "gpuaddr", "v_offset", "v_size", "p_offset");

	spin_lock(&priv->mem_lock);
	idr_for_each(&priv->mem_idr, print_sparse_mem_entry, s);
	spin_unlock(&priv->mem_lock);

	return 0;
}

/*
 * debugfs open: resolve the pid stashed in i_private to its process
 * private struct (taking a reference) and hand it to seq_file. The
 * reference is dropped by the fops release handler on close, or here
 * immediately if single_open() fails.
 */
static int process_sparse_mem_open(struct inode *inode, struct file *file)
{
	int ret;
	pid_t pid = (pid_t) (unsigned long) inode->i_private;
	struct kgsl_process_private *private = NULL;

	private = kgsl_process_private_find(pid);

	if (!private)
		return -ENODEV;

	ret = single_open(file, process_sparse_mem_print, private);
	if (ret)
		kgsl_process_private_put(private);

	return ret;
}

/* Reuses process_mem_release() to drop the process reference on close */
static const struct file_operations process_sparse_mem_fops = {
	.open = process_sparse_mem_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = process_mem_release,
};

/**
 * kgsl_process_init_debugfs() - Initialize debugfs for a process
@@ -251,6 +319,15 @@ void kgsl_process_init_debugfs(struct kgsl_process_private *private)
	if (IS_ERR_OR_NULL(dentry))
		WARN((dentry == NULL),
			"Unable to create 'mem' file for %s\n", name);

	dentry = debugfs_create_file("sparse_mem", 0444, private->debug_root,
		(void *) ((unsigned long) private->pid),
		&process_sparse_mem_fops);

	if (IS_ERR_OR_NULL(dentry))
		WARN((dentry == NULL),
			"Unable to create 'sparse_mem' file for %s\n", name);

}

void kgsl_core_debugfs_init(void)
+11 −1
Original line number Diff line number Diff line
/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -90,6 +90,16 @@ static const struct kgsl_ioctl kgsl_ioctl_funcs[] = {
			kgsl_ioctl_gpu_command),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUOBJ_SET_INFO,
			kgsl_ioctl_gpuobj_set_info),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_PHYS_ALLOC,
			kgsl_ioctl_sparse_phys_alloc),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_PHYS_FREE,
			kgsl_ioctl_sparse_phys_free),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_VIRT_ALLOC,
			kgsl_ioctl_sparse_virt_alloc),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_VIRT_FREE,
			kgsl_ioctl_sparse_virt_free),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
			kgsl_ioctl_sparse_bind),
};

long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,
Loading