Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bbbc80b6 authored by Swathi Sridhar's avatar Swathi Sridhar
Browse files

ion : Merge ion changes from msm-4.14 to msm-kona.



This patch merges changes pertinent to ion from msm-4.14
to msm-kona.

Conflicts:
	include/linux/oom.h

Change-Id: I33239643d8eb5e98f6d7529ff986db03b043da2d
Signed-off-by: default avatarSwathi Sridhar <swatsrid@codeaurora.org>
parent 4008eb49
Loading
Loading
Loading
Loading
+59 −0
Original line number Diff line number Diff line
ION Memory Manager (ION)

ION is a memory manager that allows for sharing of buffers between different
processes and between user space and kernel space. ION manages different
memory spaces by separating the memory spaces into "heaps".

Required properties for Ion

- compatible: "qcom,msm-ion"


All child nodes of a qcom,msm-ion node are interpreted as Ion heap
configurations.

Required properties for Ion heaps

- reg: The ID of the ION heap.
- qcom,ion-heap-type: The heap type to use for this heap. Should be one of
  the following:
    - "SYSTEM"
    - "CARVEOUT"
    - "DMA"
    - "HYP_CMA"
    - "SYSTEM_SECURE"
    - "SECURE_DMA"

Optional properties for Ion heaps

- memory-region: phandle to memory region associated with heap.

Example:
	qcom,ion {
                 compatible = "qcom,msm-ion";
                 #address-cells = <1>;
                 #size-cells = <0>;

                 system_heap: qcom,ion-heap@25 {
                        reg = <25>;
                        qcom,ion-heap-type = "SYSTEM";
                 };

                 qcom,ion-heap@22 { /* ADSP HEAP */
                        reg = <22>;
                        memory-region = <&adsp_mem>;
                        qcom,ion-heap-type = "DMA";
                 };

                 qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
                        reg = <10>;
                        memory-region = <&secure_display_memory>;
                        qcom,ion-heap-type = "HYP_CMA";
                 };

                 qcom,ion-heap@9 {
                        reg = <9>;
                        qcom,ion-heap-type = "SYSTEM_SECURE";
                 };

        };
+2 −0
Original line number Diff line number Diff line
@@ -51,6 +51,8 @@ compatible (optional) - standard definition
          used as a shared pool of DMA buffers for a set of devices. It can
          be used by an operating system to instantiate the necessary pool
          management subsystem if necessary.
	- removed-dma-pool: This indicates a region of memory which is meant to
	  be carved out and not exposed to kernel.
        - vendor specific string in the form <vendor>,[<device>-]<usage>
no-map (optional) - empty property
    - Indicates the operating system must not create a virtual mapping
+22 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

&soc {
	/*
	 * ION memory manager. Per the qcom,msm-ion binding above, every
	 * child node is parsed as one ION heap configuration: "reg"
	 * carries the numeric heap ID and "qcom,ion-heap-type" selects
	 * the heap implementation.
	 */
	qcom,ion {
		compatible = "qcom,msm-ion";
		#address-cells = <1>;
		#size-cells = <0>;

		/* Heap ID 25: heap of type "SYSTEM" */
		system_heap: qcom,ion-heap@25 {
			reg = <25>;
			qcom,ion-heap-type = "SYSTEM";
		};

		/* Heap ID 9: heap of type "SYSTEM_SECURE" */
		system_secure_heap: qcom,ion-heap@9 {
			reg = <9>;
			qcom,ion-heap-type = "SYSTEM_SECURE";
		};
	};
};
+1 −0
Original line number Diff line number Diff line
@@ -348,4 +348,5 @@
	};
};

#include "kona-ion.dtsi"
#include "msm-arm-smmu-kona.dtsi"
+133 −13
Original line number Diff line number Diff line
@@ -34,9 +34,13 @@
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/atomic.h>

#include <uapi/linux/dma-buf.h>

static atomic_long_t name_counter;

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
@@ -77,6 +81,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
	return 0;
}
@@ -276,12 +281,19 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
	return events;
}

static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
					    enum dma_data_direction direction);


static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
					  enum dma_data_direction direction);

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	enum dma_data_direction dir;
	int ret;

	dmabuf = file->private_data;
@@ -296,22 +308,30 @@ static long dma_buf_ioctl(struct file *file,

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			dir = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			dir = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			dir = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
			if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
				ret = dma_buf_end_cpu_access_umapped(dmabuf,
								     dir);
			else
				ret = dma_buf_end_cpu_access(dmabuf, dir);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);
			if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
				ret = dma_buf_begin_cpu_access_umapped(dmabuf,
								       dir);
			else
				ret = dma_buf_begin_cpu_access(dmabuf, dir);

		return ret;
	default:
@@ -392,7 +412,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	char *bufname;
	int ret;
	long cnt;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
@@ -414,10 +436,17 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	cnt = atomic_long_inc_return(&name_counter);
	bufname = kasprintf(GFP_KERNEL, "dmabuf%ld", cnt);
	if (!bufname) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
		goto err_name;
	}

	dmabuf->priv = exp_info->priv;
@@ -428,6 +457,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
	dmabuf->name = bufname;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
@@ -435,7 +465,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
	file = anon_inode_getfile(bufname, &dma_buf_fops, dmabuf,
					exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
@@ -456,6 +486,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)

err_dmabuf:
	kfree(dmabuf);
err_name:
	kfree(bufname);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
@@ -746,7 +778,8 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *       device). Optionally SYNC_USER_MAPPED can be set to restrict cache
 *       maintenance to only the parts of the buffer which are mmap(ed).
 *     - munmap once you don't need the buffer any more
 *
 *    For correctness and optimal performance, it is always required to use
@@ -833,6 +866,50 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/*
 * Begin CPU access with cache maintenance restricted to the mmap(ed)
 * parts of @dmabuf, then make sure all attached fences are waited upon.
 *
 * The exporter's ->begin_cpu_access_umapped() hook (if present) runs
 * first so it can perform the fence wait more efficiently itself; the
 * generic wait afterwards is then a reasonably cheap no-op.
 *
 * Returns 0 on success or a negative errno.
 */
static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int err;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	err = dmabuf->ops->begin_cpu_access_umapped ?
		dmabuf->ops->begin_cpu_access_umapped(dmabuf, direction) : 0;
	if (err)
		return err;

	/*
	 * Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	return __dma_buf_begin_cpu_access(dmabuf, direction);
}
/*
 * Begin CPU access limited to the byte range [@offset, @offset + @len)
 * of @dmabuf, then make sure all attached fences are waited upon.
 *
 * The exporter's ->begin_cpu_access_partial() hook (if present) runs
 * first so it can perform the fence wait more efficiently itself; the
 * generic wait afterwards is then a reasonably cheap no-op.
 *
 * Returns 0 on success or a negative errno.
 */
int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
				     enum dma_data_direction direction,
				     unsigned int offset, unsigned int len)
{
	int err = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access_partial)
		err = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
							    offset, len);
	if (err)
		return err;

	/*
	 * Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	return __dma_buf_begin_cpu_access(dmabuf, direction);
}
EXPORT_SYMBOL(dma_buf_begin_cpu_access_partial);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
@@ -859,6 +936,35 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access_umapped)
		ret = dmabuf->ops->end_cpu_access_umapped(dmabuf, direction);

	return ret;
}

int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
				   enum dma_data_direction direction,
				   unsigned int offset, unsigned int len)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access_partial)
		ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
							  offset, len);

	return ret;
}
EXPORT_SYMBOL(dma_buf_end_cpu_access_partial);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
@@ -1053,6 +1159,20 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

/*
 * dma_buf_get_flags - query exporter-specific flags for @dmabuf.
 *
 * Stores the flags through @flags when the exporter implements the
 * ->get_flags() hook; otherwise leaves @flags untouched.
 *
 * Returns the hook's return value (0 when no hook is provided), or
 * -EINVAL on a NULL @dmabuf.
 */
int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
{
	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (!dmabuf->ops->get_flags)
		return 0;

	return dmabuf->ops->get_flags(dmabuf, flags);
}
EXPORT_SYMBOL(dma_buf_get_flags);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
@@ -1072,8 +1192,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
		   "size", "flags", "mode", "count");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\t%-12s\t%-s\n",
		   "size", "flags", "mode", "count", "exp_name", "buf name");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);
@@ -1084,11 +1204,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%-12s\t%-s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name);
				buf_obj->exp_name, buf_obj->name);

		robj = buf_obj->resv;
		while (true) {
Loading