Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 80933739 authored by Swathi Sridhar's avatar Swathi Sridhar Committed by Suren Baghdasaryan
Browse files

ANDROID: GKI: dma-buf: Add support for XXX_cpu_access_umapped ops



Userspace clients will be able to restrict cache maintenance to only
the subset of the dma-buf which is mmap(ed) by setting the
DMA_BUF_SYNC_USER_MAPPED flag when calling the DMA_BUF_IOCTL_SYNC IOCTL.

Signed-off-by: default avatarSwathi Sridhar <swatsrid@codeaurora.org>

Bug: 150611569
Test: build
(cherry-picked from bbbc80b6)
[surenb: partial cherry-pick from
bbbc80b6 ion : Merge ion changes from ...
to resolve ABI diffs caused by {begin/end}_cpu_access_umapped
dma_buf_ops.
changed dma_buf_end_cpu_access_umapped to be static.]
Signed-off-by: default avatarSuren Baghdasaryan <surenb@google.com>
Change-Id: Ic2029c5218ca99330a0e7e6128e12ac29cdd1c08
parent 253542f2
Loading
Loading
Loading
Loading
+57 −7
Original line number Diff line number Diff line
@@ -360,12 +360,19 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
	return ret;
}

static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
					    enum dma_data_direction direction);


static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
					  enum dma_data_direction direction);

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	enum dma_data_direction dir;
	int ret;

	dmabuf = file->private_data;
@@ -380,22 +387,30 @@ static long dma_buf_ioctl(struct file *file,

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			dir = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			dir = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			dir = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
			if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
				ret = dma_buf_end_cpu_access_umapped(dmabuf,
								     dir);
			else
				ret = dma_buf_end_cpu_access(dmabuf, dir);
		else
			if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
				ret = dma_buf_begin_cpu_access_umapped(dmabuf,
								       dir);
			else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);
				ret = dma_buf_begin_cpu_access(dmabuf, dir);

		return ret;

@@ -862,7 +877,8 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *       device). Optionally SYNC_USER_MAPPED can be set to restrict cache
 *       maintenance to only the parts of the buffer which are mmap(ed).
 *     - munmap once you don't need the buffer any more
 *
 *    For correctness and optimal performance, it is always required to use
@@ -949,6 +965,27 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/*
 * Kernel-internal helper backing DMA_BUF_IOCTL_SYNC when userspace sets
 * DMA_BUF_SYNC_START together with DMA_BUF_SYNC_USER_MAPPED: prepare only
 * the mmap(ed) portions of @dmabuf for CPU access in @direction.
 *
 * Returns 0 on success or a negative error code from the exporter's
 * optional @begin_cpu_access_umapped op or the generic begin path.
 */
static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
					    enum dma_data_direction direction)
{
	int ret;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	ret = 0;
	if (dmabuf->ops->begin_cpu_access_umapped)
		ret = dmabuf->ops->begin_cpu_access_umapped(dmabuf, direction);

	/*
	 * Guarantee that all fences are waited upon, after giving the
	 * exporter's native handler a chance to do it more efficiently.
	 * A second invocation here is a reasonably cheap no-op.
	 */
	if (!ret)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}

int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
				     enum dma_data_direction direction,
				     unsigned int offset, unsigned int len)
@@ -999,6 +1036,19 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access_umapped)
		ret = dmabuf->ops->end_cpu_access_umapped(dmabuf, direction);

	return ret;
}

int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
				   enum dma_data_direction direction,
				   unsigned int offset, unsigned int len)
+49 −0
Original line number Diff line number Diff line
@@ -187,6 +187,33 @@ struct dma_buf_ops {
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @begin_cpu_access_umapped:
	 *
	 * This is called as a result of the DMA_BUF_IOCTL_SYNC IOCTL being
	 * called with the DMA_BUF_SYNC_START and DMA_BUF_SYNC_USER_MAPPED flags
	 * set. It allows the exporter to ensure that the mmap(ed) portions of
	 * the buffer are available for cpu access - the exporter might need to
	 * allocate or swap-in and pin the backing storage.
	 * The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access_umapped)(struct dma_buf *dmabuf,
					enum dma_data_direction);

	/**
	 * @begin_cpu_access_partial:
	 *
@@ -241,6 +268,28 @@ struct dma_buf_ops {
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access_umapped:
	 *
	 * This is called as a result of the DMA_BUF_IOCTL_SYNC IOCTL being
	 * called with the DMA_BUF_SYNC_END and DMA_BUF_SYNC_USER_MAPPED flags
	 * set. The exporter can use this to limit cache flushing to only those parts
	 * of the buffer which are mmap(ed) and to unpin any resources pinned in
	 * @begin_cpu_access_umapped.
	 * The result of any dma_buf kmap calls after end_cpu_access_umapped is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access_umapped)(struct dma_buf *dmabuf,
				      enum dma_data_direction);

	/**
	 * @end_cpu_access_partial:
	 *
+3 −1
Original line number Diff line number Diff line
@@ -32,8 +32,10 @@ struct dma_buf_sync {
#define DMA_BUF_SYNC_RW        (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
#define DMA_BUF_SYNC_START     (0 << 2)
#define DMA_BUF_SYNC_END       (1 << 2)
#define DMA_BUF_SYNC_USER_MAPPED       (1 << 3)

#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
	(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
	(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END | DMA_BUF_SYNC_USER_MAPPED)

#define DMA_BUF_NAME_LEN	32