Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eb749ee5 authored by Isaac J. Manjarres's avatar Isaac J. Manjarres
Browse files

soc: qcom: mem-buf: Add support for consumers to import dma-bufs



Supplier virtual machines (VMs) support exporting dma-bufs to
a consumer VM. However, consumers do not support importing dma-bufs
into their VMs, so add an IOCTL command to import a dma-buf into
a consumer VM.

The client must provide a memory parcel handle that corresponds to a
dma-buf that has been shared with the consumer VM, as well as an
access control list that is used for validating the access control
rules for the buffer. Upon success, the client is given a dma-buf
fd, which they can use to map the buffer and access it from both
the CPU, and peripherals.

Change-Id: I548e004e73543421b932430fbd9f84ad76658ef8
Signed-off-by: default avatarIsaac J. Manjarres <isaacm@codeaurora.org>
parent 50c067fc
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -42,7 +42,7 @@ obj-$(CONFIG_QCOM_LAHAINA_LLCC) += llcc-lahaina.o
obj-$(CONFIG_QCOM_SHIMA_LLCC) += llcc-shima.o
obj-$(CONFIG_QCOM_MINIDUMP) += msm_minidump.o minidump_log.o
obj-$(CONFIG_QCOM_MEM_OFFLINE) += mem-offline.o
obj-$(CONFIG_QCOM_MEM_BUF) += mem-buf.o
obj-$(CONFIG_QCOM_MEM_BUF) += mem-buf.o mem_buf_dma_buf.o
obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o
obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
obj-$(CONFIG_MSM_JTAGV8) += jtagv8.o jtagv8-etm.o
+43 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#ifndef MEM_BUF_PRIVATE_H
#define MEM_BUF_PRIVATE_H

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/slab.h>

/**
 * struct mem_buf_import: Represents a memory buffer that was imported from
 * another VM.
 * @memparcel_hdl: The handle associated with the memparcel that represents the
 * memory that was imported from another VM.
 * @size: The size of the buffer.
 * @sgl_desc: The SG descriptor that represents the memory buffer.
 * @dmabuf: The dma-buf that corresponds to the buffer.
 * @kmap_cnt: The number of kernel mapping references associated with the
 * buffer.
 * @vaddr: The virtual address for the buffer after it has been mapped into a
 * contiguous range in the kernel virtual address space.
 * @lock: protects accesses to @attachments, @kmap_cnt, and @vaddr.
 * @attachments: a list of attachments for the buffer.
 */
struct mem_buf_import {
	hh_memparcel_handle_t memparcel_hdl;
	size_t size;
	struct hh_sgl_desc *sgl_desc;
	struct dma_buf *dmabuf;
	int kmap_cnt;
	void *vaddr;
	struct mutex lock;
	struct list_head attachments;
};

void mem_buf_unimport_dma_buf(struct mem_buf_import *import_buf);

extern const struct dma_buf_ops mem_buf_dma_buf_ops;
#endif
+138 −0
Original line number Diff line number Diff line
@@ -27,6 +27,8 @@
#include <soc/qcom/secure_buffer.h>
#include <uapi/linux/mem-buf.h>

#include "mem-buf-private.h"

#define CREATE_TRACE_POINTS
#include "trace-mem-buf.h"

@@ -1492,6 +1494,11 @@ static int mem_buf_get_export_fd(struct mem_buf_export *export_buf)
	return _mem_buf_get_fd(export_buf->filp);
}

/* Install and return a new close-on-exec fd for the import's dma-buf. */
static int mem_buf_get_import_fd(struct mem_buf_import *import_buf)
{
	struct dma_buf *dmabuf = import_buf->dmabuf;

	return dma_buf_fd(dmabuf, O_CLOEXEC);
}

static void _mem_buf_put(struct file *filp)
{
	fput(filp);
@@ -1511,6 +1518,11 @@ static void mem_buf_export_put(struct mem_buf_export *export_buf)
	_mem_buf_put(export_buf->filp);
}

/* Drop one reference on the dma-buf backing an imported buffer. */
static void mem_buf_import_put(struct mem_buf_import *import_buf)
{
	struct dma_buf *dmabuf = import_buf->dmabuf;

	dma_buf_put(dmabuf);
}

static bool is_mem_buf_file(struct file *filp)
{
	return filp->f_op == &mem_buf_fops;
@@ -1619,6 +1631,7 @@ static int mem_buf_alloc_fd(struct mem_buf_alloc_ioctl_arg *allocation_args)
union mem_buf_ioctl_arg {
	struct mem_buf_alloc_ioctl_arg allocation;
	struct mem_buf_export_ioctl_arg export;
	struct mem_buf_import_ioctl_arg import;
};

static int validate_ioctl_arg(union mem_buf_ioctl_arg *arg, unsigned int cmd)
@@ -1648,6 +1661,16 @@ static int validate_ioctl_arg(union mem_buf_ioctl_arg *arg, unsigned int cmd)
			return -EINVAL;
		break;
	}
	case MEM_BUF_IOC_IMPORT:
	{
		struct mem_buf_import_ioctl_arg *import = &arg->import;

		if (!import->nr_acl_entries || !import->acl_list ||
		    import->nr_acl_entries > MEM_BUF_MAX_NR_ACL_ENTS ||
		    import->reserved0 || import->reserved1 || import->reserved2)
			return -EINVAL;
		break;
	}
	default:
		return -EINVAL;
	}
@@ -1877,6 +1900,98 @@ static struct mem_buf_export *mem_buf_export_dma_buf(int dma_buf_fd,
	return ERR_PTR(ret);
}

/* Total buffer size: the sum of the sizes of every SG descriptor entry. */
static size_t mem_buf_get_sgl_buf_size(struct hh_sgl_desc *sgl_desc)
{
	unsigned int idx;
	size_t total = 0;

	for (idx = 0; idx < sgl_desc->n_sgl_entries; idx++)
		total += sgl_desc->sgl_entries[idx].size;

	return total;
}

/**
 * mem_buf_import_dma_buf: Imports a memparcel that was shared with this VM
 * and wraps it in a dma-buf.
 * @memparcel_hdl: The handle of the memparcel to import.
 * @nr_acl_entries: The number of entries in @acl_list.
 * @acl_list: Userspace pointer to an array of acl_entry structures used to
 * validate access to the memparcel.
 *
 * On success, returns a mem_buf_import whose @dmabuf spans the imported
 * memory; on failure, returns an ERR_PTR-encoded error.
 */
static struct mem_buf_import *mem_buf_import_dma_buf(
					hh_memparcel_handle_t memparcel_hdl,
					unsigned int nr_acl_entries,
					const void __user *acl_list)
{
	int ret;
	struct mem_buf_import *import;
	struct hh_acl_desc *acl_desc;
	struct hh_sgl_desc *sgl_desc;
	struct acl_entry *k_acl_list;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;

	if (!nr_acl_entries || !acl_list)
		return ERR_PTR(-EINVAL);

	import = kzalloc(sizeof(*import), GFP_KERNEL);
	if (!import)
		return ERR_PTR(-ENOMEM);
	import->memparcel_hdl = memparcel_hdl;
	mutex_init(&import->lock);
	INIT_LIST_HEAD(&import->attachments);

	k_acl_list = memdup_user(acl_list, sizeof(*k_acl_list) *
				 nr_acl_entries);
	if (IS_ERR(k_acl_list)) {
		ret = PTR_ERR(k_acl_list);
		goto err_out;
	}

	acl_desc = mem_buf_acl_to_hh_acl(nr_acl_entries, k_acl_list);
	kfree(k_acl_list);
	if (IS_ERR(acl_desc)) {
		ret = PTR_ERR(acl_desc);
		goto err_out;
	}

	/* Stage-2 map yields the IPA layout used to size and back the buf. */
	sgl_desc = mem_buf_map_mem_s2(memparcel_hdl, acl_desc);
	kfree(acl_desc);
	if (IS_ERR(sgl_desc)) {
		ret = PTR_ERR(sgl_desc);
		goto err_out;
	}
	import->sgl_desc = sgl_desc;
	import->size = mem_buf_get_sgl_buf_size(sgl_desc);

	ret = mem_buf_map_mem_s1(sgl_desc);
	if (ret < 0)
		goto err_map_mem_s1;

	exp_info.ops = &mem_buf_dma_buf_ops;
	exp_info.size = import->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = import;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		/*
		 * Fix: propagate the dma_buf_export() error. Previously this
		 * path jumped to cleanup with @ret still holding a stale
		 * (possibly zero/positive) value from mem_buf_map_mem_s1().
		 */
		ret = PTR_ERR(dmabuf);
		goto err_export_dma_buf;
	}
	import->dmabuf = dmabuf;

	return import;

err_export_dma_buf:
	mem_buf_unmap_mem_s1(sgl_desc);
err_map_mem_s1:
	kfree(import->sgl_desc);
	mem_buf_unmap_mem_s2(memparcel_hdl);
err_out:
	/* Fix: destroy the mutex initialized above before freeing. */
	mutex_destroy(&import->lock);
	kfree(import);
	return ERR_PTR(ret);
}

/*
 * Tears down an imported buffer created by mem_buf_import_dma_buf():
 * removes the stage-1 mapping, frees the SG descriptor, releases the
 * stage-2 mapping of the memparcel, and frees the bookkeeping struct.
 * Invoked from the dma-buf release callback (mem_buf_dma_buf_release).
 * The teardown order mirrors the reverse of the import sequence.
 */
void mem_buf_unimport_dma_buf(struct mem_buf_import *import_buf)
{
	mem_buf_unmap_mem_s1(import_buf->sgl_desc);
	kfree(import_buf->sgl_desc);
	mem_buf_unmap_mem_s2(import_buf->memparcel_hdl);
	mutex_destroy(&import_buf->lock);
	kfree(import_buf);
}

static long mem_buf_dev_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg)
{
@@ -1939,6 +2054,29 @@ static long mem_buf_dev_ioctl(struct file *filp, unsigned int cmd,
		export->memparcel_hdl = ret_memparcel_hdl;
		break;
	}
	case MEM_BUF_IOC_IMPORT:
	{
		struct mem_buf_import_ioctl_arg *import = &ioctl_arg.import;
		struct mem_buf_import *import_buf;

		if (!(mem_buf_capability & MEM_BUF_CAP_CONSUMER))
			return -ENOTSUPP;

		import_buf = mem_buf_import_dma_buf(import->memparcel_hdl,
						import->nr_acl_entries,
					(const void __user *)import->acl_list);
		if (IS_ERR(import_buf))
			return PTR_ERR(import_buf);

		fd = mem_buf_get_import_fd(import_buf);
		if (fd < 0) {
			mem_buf_import_put(import_buf);
			return fd;
		}

		import->dma_buf_import_fd = fd;
		break;
	}
	default:
		return -ENOTTY;
	}
+461 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include "mem-buf-private.h"

/*
 * Builds a fresh sg_table mirroring the entries of @sgl_desc, one
 * scatterlist entry per SG descriptor entry. DMA address/length fields are
 * cleared; they are filled in when the table is DMA-mapped. Returns the new
 * table or an ERR_PTR; the caller frees it with free_duped_table().
 */
static struct sg_table *dup_hh_sgl_desc_to_sgt(struct hh_sgl_desc *sgl_desc)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg;

	if (!sgl_desc || !sgl_desc->n_sgl_entries)
		return ERR_PTR(-EINVAL);

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, sgl_desc->n_sgl_entries, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		/* Fix: return sg_alloc_table()'s error, not a hard-coded one. */
		return ERR_PTR(ret);
	}

	for_each_sg(new_table->sgl, sg, new_table->nents, i) {
		sg_set_page(sg, phys_to_page(sgl_desc->sgl_entries[i].ipa_base),
			    sgl_desc->sgl_entries[i].size, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = 0;
	}

	return new_table;
}

/*
 * Releases a table created by dup_hh_sgl_desc_to_sgt(). The scatterlist is
 * freed before the table struct itself (order matters).
 */
static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

/*
 * struct mem_buf_dma_buf_attachment - per-attachment state for an imported
 * dma-buf.
 * @dev: the device that attached to the buffer.
 * @table: this attachment's private duplicate of the buffer's SG table.
 * @list: membership in the owning mem_buf_import's @attachments list,
 * protected by mem_buf_import's @lock.
 * @dma_mapped: true while @table is DMA-mapped (set/cleared under @lock).
 */
struct mem_buf_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool dma_mapped;
};

/*
 * dma-buf attach callback: allocates per-attachment state, duplicates the
 * buffer's SG table for this attachment, and links it onto the import's
 * attachment list under the import's lock. Returns 0 or a negative errno.
 */
static int mem_buf_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct mem_buf_dma_buf_attachment *a;
	struct sg_table *table;
	struct mem_buf_import *import_buf = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_hh_sgl_desc_to_sgt(import_buf->sgl_desc);
	if (IS_ERR(table)) {
		kfree(a);
		/* Fix: propagate the real error (e.g. -EINVAL), not -ENOMEM. */
		return PTR_ERR(table);
	}

	a->table = table;
	a->dev = attachment->dev;
	a->dma_mapped = false;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&import_buf->lock);
	list_add(&a->list, &import_buf->attachments);
	mutex_unlock(&import_buf->lock);

	return 0;
}

/*
 * dma-buf detach callback: unlinks the attachment from the import's list
 * (under the import's lock) and frees its duplicated SG table and state.
 * NOTE(review): "detatch" is a misspelling of "detach"; renaming would also
 * require updating the mem_buf_dma_buf_ops initializer.
 */
static void mem_buf_dma_buf_detatch(struct dma_buf *dmabuf,
				    struct dma_buf_attachment *attachment)
{
	struct mem_buf_dma_buf_attachment *a = attachment->priv;
	struct mem_buf_import *import_buf = dmabuf->priv;

	mutex_lock(&import_buf->lock);
	list_del(&a->list);
	mutex_unlock(&import_buf->lock);
	free_duped_table(a->table);
	kfree(a);
}

/*
 * dma-buf map_dma_buf callback: DMA-maps the attachment's private SG table
 * for @direction (honoring the attachment's dma_map_attrs) and marks the
 * attachment as mapped. Returns the mapped table, or ERR_PTR(-ENOMEM) if
 * dma_map_sg_attrs() maps no entries.
 */
static struct sg_table *mem_buf_dma_map_attachment(
					struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct mem_buf_dma_buf_attachment *a = attachment->priv;
	struct mem_buf_import *buffer = attachment->dmabuf->priv;
	struct sg_table *sgt = a->table;
	int nents_mapped;

	mutex_lock(&buffer->lock);
	nents_mapped = dma_map_sg_attrs(attachment->dev, sgt->sgl, sgt->nents,
					direction, attachment->dma_map_attrs);
	if (nents_mapped <= 0) {
		mutex_unlock(&buffer->lock);
		return ERR_PTR(-ENOMEM);
	}
	a->dma_mapped = true;
	mutex_unlock(&buffer->lock);

	return sgt;
}

/*
 * dma-buf unmap_dma_buf callback: undoes mem_buf_dma_map_attachment() by
 * DMA-unmapping @table with the attachment's dma_map_attrs and clearing the
 * attachment's mapped flag, all under the import's lock.
 */
static void mem_buf_dma_unmap_attachment(struct dma_buf_attachment *attachment,
					 struct sg_table *table,
					 enum dma_data_direction direction)
{
	struct mem_buf_dma_buf_attachment *a = attachment->priv;
	struct mem_buf_import *buffer = attachment->dmabuf->priv;

	mutex_lock(&buffer->lock);
	dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
			   direction, attachment->dma_map_attrs);
	a->dma_mapped = false;
	mutex_unlock(&buffer->lock);
}

/*
 * Maps the buffer's pages into the user VMA @vma via remap_pfn_range(),
 * honoring the mmap offset (vma->vm_pgoff). SG entries entirely before the
 * offset are skipped; a partially-covered entry is mapped starting from its
 * interior. Returns 0 on success or the remap_pfn_range() error.
 * Called with import_buf->lock held (see mem_buf_mmap).
 */
static int mem_buf_map_user(struct mem_buf_import *import_buf,
			    struct vm_area_struct *vma)
{
	struct hh_sgl_desc *sgl_desc = import_buf->sgl_desc;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	int i, ret;

	for (i = 0; i < sgl_desc->n_sgl_entries; i++) {
		struct page *page =
			phys_to_page(sgl_desc->sgl_entries[i].ipa_base);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sgl_desc->sgl_entries[i].size;

		if (offset >= len) {
			/* This entry lies wholly before the mmap offset. */
			offset -= len;
			continue;
		} else if (offset) {
			/* Start partway through this entry; consume offset. */
			page += offset / PAGE_SIZE;
			len = sgl_desc->sgl_entries[i].size - offset;
			offset = 0;
		}

		/* Never map past the end of the VMA. */
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}

	return 0;
}

/*
 * Maps the whole (possibly scattered) buffer into a virtually-contiguous
 * kernel range with vmap(). Builds a temporary page-pointer array covering
 * every page of every SG entry, vmaps it, then frees the array. Returns the
 * kernel virtual address or an ERR_PTR. Called with import_buf->lock held
 * (see mem_buf_buffer_kmap_get).
 */
static void *mem_buf_map_kernel(struct mem_buf_import *import_buf)
{
	void *vaddr;
	int npages = PAGE_ALIGN(import_buf->size) / PAGE_SIZE;
	struct page **pages =
		vmalloc(array_size(npages, sizeof(struct page *)));
	struct page **tmp = pages;
	struct hh_sgl_desc *sgl_desc = import_buf->sgl_desc;
	int i, j, n_pages_this_seg;
	u64 seg_ipa_base, seg_size;
	struct page *page;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < sgl_desc->n_sgl_entries; i++) {
		seg_ipa_base = sgl_desc->sgl_entries[i].ipa_base;
		seg_size = sgl_desc->sgl_entries[i].size;
		n_pages_this_seg = PAGE_ALIGN(seg_size) / PAGE_SIZE;
		page = phys_to_page(seg_ipa_base);

		/*
		 * Fix: bound the writes through @tmp. The previous check
		 * (BUG_ON(i >= npages)) compared the segment index against
		 * the page count, which does not prevent overflowing the
		 * pages array when segments span multiple pages.
		 */
		BUG_ON(tmp - pages + n_pages_this_seg > npages);
		for (j = 0; j < n_pages_this_seg; j++)
			*(tmp++) = page++;
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

/* Undoes mem_buf_map_kernel() by tearing down the vmap'd range. */
static void mem_buf_unmap_kernel(struct mem_buf_import *import_buf)
{
	vunmap(import_buf->vaddr);
}

/*
 * dma-buf mmap callback: maps the imported buffer into the caller's VMA
 * under the import's lock; logs (rate-limited) on failure.
 */
static int mem_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct mem_buf_import *import_buf = dmabuf->priv;
	int ret;

	mutex_lock(&import_buf->lock);
	ret = mem_buf_map_user(import_buf, vma);
	mutex_unlock(&import_buf->lock);
	if (ret)
		pr_err_ratelimited("%s: failure mapping buffer to userspace\n",
				   __func__);

	return ret;
}

/*
 * dma-buf release callback: runs when the last reference to the dma-buf is
 * dropped; tears down the underlying import (unmaps and frees everything).
 */
static void mem_buf_dma_buf_release(struct dma_buf *dmabuf)
{
	struct mem_buf_import *import_buf = dmabuf->priv;

	mem_buf_unimport_dma_buf(import_buf);
}

/*
 * Takes a kernel-mapping reference on the buffer, creating the vmap on the
 * first reference. Returns the cached kernel virtual address, or an ERR_PTR
 * if mapping fails. Called with import_buf->lock held.
 */
static void *mem_buf_buffer_kmap_get(struct mem_buf_import *import_buf)
{
	void *vaddr;

	if (!import_buf->kmap_cnt) {
		vaddr = mem_buf_map_kernel(import_buf);
		if (IS_ERR(vaddr))
			return vaddr;
		import_buf->vaddr = vaddr;
	}

	import_buf->kmap_cnt++;
	return import_buf->vaddr;
}

/*
 * Drops one kernel-mapping reference; unmaps the buffer and clears the
 * cached address when the count reaches zero. Warns (rather than
 * underflowing) on an unbalanced put. Called with import_buf->lock held.
 */
static void mem_buf_buffer_kmap_put(struct mem_buf_import *import_buf)
{
	if (import_buf->kmap_cnt == 0) {
		pr_warn_ratelimited("membuf client likely missing a call to dma_buf_kmap or dma_buf_vmap, pid:%d\n",
				    current->pid);
		return;
	}

	import_buf->kmap_cnt--;
	if (!import_buf->kmap_cnt) {
		mem_buf_unmap_kernel(import_buf);
		import_buf->vaddr = NULL;
	}
}

/*
 * dma-buf vmap callback: takes a kernel-mapping reference under the
 * import's lock and returns the kernel virtual address (or an ERR_PTR).
 */
static void *mem_buf_dma_buf_vmap(struct dma_buf *dmabuf)
{
	struct mem_buf_import *import_buf = dmabuf->priv;
	void *kva;

	mutex_lock(&import_buf->lock);
	kva = mem_buf_buffer_kmap_get(import_buf);
	mutex_unlock(&import_buf->lock);

	return kva;
}

/*
 * dma-buf vunmap callback: releases one kernel-mapping reference under the
 * import's lock. @vaddr is unused; the import tracks its own mapping.
 */
static void mem_buf_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct mem_buf_import *import_buf = dmabuf->priv;

	mutex_lock(&import_buf->lock);
	mem_buf_buffer_kmap_put(import_buf);
	mutex_unlock(&import_buf->lock);
}

/*
 * dma-buf map (kmap) callback: returns a kernel address for the page at
 * page-index @offset by vmapping the entire buffer and offsetting into it.
 */
static void *mem_buf_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	/*
	 * TODO: Once clients remove their hacks where they assume kmap(ed)
	 * addresses are virtually contiguous implement this properly
	 */
	void *vaddr = mem_buf_dma_buf_vmap(dmabuf);

	if (IS_ERR(vaddr))
		return vaddr;

	return vaddr + offset * PAGE_SIZE;
}

/*
 * dma-buf unmap (kunmap) callback: drops the whole-buffer mapping taken by
 * mem_buf_dma_buf_kmap(). @offset and @ptr are unused.
 */
static void mem_buf_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	/*
	 * TODO: Once clients remove their hacks where they assume kmap(ed)
	 * addresses are virtually contiguous implement this properly
	 */
	mem_buf_dma_buf_vunmap(dmabuf, ptr);
}

/*
 * dma-buf begin_cpu_access callback: syncs the buffer for CPU access on
 * every attachment that is currently DMA-mapped. Always returns 0.
 */
static int mem_buf_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					    enum dma_data_direction direction)
{
	struct mem_buf_import *import_buf = dmabuf->priv;
	struct mem_buf_dma_buf_attachment *a;

	mutex_lock(&import_buf->lock);
	list_for_each_entry(a, &import_buf->attachments, list) {
		if (a->dma_mapped)
			dma_sync_sg_for_cpu(a->dev, a->table->sgl,
					    a->table->nents, direction);
	}
	mutex_unlock(&import_buf->lock);

	return 0;
}

/*
 * dma-buf end_cpu_access callback: syncs the buffer back for device access
 * on every attachment that is currently DMA-mapped. Always returns 0.
 */
static int mem_buf_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					  enum dma_data_direction direction)
{
	struct mem_buf_import *import_buf = dmabuf->priv;
	struct mem_buf_dma_buf_attachment *a;

	mutex_lock(&import_buf->lock);
	list_for_each_entry(a, &import_buf->attachments, list) {
		if (a->dma_mapped)
			dma_sync_sg_for_device(a->dev, a->table->sgl,
					       a->table->nents, direction);
	}
	mutex_unlock(&import_buf->lock);

	return 0;
}

/*
 * Performs a partial cache-maintenance operation on [offset, offset+length)
 * of the buffer described by @sgl, syncing either for the CPU or for the
 * device depending on @for_cpu. Only supports scatterlists whose DMA
 * mapping collapsed into a single segment (the first loop rejects anything
 * else with -EINVAL). Returns 0 on success.
 */
static int mem_buf_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
				  unsigned int nents, unsigned long offset,
				  unsigned long length,
				  enum dma_data_direction dir, bool for_cpu)
{
	int i;
	struct scatterlist *sg;
	unsigned int len = 0;
	dma_addr_t sg_dma_addr;

	/* Reject tables with more than one DMA-mapped segment. */
	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_len(sg) == 0)
			break;

		if (i > 0) {
			pr_warn_ratelimited("Partial cmo only supported with 1 segment\n"
				"is dma_set_max_seg_size being set on dev:%s\n",
				dev_name(dev));
			return -EINVAL;
		}
	}

	/* Walk the CPU-side entries, skipping up to @offset, then sync. */
	for_each_sg(sgl, sg, nents, i) {
		unsigned int sg_offset, sg_left, size = 0;

		/* Base DMA address comes from the single mapped segment. */
		if (i == 0)
			sg_dma_addr = sg_dma_address(sg);

		len += sg->length;
		if (len <= offset) {
			sg_dma_addr += sg->length;
			continue;
		}

		sg_left = len - offset;
		sg_offset = sg->length - sg_left;

		size = (length < sg_left) ? length : sg_left;
		if (for_cpu)
			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
						      sg_offset, size, dir);
		else
			dma_sync_single_range_for_device(dev, sg_dma_addr,
							 sg_offset, size, dir);

		offset += size;
		length -= size;
		sg_dma_addr += sg->length;

		if (length == 0)
			break;
	}

	return 0;
}

/*
 * dma-buf begin_cpu_access_partial callback: syncs [offset, offset+len) for
 * CPU access on each DMA-mapped attachment. Returns the result of the last
 * attempted sync (0 when none are mapped).
 */
static int mem_buf_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
						enum dma_data_direction dir,
						unsigned int offset,
						unsigned int len)
{
	struct mem_buf_import *import_buf = dmabuf->priv;
	struct mem_buf_dma_buf_attachment *a;
	int ret = 0;

	mutex_lock(&import_buf->lock);
	list_for_each_entry(a, &import_buf->attachments, list) {
		if (a->dma_mapped)
			ret = mem_buf_sgl_sync_range(a->dev, a->table->sgl,
						     a->table->nents, offset,
						     len, dir, true);
	}
	mutex_unlock(&import_buf->lock);

	return ret;
}

/*
 * dma-buf end_cpu_access_partial callback: syncs [offset, offset+len) back
 * for device access on each DMA-mapped attachment. Returns the result of
 * the last attempted sync (0 when none are mapped).
 */
static int mem_buf_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
					      enum dma_data_direction direction,
					      unsigned int offset,
					      unsigned int len)
{
	struct mem_buf_import *import_buf = dmabuf->priv;
	struct mem_buf_dma_buf_attachment *a;
	int ret = 0;

	mutex_lock(&import_buf->lock);
	list_for_each_entry(a, &import_buf->attachments, list) {
		/*
		 * Fix: skip attachments that are not DMA-mapped, matching
		 * mem_buf_dma_buf_begin_cpu_access_partial() and the
		 * non-partial sync paths; syncing an unmapped table would
		 * operate on invalid DMA addresses.
		 */
		if (!a->dma_mapped)
			continue;

		ret = mem_buf_sgl_sync_range(a->dev, a->table->sgl,
					     a->table->nents, offset, len,
					     direction, false);
	}
	mutex_unlock(&import_buf->lock);
	return ret;
}

/* dma-buf callback table for buffers imported from another VM. */
const struct dma_buf_ops mem_buf_dma_buf_ops = {
	.map_dma_buf = mem_buf_dma_map_attachment,
	.unmap_dma_buf = mem_buf_dma_unmap_attachment,
	.mmap = mem_buf_mmap,
	.release = mem_buf_dma_buf_release,
	.attach = mem_buf_dma_buf_attach,
	.detach = mem_buf_dma_buf_detatch,
	.begin_cpu_access = mem_buf_dma_buf_begin_cpu_access,
	.end_cpu_access = mem_buf_dma_buf_end_cpu_access,
	.begin_cpu_access_partial = mem_buf_dma_buf_begin_cpu_access_partial,
	.end_cpu_access_partial = mem_buf_dma_buf_end_cpu_access_partial,
	.map = mem_buf_dma_buf_kmap,
	.unmap = mem_buf_dma_buf_kunmap,
	.vmap = mem_buf_dma_buf_vmap,
	.vunmap = mem_buf_dma_buf_vunmap,
};
+34 −6
Original line number Diff line number Diff line
@@ -56,13 +56,13 @@ struct mem_buf_ion_data {
};

/**
 * struct mem_buf_alloc_ioctl_arg: An request to allocate memory from another
 * struct mem_buf_alloc_ioctl_arg: A request to allocate memory from another
 * VM to other VMs.
 * @size: The size of the allocation.
 * @nr_acl_entries: The number of ACL entries in @acl_list.
 * @acl_list: An array of structures, where each structure specifies a VMID
 * and the access permissions that the VMID will have to the memory to be
 * allocated.
 * @nr_acl_entries: The number of ACL entries in @acl_list.
 * @src_mem_type: The type of memory that the source VM should allocate from.
 * This should be one of the mem_buf_mem_type enum values.
 * @src_data: A pointer to data that the source VM should interpret when
@@ -70,25 +70,25 @@ struct mem_buf_ion_data {
 * @dst_mem_type: The type of memory that the destination VM should treat the
 * incoming allocation from the source VM as. This should be one of the
 * mem_buf_mem_type enum values.
 * @dst_data: A pointer to data that the destination VM should interpret when
 * adding the memory to the current VM.
 * @mem_buf_fd: A file descriptor representing the memory that was allocated
 * from the source VM and added to the current VM. Calling close() on this file
 * descriptor will deallocate the memory from the current VM, and return it
 * to the source VM.
 * * @dst_data: A pointer to data that the destination VM should interpret when
 * adding the memory to the current VM.
 *
 * All reserved fields must be zeroed out by the caller prior to invoking the
 * allocation IOCTL command with this argument.
 */
struct mem_buf_alloc_ioctl_arg {
	__u64 size;
	__u32 nr_acl_entries;
	__u64 acl_list;
	__u32 nr_acl_entries;
	__u32 src_mem_type;
	__u64 src_data;
	__u32 dst_mem_type;
	__u64 dst_data;
	__u32 mem_buf_fd;
	__u64 dst_data;
	__u64 reserved0;
	__u64 reserved1;
	__u64 reserved2;
@@ -131,4 +131,32 @@ struct mem_buf_export_ioctl_arg {
#define MEM_BUF_IOC_EXPORT		_IOWR(MEM_BUF_IOC_MAGIC, 1,\
					      struct mem_buf_export_ioctl_arg)

/**
 * struct mem_buf_import_ioctl_arg: A request to import memory from another
 * VM as a dma-buf
 * @memparcel_hdl: The handle that corresponds to the memparcel we are
 * importing.
 * @nr_acl_entries: The number of ACL entries in @acl_list.
 * @acl_list: An array of structures, where each structure specifies a VMID
 * and the access permissions that the VMID should have for the memparcel.
 * @dma_buf_import_fd: A dma-buf file descriptor that the client can use to
 * access the buffer. This fd must be closed to release the memory.
 *
 * All reserved fields must be zeroed out by the caller prior to invoking the
 * import IOCTL command with this argument.
 */
struct mem_buf_import_ioctl_arg {
	__u32 memparcel_hdl;
	__u32 nr_acl_entries;
	__u64 acl_list;			/* userspace pointer carried as u64 */
	__u32 dma_buf_import_fd;	/* out: filled in by the kernel */
	__u32 reserved0;
	__u64 reserved1;
	__u64 reserved2;
	__u64 reserved3;
};

#define MEM_BUF_IOC_IMPORT		_IOWR(MEM_BUF_IOC_MAGIC, 2,\
					      struct mem_buf_import_ioctl_arg)

#endif /* _UAPI_LINUX_MEM_BUF_H */