Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bf8090cf authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "hab: import/export between remote buffer and dmafd"

parents 2542cb86 0866bef7
Loading
Loading
Loading
Loading
+10 −14
Original line number Diff line number Diff line
@@ -147,7 +147,8 @@ struct hab_header {
	(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)


/* Store the session id into a hab_header (duplicate one-line diff-residue
 * definition dropped; keeping only the wrapped form).
 */
#define HAB_HEADER_SET_SESSION_ID(header, sid) \
	((header).session_id = (sid))

#define HAB_HEADER_SET_SIZE(header, size) \
	((header).id_type_size = ((header).id_type_size & \
@@ -281,8 +282,8 @@ struct uhab_context {
};

/*
 * Array describing the VM and its MMID configuration as seen from what it
 * is connected to, i.e. this describes a pchan's remote side.
 */
struct vmid_mmid_desc {
	int vmid; /* remote vmid  */
@@ -341,8 +342,9 @@ struct virtual_channel {
};

/*
 * Struct shared between local and remote; the contents are composed by the
 * exporter, while the importer only writes pdata and the local (exporter)
 * domID.
 */
struct export_desc {
	uint32_t  export_id;
@@ -410,16 +412,10 @@ int habmem_hyp_revoke(void *expdata, uint32_t count);
void *habmem_imp_hyp_open(void);
void habmem_imp_hyp_close(void *priv, int kernel);

/*
 * Importer: map the buffer described by @exp into the importing context.
 * Kernel clients receive a vmap()ed KVA, user clients receive either a
 * dma_buf fd (HABMM_EXPIMP_FLAGS_FD) or an mmap index, all reported
 * through @param.  Returns 0 on success or a negative errno.
 */
int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
		struct export_desc *exp, int kernel);

/*
 * Importer: tear down the import matching exp->export_id/exp->vcid_remote.
 * Returns 0 on success, -EINVAL if no matching import is found.
 */
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp);

/* mmap handler for the importer device node ("filp": the definition uses
 * this spelling; the old prototype's "flip" was a typo).
 */
int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma);

+378 −110
Original line number Diff line number Diff line
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,9 @@ struct pages_list {
	uint32_t userflags;
	struct file *filp_owner;
	struct file *filp_mapper;
	struct dma_buf *dmabuf;
	int32_t export_id;
	int32_t vcid;
};

struct importer_context {
@@ -58,7 +61,7 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
}


static int habmem_get_dma_pages(unsigned long address,
static int habmem_get_dma_pages_from_va(unsigned long address,
		int page_count,
		struct page **pages)
{
@@ -142,6 +145,56 @@ err:
	return rc;
}

/*
 * Collect the pages backing the dma_buf identified by @fd into @pages.
 *
 * @fd:         dma_buf file descriptor supplied by the exporting client.
 * @page_count: capacity of @pages; never write more entries than this.
 * @pages:      output array of page pointers.
 *
 * Returns the number of pages collected, or a negative errno.  The
 * original returned 0 (success with no pages) when attach/map failed and
 * could write past @pages because the capacity check only broke the
 * inner loop; both are fixed below.
 */
static int habmem_get_dma_pages_from_fd(int32_t fd,
		int page_count,
		struct page **pages)
{
	struct dma_buf *dmabuf = NULL;
	struct scatterlist *s;
	struct sg_table *sg_table = NULL;
	struct dma_buf_attachment *attach = NULL;
	struct page *page;
	int i, j, rc = 0;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, hab_driver.dev);
	if (IS_ERR_OR_NULL(attach)) {
		pr_err("dma_buf_attach failed\n");
		/* propagate a real error instead of returning 0 pages */
		rc = IS_ERR(attach) ? PTR_ERR(attach) : -ENOMEM;
		goto err;
	}

	sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR_OR_NULL(sg_table)) {
		pr_err("dma_buf_map_attachment failed\n");
		rc = IS_ERR(sg_table) ? PTR_ERR(sg_table) : -ENOMEM;
		goto err;
	}

	for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
		page = sg_page(s);
		pr_debug("sgl length %d\n", s->length);

		for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
			/*
			 * Stop filling once the array is full.  Bail out of
			 * BOTH loops: the original only broke the inner one
			 * and kept writing on the next sg entry.
			 */
			if (WARN_ON(rc >= page_count))
				goto err;
			pages[rc] = nth_page(page, j);
			rc++;
		}
	}

err:
	if (!IS_ERR_OR_NULL(sg_table))
		dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE);
	if (!IS_ERR_OR_NULL(attach))
		dma_buf_detach(dmabuf, attach);
	if (!IS_ERR_OR_NULL(dmabuf))
		dma_buf_put(dmabuf);
	return rc;
}

/*
 * exporter - grant & revoke
 * degenerate sharabled page list based on CPU friendly virtual "address".
@@ -165,7 +218,11 @@ int habmem_hyp_grant_user(unsigned long address,
	down_read(&current->mm->mmap_sem);

	if (HABMM_EXP_MEM_TYPE_DMA & flags) {
		ret = habmem_get_dma_pages(address,
		ret = habmem_get_dma_pages_from_va(address,
			page_count,
			pages);
	} else if (HABMM_EXPIMP_FLAGS_FD & flags) {
		ret = habmem_get_dma_pages_from_fd(address,
			page_count,
			pages);
	} else {
@@ -260,30 +317,156 @@ void habmem_imp_hyp_close(void *imp_ctx, int kernel)
	kfree(priv);
}

/*
 * setup pages, be ready for the following mmap call
 * index is output to refer to this imported buffer described by the import data
 */
long habmem_imp_hyp_map(void *imp_ctx,
		void *impdata,
		uint32_t count,
		uint32_t remotedom,
		uint64_t *index,
		void **pkva,
		int kernel,
		uint32_t userflags)
static struct sg_table *hab_mem_map_dma_buf(
	struct dma_buf_attachment *attachment,
	enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct pages_list *pglist = dmabuf->priv;
	struct sg_table *sgt;
	struct scatterlist *sg;
	int i;
	int ret = 0;
	struct page **pages = pglist->pages;

	sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(sgt, pglist->npages, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}

	for_each_sg(sgt->sgl, sg, pglist->npages, i) {
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
	}

	return sgt;
}


/* dma_buf op: free the sg table built by hab_mem_map_dma_buf(). */
static void hab_mem_unmap_dma_buf(struct dma_buf_attachment *attachment,
	struct sg_table *sgt,
	enum dma_data_direction direction)
{
	sg_free_table(sgt);
	kfree(sgt);
}

/*
 * Page-fault handler for VMAs backed by an imported pages_list.
 * Translates the faulting address into an index into pglist->pages and
 * returns the page (with an extra reference) to the VM core.
 */
static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;
	struct pages_list *pglist;
	unsigned long offset;
	unsigned long fault_offset;
	unsigned long fault_index;
	int page_idx;

	/* validate before touching vma: the original read vm_pgoff/vm_start
	 * before this NULL check, making the check useless
	 */
	if (vma == NULL)
		return VM_FAULT_SIGBUS;

	offset = vma->vm_pgoff << PAGE_SHIFT;

	/* PHY address */
	fault_offset =
		(unsigned long)vmf->virtual_address - vma->vm_start + offset;
	fault_index = fault_offset >> PAGE_SHIFT;

	pglist = vma->vm_private_data;

	page_idx = fault_index - pglist->index;
	if (page_idx < 0 || page_idx >= pglist->npages) {
		pr_err("Out of page array! page_idx %d, pg cnt %ld\n",
			page_idx, pglist->npages);
		return VM_FAULT_SIGBUS;
	}

	page = pglist->pages[page_idx];
	get_page(page);
	vmf->page = page;
	return 0;
}

/* No per-VMA state to duplicate on fork/split. */
static void hab_map_open(struct vm_area_struct *vma)
{
}

/* No per-VMA teardown; the pages_list is freed in habmm_imp_hyp_unmap(). */
static void hab_map_close(struct vm_area_struct *vma)
{
}

/* VMA callbacks for imported buffers; pages are populated lazily in fault. */
static const struct vm_operations_struct habmem_vm_ops = {
	.fault = hab_map_fault,
	.open = hab_map_open,
	.close = hab_map_close,
};

/*
 * dma_buf op: map the imported buffer into user space.  Pages are faulted
 * in lazily via habmem_vm_ops.  Returns 0 or a negative errno.
 */
static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct pages_list *pglist = dmabuf->priv;
	uint32_t obj_size = pglist->npages << PAGE_SHIFT;

	/* mmap ops return -errno; the original returned VM_FAULT_SIGBUS,
	 * which is a fault-handler code, not a valid mmap result
	 */
	if (vma == NULL)
		return -EINVAL;

	/* Reject mappings larger than the imported object. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/*
	 * NOTE(review): VM_PFNMAP and VM_MIXEDMAP are both set, as in the
	 * original; they are normally mutually exclusive -- confirm which
	 * one is intended.
	 */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
			VM_MIXEDMAP;
	vma->vm_ops = &habmem_vm_ops;
	vma->vm_private_data = pglist;

	return 0;
}

/*
 * dma_buf op: nothing to free here.
 * NOTE(review): pglist (dmabuf->priv) is not released in this callback;
 * presumably habmm_imp_hyp_unmap() owns it -- confirm there is no leak
 * when the fd outlives the unimport.
 */
static void hab_mem_dma_buf_release(struct dma_buf *dmabuf)
{
}

/* Kernel mapping of individual pages is not supported; callers get NULL. */
static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf,
		unsigned long offset)
{
	return NULL;
}

/* Matching no-op for hab_mem_dma_buf_kmap(). */
static void hab_mem_dma_buf_kunmap(struct dma_buf *dmabuf,
		unsigned long offset,
		void *ptr)
{
}

/* Operations for dma_bufs exported on behalf of imported remote buffers. */
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = hab_mem_map_dma_buf,
	.unmap_dma_buf = hab_mem_unmap_dma_buf,
	.mmap = hab_mem_mmap,
	.release = hab_mem_dma_buf_release,
	.kmap_atomic = hab_mem_dma_buf_kmap,
	.kunmap_atomic = hab_mem_dma_buf_kunmap,
	.kmap = hab_mem_dma_buf_kmap,
	.kunmap = hab_mem_dma_buf_kunmap,
};

/*
 * Import an exported buffer and hand it to user space as a new dma_buf fd.
 *
 * The export_desc payload is a compressed_pfns table built by the exporter;
 * it is expanded into a page array, wrapped in a dma_buf whose ops fault
 * the pages in on mmap, and the resulting fd is returned through @pfd.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): this block was reconstructed from diff residue (duplicated
 * old/new lines, a stray hunk header, a leftover "if (kernel) {").  The
 * pfn-expansion loop mirrors habmem_imp_hyp_map_kva/_uva; the unused
 * pgprot computation of the original was dropped.  Confirm against the
 * committed source.
 */
static int habmem_imp_hyp_map_fd(void *imp_ctx,
	struct export_desc *exp,
	uint32_t userflags,
	int32_t *pfd)
{
	struct page **pages;
	struct compressed_pfns *pfn_table =
			(struct compressed_pfns *)exp->payload;
	struct pages_list *pglist;
	struct importer_context *priv = imp_ctx;
	unsigned long pfn;
	int i, j, k = 0;
	int32_t fd;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!pfn_table || !priv)
		return -EINVAL;

	pages = vmalloc(exp->payload_count * sizeof(struct page *));
	if (!pages)
		return -ENOMEM;

	pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
	if (!pglist) {
		vfree(pages);
		return -ENOMEM;
	}

	/* expand the region-compressed pfn table into individual pages */
	pfn = pfn_table->first_pfn;
	for (i = 0; i < pfn_table->nregions; i++) {
		for (j = 0; j < pfn_table->region[i].size; j++) {
			pages[k] = pfn_to_page(pfn+j);
			k++;
		}
		pfn += pfn_table->region[i].size + pfn_table->region[i].space;
	}

	pglist->pages = pages;
	pglist->npages = exp->payload_count;
	pglist->kernel = 0;
	pglist->index = 0;
	pglist->refcntk = pglist->refcntu = 0;
	pglist->userflags = userflags;
	pglist->export_id = exp->export_id;
	pglist->vcid = exp->vcid_remote;

	exp_info.ops = &dma_buf_ops;
	exp_info.size = exp->payload_count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = pglist;
	pglist->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(pglist->dmabuf)) {
		vfree(pages);
		kfree(pglist);
		return PTR_ERR(pglist->dmabuf);
	}

	fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC);
	if (fd < 0) {
		dma_buf_put(pglist->dmabuf);
		vfree(pages);
		kfree(pglist);
		return -EINVAL;
	}

	pglist->refcntk++;

	write_lock(&priv->implist_lock);
	list_add_tail(&pglist->list, &priv->imp_list);
	priv->cnt++;
	write_unlock(&priv->implist_lock);

	*pfd = fd;

	return 0;
}

/*
 * Import an exported buffer into kernel virtual address space.
 * The compressed pfn table is expanded into a page array which is then
 * vmap()ed; the kernel VA is returned through @pkva.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the vmap() call itself fell outside the visible hunk and
 * was reconstructed from its error/success paths -- confirm the mapping
 * flags against the committed source.
 */
static int habmem_imp_hyp_map_kva(void *imp_ctx,
	struct export_desc *exp,
	uint32_t userflags,
	void **pkva)
{
	struct page **pages;
	struct compressed_pfns *pfn_table =
		(struct compressed_pfns *)exp->payload;
	struct pages_list *pglist;
	struct importer_context *priv = imp_ctx;
	unsigned long pfn;
	int i, j, k = 0;
	pgprot_t prot = PAGE_KERNEL;

	if (!pfn_table || !priv)
		return -EINVAL;

	pages = vmalloc(exp->payload_count * sizeof(struct page *));
	if (!pages)
		return -ENOMEM;

	pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
	if (!pglist) {
		vfree(pages);
		return -ENOMEM;
	}

	/* expand the region-compressed pfn table into individual pages */
	pfn = pfn_table->first_pfn;
	for (i = 0; i < pfn_table->nregions; i++) {
		for (j = 0; j < pfn_table->region[i].size; j++) {
			pages[k] = pfn_to_page(pfn+j);
			k++;
		}
		pfn += pfn_table->region[i].size + pfn_table->region[i].space;
	}

	pglist->pages = pages;
	pglist->npages = exp->payload_count;
	pglist->kernel = 1;
	pglist->refcntk = pglist->refcntu = 0;
	pglist->userflags = userflags;
	pglist->export_id = exp->export_id;
	pglist->vcid = exp->vcid_remote;

	/* non-cached imports get a write-combined kernel mapping */
	if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
		prot = pgprot_writecombine(prot);

	pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
	if (pglist->kva == NULL) {
		vfree(pages);
		kfree(pglist);
		pr_err("%ld pages vmap failed\n", pglist->npages);
		return -ENOMEM;
	}
	/* %pK (not %p) so the raw kernel address is not leaked to logs */
	pr_debug("%ld pages vmap pass, return %pK\n",
			pglist->npages, pglist->kva);

	pglist->refcntk++;

	write_lock(&priv->implist_lock);
	list_add_tail(&pglist->list, &priv->imp_list);
	priv->cnt++;
	write_unlock(&priv->implist_lock);

	*pkva = pglist->kva;

	return 0;
}

/*
 * Import an exported buffer for a later user-space mmap().
 * No mapping is created here; pglist->index records the physical page
 * index of the first page and is returned (shifted to a byte offset)
 * through @index for habmem_imp_hyp_mmap() to use.
 * Returns 0 on success or a negative errno.
 */
static int habmem_imp_hyp_map_uva(void *imp_ctx,
	struct export_desc *exp,
	uint32_t userflags,
	uint64_t *index)
{
	struct page **pages;
	struct compressed_pfns *pfn_table =
		(struct compressed_pfns *)exp->payload;
	struct pages_list *pglist;
	struct importer_context *priv = imp_ctx;
	unsigned long pfn;
	int i, j, k = 0;

	if (!pfn_table || !priv)
		return -EINVAL;

	pages = vmalloc(exp->payload_count * sizeof(struct page *));
	if (!pages)
		return -ENOMEM;

	pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
	if (!pglist) {
		vfree(pages);
		return -ENOMEM;
	}

	/* expand the region-compressed pfn table into individual pages */
	pfn = pfn_table->first_pfn;
	for (i = 0; i < pfn_table->nregions; i++) {
		for (j = 0; j < pfn_table->region[i].size; j++) {
			pages[k] = pfn_to_page(pfn+j);
			k++;
		}
		pfn += pfn_table->region[i].size + pfn_table->region[i].space;
	}

	pglist->pages = pages;
	pglist->npages = exp->payload_count;
	pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
	pglist->refcntk = pglist->refcntu = 0;
	pglist->userflags = userflags;
	pglist->export_id = exp->export_id;
	pglist->vcid = exp->vcid_remote;

	write_lock(&priv->implist_lock);
	list_add_tail(&pglist->list, &priv->imp_list);
	priv->cnt++;
	write_unlock(&priv->implist_lock);

	*index = pglist->index << PAGE_SHIFT;

	/* log AFTER *index is assigned; the original printed it one line
	 * before the assignment, dumping the caller's uninitialized value
	 */
	pr_debug("index returned %llx\n", *index);

	return 0;
}

/* the input index is PHY address shifted for uhab, and kva for khab */
long habmm_imp_hyp_unmap(void *imp_ctx,
		uint64_t index,
		uint32_t count,
		int kernel)
int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
		struct export_desc *exp, int kernel)
{
	int ret = 0;

	if (kernel)
		ret = habmem_imp_hyp_map_kva(imp_ctx, exp,
					param->flags,
					(void **)&param->kva);
	else if (param->flags & HABMM_EXPIMP_FLAGS_FD)
		ret = habmem_imp_hyp_map_fd(imp_ctx, exp,
					param->flags,
					(int32_t *)&param->kva);
	else
		ret = habmem_imp_hyp_map_uva(imp_ctx, exp,
					param->flags,
					&param->index);

	return ret;
}

int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp)
{
	struct importer_context *priv = imp_ctx;
	struct pages_list *pglist, *tmp;
	int found = 0;
	uint64_t pg_index = index >> PAGE_SHIFT;

	write_lock(&priv->implist_lock);
	list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
		pr_debug("node pglist %pK, kernel %d, pg_index %llx\n",
			pglist, pglist->kernel, pg_index);

		if (kernel) {
			if (pglist->kva == (void *)((uintptr_t)index))
				found  = 1;
		} else {
			if (pglist->index == pg_index)
		if (pglist->export_id == exp->export_id &&
		    pglist->vcid == exp->vcid_remote) {
			found = 1;
		}

@@ -375,74 +689,28 @@ long habmm_imp_hyp_unmap(void *imp_ctx,
			break;
		}
	}

	write_unlock(&priv->implist_lock);

	if (!found) {
		pr_err("failed to find export id on index %llx\n", index);
		pr_err("failed to find export id %u\n", exp->export_id);
		return -EINVAL;
	}

	pr_debug("detach pglist %pK, index %llx, kernel %d, list cnt %d\n",
		pglist, pglist->index, pglist->kernel, priv->cnt);
	pr_debug("detach pglist %p, kernel %d, list cnt %d\n",
		pglist, pglist->kernel, priv->cnt);

	if (kernel)
	if (pglist->kva)
		vunmap(pglist->kva);

	if (pglist->dmabuf)
		dma_buf_put(pglist->dmabuf);

	vfree(pglist->pages);
	kfree(pglist);

	return 0;
}

/*
 * NOTE(review): the definitions below are the PRE-change (removed-side)
 * versions shown by the diff; they duplicate hab_map_fault/hab_map_open/
 * hab_map_close/habmem_vm_ops defined earlier on this page.  In the
 * committed file only one copy exists.
 */
/* Fault handler (old version): note vma is dereferenced before the NULL
 * check and the capacity pr_err lacks a trailing newline.
 */
static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;
	struct pages_list *pglist;

	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	/* PHY address */
	unsigned long fault_offset =
		(unsigned long)vmf->virtual_address - vma->vm_start + offset;
	unsigned long fault_index = fault_offset>>PAGE_SHIFT;
	int page_idx;

	if (vma == NULL)
		return VM_FAULT_SIGBUS;

	pglist  = vma->vm_private_data;

	page_idx = fault_index - pglist->index;
	if (page_idx < 0 || page_idx >= pglist->npages) {
		pr_err("Out of page array. page_idx %d, pg cnt %ld",
			page_idx, pglist->npages);
		return VM_FAULT_SIGBUS;
	}

	pr_debug("Fault page index %d\n", page_idx);

	page = pglist->pages[page_idx];
	get_page(page);
	vmf->page = page;
	return 0;
}

/* No per-VMA state to duplicate on fork/split. */
static void hab_map_open(struct vm_area_struct *vma)
{
}

/* No per-VMA teardown needed. */
static void hab_map_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct habmem_vm_ops = {

	.fault = hab_map_fault,
	.open = hab_map_open,
	.close = hab_map_close,
};

int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct uhab_context *ctx = (struct uhab_context *) filp->private_data;
+11 −19
Original line number Diff line number Diff line
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -345,25 +345,20 @@ int hab_mem_import(struct uhab_context *ctx,
		exp->export_id, exp->payload_count, exp->domid_local,
		*((uint32_t *)exp->payload));

	ret = habmem_imp_hyp_map(ctx->import_ctx,
		exp->payload,
		exp->payload_count,
		exp->domid_local,
		&exp->import_index,
		&exp->kva,
		kernel,
		param->flags);
	ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel);

	if (ret) {
		pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
			ret, exp->payload_count,
			exp->domid_local, *((uint32_t *)exp->payload));
		return ret;
	}
	pr_debug("import index %llx, kva %llx, kernel %d\n",
		exp->import_index, param->kva, kernel);

	param->index = exp->import_index;
	param->kva = (uint64_t)exp->kva;
	exp->import_index = param->index;
	exp->kva = kernel ? (void *)param->kva : NULL;

	pr_debug("import index %llx, kva or fd %llx, kernel %d\n",
		exp->import_index, param->kva, kernel);

	return ret;
}
@@ -396,13 +391,10 @@ int hab_mem_unimport(struct uhab_context *ctx,
	if (!found)
		ret = -EINVAL;
	else {
		ret = habmm_imp_hyp_unmap(ctx->import_ctx,
			exp->import_index,
			exp->payload_count,
			kernel);
		ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp);
		if (ret) {
			pr_err("unmap fail id:%d pcnt:%d kernel:%d\n",
				exp->export_id, exp->payload_count, kernel);
			pr_err("unmap fail id:%d pcnt:%d vcid:%d\n",
			exp->export_id, exp->payload_count, exp->vcid_remote);
		}
		param->kva = (uint64_t)exp->kva;
		kfree(exp);
+1 −4
Original line number Diff line number Diff line
@@ -110,10 +110,7 @@ hab_vchan_free(struct kref *ref)
		}
		spin_unlock_bh(&ctx->imp_lock);
		if (found) {
			habmm_imp_hyp_unmap(ctx->import_ctx,
				exp->import_index,
				exp->payload_count,
				ctx->kernel);
			habmm_imp_hyp_unmap(ctx->import_ctx, exp);
			ctx->import_total--;
			kfree(exp);
		}
+5 −0
Original line number Diff line number Diff line
@@ -214,6 +214,11 @@ int32_t habmm_socket_recvfrom(int32_t handle, void *dst_buff,
 */
#define HABMM_EXP_MEM_TYPE_DMA 0x00000001

/*
 * This flag selects export from, or import to, a dma_buf fd.
 */
#define HABMM_EXPIMP_FLAGS_FD  0x00010000

#define HAB_MAX_EXPORT_SIZE 0x8000000

/*