Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit af33a919 authored by Laura Abbott, committed by Daniel Vetter
Browse files

drm/vgem: Enable dmabuf import interfaces



Enable the GEM dma-buf import interfaces in addition to the export
interfaces. This lets vgem be used as a test source for other allocators
(e.g. Ion).

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1493923548-20878-4-git-send-email-labbott@redhat.com
parent 7e491583
Loading
Loading
Loading
Loading
+107 −29
Original line number Diff line number Diff line
@@ -48,6 +48,11 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	drm_free_large(vgem_obj->pages);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}
@@ -58,26 +63,49 @@ static int vgem_gem_fault(struct vm_fault *vmf)
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	int ret;
	loff_t num_pages;
	pgoff_t page_offset;
	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;

	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	if (page_offset > num_pages)
		return VM_FAULT_SIGBUS;

	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	} else {
		struct page *page;

	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
				       (vaddr - vma->vm_start) >> PAGE_SHIFT);
		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
		return 0;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
			return VM_FAULT_OOM;
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
			return VM_FAULT_RETRY;
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
			return VM_FAULT_SIGBUS;
				ret = VM_FAULT_SIGBUS;
				break;
			default:
			WARN_ON_ONCE(PTR_ERR(page));
			return VM_FAULT_SIGBUS;
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}

	}
	return ret;
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
@@ -114,11 +142,7 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
	kfree(vfile);
}

/* ioctls */

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						unsigned long size)
{
	struct drm_vgem_gem_object *obj;
@@ -129,8 +153,31 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret)
		goto err_free;
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

/* Tear down an object produced by __vgem_gem_create(): release the GEM
 * object core and free the wrapper allocation. Counterpart of the init
 * path; callers use it on error paths before a handle exists. */
static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	drm_gem_object_unreference_unlocked(&obj->base);
@@ -139,9 +186,8 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,

	return &obj->base;

err_free:
	kfree(obj);
err:
	__vgem_gem_destroy(obj);
	return ERR_PTR(ret);
}

@@ -258,6 +304,35 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
	return st;
}

/*
 * dma-buf import entry point for the vgem driver.
 *
 * Delegates to the common PRIME import helper, attaching the dma-buf to
 * the vgem platform device (vgem_platform is presumably the file-scope
 * platform device created at module init — defined outside this hunk,
 * verify against the rest of the file).
 */
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, &vgem_platform->dev);
}

/*
 * Build a vgem GEM object around an imported dma-buf's scatter/gather
 * table.
 *
 * Allocates a drm_vgem_gem_object sized to the dma-buf, keeps a
 * reference to @sg in obj->table, and fills obj->pages with the page
 * pointers extracted from the sg_table so the fault handler can serve
 * them directly.
 *
 * Returns the embedded drm_gem_object on success or an ERR_PTR on
 * allocation failure.
 */
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/* Number of PAGE_SIZE pages covering the (page-aligned) buffer. */
	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!obj->pages) {
		/* Unwind the GEM init; no handle was created yet. */
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}
	/* Populate obj->pages from the sg list (no addr array needed). */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					npages);
	return &obj->base;
}

static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
	long n_pages = obj->size >> PAGE_SHIFT;
@@ -316,8 +391,11 @@ static struct drm_driver vgem_driver = {
	.dumb_map_offset		= vgem_gem_dumb_map,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_import = vgem_prime_import,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
+2 −0
Original line number Diff line number Diff line
@@ -43,6 +43,8 @@ struct vgem_file {
#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
struct drm_vgem_gem_object {
	struct drm_gem_object base;
	/* Page array backing the object; set on dma-buf import (filled
	 * from the sg_table) and consulted first by the fault handler. */
	struct page **pages;
	/* Scatter/gather table of an imported dma-buf; passed to
	 * drm_prime_gem_destroy() at free time when import_attach is
	 * set. Presumably NULL for natively allocated objects — confirm
	 * against the allocation path. */
	struct sg_table *table;
};

int vgem_fence_open(struct vgem_file *file);