Commit edee06b6 authored by Dave Airlie

Merge branch 'drm/next' of git://linuxtv.org/pinchartl/fbdev into drm-next

GEM CMA PRIME support from Laurent.

* 'drm/next' of git://linuxtv.org/pinchartl/fbdev:
  drm: GEM CMA: Add DRM PRIME support
  drm: GEM CMA: Split object mapping into GEM mapping and CMA mapping
  drm: GEM CMA: Split object creation into object alloc and DMA memory alloc
  drm/omap: Use drm_gem_mmap_obj() to implement dma-buf mmap
  drm/gem: Split drm_gem_mmap() into object search and object mapping
parents d20d3174 71d7282a
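
For context, here is a minimal sketch (not part of this commit) of how a CMA-based KMS driver might wire these helpers into its struct drm_driver; the foo_ prefix and the exact set of other fields are illustrative assumptions:

	static struct drm_driver foo_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
		/* object release and mmap handling come from the GEM CMA helpers */
		.gem_free_object	= drm_gem_cma_free_object,
		.gem_vm_ops		= &drm_gem_cma_vm_ops,
		/* the PRIME export/import entry points added by this series */
		.gem_prime_export	= drm_gem_cma_dmabuf_export,
		.gem_prime_import	= drm_gem_cma_dmabuf_import,
		/* ... fops, dumb-buffer and mode-setting hooks omitted ... */
	};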
drivers/gpu/drm/drm_gem.c +52 −31
@@ -644,6 +644,55 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * Return 0 on success, or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
@@ -653,11 +702,9 @@ EXPORT_SYMBOL(drm_gem_vm_close);
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
- * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
+ * the object) and map it with a call to drm_gem_mmap_obj().
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
@@ -665,7 +712,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
-	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

@@ -686,32 +732,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
		goto out_unlock;
	}

-	/* Check for valid size. */
-	if (map->size < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	obj = map->handle;
-	if (!obj->dev->driver->gem_vm_ops) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = obj->dev->driver->gem_vm_ops;
-	vma->vm_private_data = map->handle;
-	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	drm_gem_object_reference(obj);
-
-	drm_vm_open_locked(dev, vma);
+	ret = drm_gem_mmap_obj(map->handle, map->size, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
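
The fake offset consumed here is the one userspace obtains from a map ioctl and passes back through mmap() on the DRM file descriptor, where it lands in vma->vm_pgoff. A hedged userspace sketch of that round trip via the dumb-buffer ioctl (handle, size and fd are assumed to exist; error handling omitted; drmIoctl() is libdrm's ioctl wrapper):

	struct drm_mode_map_dumb map = { .handle = handle };

	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);	/* kernel fills map.offset */
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 fd, map.offset);		/* routed to drm_gem_mmap() */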
drivers/gpu/drm/drm_gem_cma_helper.c +368 −36
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
@@ -32,11 +33,44 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
}

-static void drm_gem_cma_buf_destroy(struct drm_device *drm,
-		struct drm_gem_cma_object *cma_obj)
+/*
+ * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
+ * @drm: The drm device
+ * @size: The GEM object size
+ *
+ * This function creates and initializes a GEM CMA object of the given size, but
+ * doesn't allocate any memory to back the object.
+ *
+ * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
+ */
+static struct drm_gem_cma_object *
+__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
{
-	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
-			cma_obj->paddr);
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+	if (!cma_obj)
+		return ERR_PTR(-ENOMEM);
+
+	gem_obj = &cma_obj->base;
+
+	ret = drm_gem_object_init(drm, gem_obj, size);
+	if (ret)
+		goto error;
+
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret) {
+		drm_gem_object_release(gem_obj);
+		goto error;
+	}
+
+	return cma_obj;
+
+error:
+	kfree(cma_obj);
+	return ERR_PTR(ret);
}

/*
@@ -49,44 +83,42 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
		unsigned int size)
{
	struct drm_gem_cma_object *cma_obj;
-	struct drm_gem_object *gem_obj;
+	struct sg_table *sgt = NULL;
	int ret;

	size = round_up(size, PAGE_SIZE);

-	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
-	if (!cma_obj)
-		return ERR_PTR(-ENOMEM);
+	cma_obj = __drm_gem_cma_create(drm, size);
+	if (IS_ERR(cma_obj))
+		return cma_obj;

	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
-		dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
+		dev_err(drm->dev, "failed to allocate buffer with size %d\n",
+			size);
		ret = -ENOMEM;
-		goto err_dma_alloc;
+		goto error;
	}

-	gem_obj = &cma_obj->base;
+	sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL);
+	if (sgt == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}

-	ret = drm_gem_object_init(drm, gem_obj, size);
-	if (ret)
-		goto err_obj_init;
+	ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
+			      cma_obj->paddr, size);
+	if (ret < 0)
+		goto error;

-	ret = drm_gem_create_mmap_offset(gem_obj);
-	if (ret)
-		goto err_create_mmap_offset;
+	cma_obj->sgt = sgt;

	return cma_obj;

-err_create_mmap_offset:
-	drm_gem_object_release(gem_obj);
-
-err_obj_init:
-	drm_gem_cma_buf_destroy(drm, cma_obj);
-
-err_dma_alloc:
-	kfree(cma_obj);
-
+error:
+	kfree(sgt);
+	drm_gem_cma_free_object(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -143,11 +175,20 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);

-	drm_gem_object_release(gem_obj);
-
	cma_obj = to_drm_gem_cma_obj(gem_obj);

-	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+	if (cma_obj->vaddr) {
+		dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
+				      cma_obj->vaddr, cma_obj->paddr);
+		if (cma_obj->sgt) {
+			sg_free_table(cma_obj->sgt);
+			kfree(cma_obj->sgt);
+		}
+	} else if (gem_obj->import_attach) {
+		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+	}
+
+	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
@@ -215,13 +256,26 @@ const struct vm_operations_struct drm_gem_cma_vm_ops = {
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

+static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
+				struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	if (ret)
+		drm_gem_vm_close(vma);
+
+	return ret;
+}
+
/*
 * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
-	struct drm_gem_object *gem_obj;
	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
@@ -231,12 +285,7 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
	gem_obj = vma->vm_private_data;
	cma_obj = to_drm_gem_cma_obj(gem_obj);

-	ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
-			vma->vm_end - vma->vm_start, vma->vm_page_prot);
-	if (ret)
-		drm_gem_vm_close(vma);
-
-	return ret;
+	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

@@ -270,3 +319,286 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif

/* -----------------------------------------------------------------------------
 * DMA-BUF
 */

struct drm_gem_cma_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};

static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
				     struct dma_buf_attachment *attach)
{
	struct drm_gem_cma_dmabuf_attachment *cma_attach;

	cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
	if (!cma_attach)
		return -ENOMEM;

	cma_attach->dir = DMA_NONE;
	attach->priv = cma_attach;

	return 0;
}

static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
				      struct dma_buf_attachment *attach)
{
	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
	struct sg_table *sgt;

	if (cma_attach == NULL)
		return;

	sgt = &cma_attach->sgt;

	if (cma_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				cma_attach->dir);

	sg_free_table(sgt);
	kfree(cma_attach);
	attach->priv = NULL;
}

static struct sg_table *
drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
		       enum dma_data_direction dir)
{
	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
	struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
	struct drm_device *drm = cma_obj->base.dev;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	unsigned int i;
	int nents, ret;

	DRM_DEBUG_PRIME("\n");

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (cma_attach->dir == dir)
		return &cma_attach->sgt;

	/* Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (WARN_ON(cma_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = &cma_attach->sgt;

	ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&drm->struct_mutex);

	rd = cma_obj->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with iommu.\n");
		sg_free_table(sgt);
		sgt = ERR_PTR(-EIO);
		goto done;
	}

	cma_attach->dir = dir;
	attach->priv = cma_attach;

	DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);

done:
	mutex_unlock(&drm->struct_mutex);
	return sgt;
}

static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
				     struct sg_table *sgt,
				     enum dma_data_direction dir)
{
	/* Nothing to do. */
}

static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
{
	struct drm_gem_cma_object *cma_obj = dmabuf->priv;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/*
	 * drm_gem_cma_dmabuf_release() being called means that the dma-buf
	 * file's f_count has dropped to 0, so drop the GEM object references
	 * that were taken in drm_prime_handle_to_fd().
	 */
	if (cma_obj->base.export_dma_buf == dmabuf) {
		cma_obj->base.export_dma_buf = NULL;

		/*
		 * Drop this GEM object's refcount to release the allocated
		 * buffer and its resources.
		 */
		drm_gem_object_unreference_unlocked(&cma_obj->base);
	}
}

static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
					    unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
					     unsigned long page_num, void *addr)
{
	/* TODO */
}

static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
				     unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
				      unsigned long page_num, void *addr)
{
	/* TODO */
}

static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
				   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
	struct drm_gem_object *gem_obj = &cma_obj->base;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	return drm_gem_cma_mmap_obj(cma_obj, vma);
}

static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
{
	struct drm_gem_cma_object *cma_obj = dmabuf->priv;

	return cma_obj->vaddr;
}

static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
	.attach			= drm_gem_cma_dmabuf_attach,
	.detach			= drm_gem_cma_dmabuf_detach,
	.map_dma_buf		= drm_gem_cma_dmabuf_map,
	.unmap_dma_buf		= drm_gem_cma_dmabuf_unmap,
	.kmap			= drm_gem_cma_dmabuf_kmap,
	.kmap_atomic		= drm_gem_cma_dmabuf_kmap_atomic,
	.kunmap			= drm_gem_cma_dmabuf_kunmap,
	.kunmap_atomic		= drm_gem_cma_dmabuf_kunmap_atomic,
	.mmap			= drm_gem_cma_dmabuf_mmap,
	.vmap			= drm_gem_cma_dmabuf_vmap,
	.release		= drm_gem_cma_dmabuf_release,
};

struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
					  struct drm_gem_object *obj, int flags)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
			      cma_obj->base.size, flags);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);

struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
						 struct dma_buf *dma_buf)
{
	struct drm_gem_cma_object *cma_obj;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of our own objects? */
	if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
		struct drm_gem_object *obj;

		cma_obj = dma_buf->priv;
		obj = &cma_obj->base;

		/* is it from our device? */
		if (obj->dev == drm) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
	if (IS_ERR(cma_obj))
		return ERR_PTR(PTR_ERR(cma_obj));

	/* Attach to the buffer and map it. Make sure the mapping is contiguous
	 * on the device memory bus, as that's all we support.
	 */
	attach = dma_buf_attach(dma_buf, drm->dev);
	if (IS_ERR(attach)) {
		ret = -EINVAL;
		goto error_gem_free;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
		goto error_buf_detach;
	}

	if (sgt->nents != 1) {
		ret = -EINVAL;
		goto error_buf_unmap;
	}

	cma_obj->base.import_attach = attach;
	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr,
			dma_buf->size);

	return &cma_obj->base;

error_buf_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
error_buf_detach:
	dma_buf_detach(dma_buf, attach);
error_gem_free:
	drm_gem_cma_free_object(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
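
For reference, the import path above is normally reached from userspace through the PRIME fd-to-handle ioctl, which ends up in the driver's gem_prime_import hook. A hedged sketch (card_fd and dmabuf_fd are assumed to exist; error handling omitted):

	struct drm_prime_handle args = { .fd = dmabuf_fd };

	drmIoctl(card_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
	/* args.handle now names a GEM object backed by the imported dma-buf;
	 * on a driver using these helpers it was created by
	 * drm_gem_cma_dmabuf_import(). */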
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +3 −29
@@ -136,10 +136,6 @@ static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
	kunmap(pages[page_num]);
}

-/*
- * TODO maybe we can split up drm_gem_mmap to avoid duplicating
- * some here.. or at least have a drm_dmabuf_mmap helper.
- */
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
@@ -149,31 +145,9 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
	if (WARN_ON(!obj->filp))
		return -EINVAL;

-	/* Check for valid size. */
-	if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	if (!obj->dev->driver->gem_vm_ops) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = obj->dev->driver->gem_vm_ops;
-	vma->vm_private_data = obj;
-	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	vma->vm_ops->open(vma);
-
-out_unlock:
+	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
+	if (ret < 0)
+		return ret;

+	return omap_gem_mmap_obj(obj, vma);
}
include/drm/drmP.h +2 −0
@@ -1616,6 +1616,8 @@ int drm_gem_private_object_init(struct drm_device *dev,
void drm_gem_object_handle_free(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

#include <drm/drm_global.h>
include/drm/drm_gem_cma_helper.h +9 −0
@@ -4,6 +4,9 @@
struct drm_gem_cma_object {
	struct drm_gem_object base;
	dma_addr_t paddr;
	struct sg_table *sgt;

	/* For objects with DMA memory allocated by GEM CMA */
	void *vaddr;
};

@@ -45,4 +48,10 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops;
void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
#endif

struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev,
					  struct drm_gem_object *obj,
					  int flags);
struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev,
						 struct dma_buf *dma_buf);

#endif /* __DRM_GEM_CMA_HELPER_H__ */
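
As a usage note, paddr is now valid both for locally allocated objects (vaddr set) and for imported ones (import_attach set), so scanout code can consume it uniformly. A minimal kernel-side sketch, where foo_write() and FOO_FB_BASE are hypothetical register accessors:

	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);

	/* program the contiguous buffer's bus address into the display
	 * controller, regardless of how the buffer was obtained */
	foo_write(priv, FOO_FB_BASE, cma_obj->paddr);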