
Commit 89177644 authored by Aaron Plattner, committed by Dave Airlie

drm: add prime helpers



Instead of reimplementing all of the dma_buf functionality in every driver,
create helpers drm_gem_prime_import and drm_gem_prime_export that implement it
in terms of new, lower-level hook functions:

  gem_prime_pin: callback when a buffer is exported, used to pin buffers into GTT
  gem_prime_get_sg_table: convert a drm_gem_object to an sg_table for export
  gem_prime_import_sg_table: convert an sg_table into a drm_gem_object
  gem_prime_vmap, gem_prime_vunmap: map and unmap an object

Use of these helpers is optional; drivers opt in by plugging drm_gem_prime_import
and drm_gem_prime_export into the .gem_prime_import and .gem_prime_export fields
of struct drm_driver.
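
As an illustration (a sketch only, not part of this patch: the "foo" driver, its
feature flags, and its foo_gem_prime_* hook implementations are hypothetical), a
driver that opts in would fill out its struct drm_driver roughly like this:

  static struct drm_driver foo_driver = {
          .driver_features = DRIVER_GEM | DRIVER_PRIME,
          /* hand PRIME export/import off to the new helpers */
          .gem_prime_export = drm_gem_prime_export,
          .gem_prime_import = drm_gem_prime_import,
          /* lower-level hooks the helpers call back into */
          .gem_prime_pin = foo_gem_prime_pin,             /* optional */
          .gem_prime_get_sg_table = foo_gem_prime_get_sg_table,
          .gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
          .gem_prime_vmap = foo_gem_prime_vmap,
          .gem_prime_vunmap = foo_gem_prime_vunmap,
          /* ... remaining drm_driver fields (fops, ioctls, etc.) ... */
  };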

v2:
- Drop .begin_cpu_access.  None of the drivers this code replaces implemented
  it.  Having it here was a leftover from when I was trying to include i915 in
  this rework.
- Use mutex_lock instead of mutex_lock_interruptible, as these three drivers
  did.  This patch series shouldn't change that behavior.
- Rename helpers to gem_prime_get_sg_table and gem_prime_import_sg_table.
  Rename struct sg_table* variables to 'sgt' for clarity.
- Update drm.tmpl for these new hooks.

v3:
- Pass the vaddr down to the driver.  This lets drivers whose vunmap hook just
  calls vunmap on the pointer avoid having to store it in their GEM private
  structures (see the sketch after this list).
- Move documentation into a /** DOC */ comment in drm_prime.c and include it in
  drm.tmpl with a !P line.  I tried to use !F lines to include documentation of
  the individual functions from drmP.h, but the docproc / kernel-doc scripts
  barf on that file, so hopefully this is good enough for now.
- Apply the refcount fix from commit be8a42ae
  ("drm/prime: drop reference on imported dma-buf come from gem")

Signed-off-by: Aaron Plattner <aplattner@nvidia.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 6504d0d9
Documentation/DocBook/drm.tmpl  +4 −0
@@ -743,6 +743,10 @@ char *date;</synopsis>
          These two operations are mandatory for GEM drivers that support DRM
          PRIME.
        </para>
        <sect4>
          <title>DRM PRIME Helper Functions Reference</title>
!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
        </sect4>
      </sect3>
      <sect3 id="drm-gem-objects-mapping">
        <title>GEM Objects Mapping</title>
drivers/gpu/drm/drm_prime.c  +185 −1
@@ -53,7 +53,8 @@
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the gem object
 * from the dma-buf private.  Prime will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */

struct drm_prime_member {
@@ -62,6 +63,137 @@ struct drm_prime_member {
	uint32_t handle;
};


static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	mutex_lock(&obj->dev->struct_mutex);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR_OR_NULL(sgt))
		dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	mutex_unlock(&obj->dev->struct_mutex);
	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
}

static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	if (obj->export_dma_buf == dma_buf) {
		/* drop the reference on the export fd holds */
		obj->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(obj);
	}
}

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
 * five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */

struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	if (dev->driver->gem_prime_pin) {
		int ret = dev->driver->gem_prime_pin(obj);
		if (ret)
			return ERR_PTR(ret);
	}
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
			      0600);
}
EXPORT_SYMBOL(drm_gem_prime_export);

int drm_gem_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
		int *prime_fd)
@@ -117,6 +249,58 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);


struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

int drm_gem_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
include/drm/drmP.h  +12 −0
@@ -930,6 +930,14 @@ struct drm_driver {
	/* import dmabuf -> GEM */
	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
				struct dma_buf *dma_buf);
	/* low-level interface used by drm_gem_prime_{import,export} */
	int (*gem_prime_pin)(struct drm_gem_object *obj);
	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
	struct drm_gem_object *(*gem_prime_import_sg_table)(
				struct drm_device *dev, size_t size,
				struct sg_table *sgt);
	void *(*gem_prime_vmap)(struct drm_gem_object *obj);
	void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);


	/* vga arb irq handler */
	void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1562,9 +1570,13 @@ extern int drm_clients_info(struct seq_file *m, void* data);
extern int drm_gem_name_info(struct seq_file *m, void *data);

extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags);
extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
		int *prime_fd);
extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf);
extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv, int prime_fd, uint32_t *handle);