Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a5ed8940 authored by Laurent Pinchart's avatar Laurent Pinchart
Browse files

drm: GEM CMA: Split object creation into object alloc and DMA memory alloc



This allows creating a GEM CMA object without an associated DMA memory
buffer, and will be used to implement DRM PRIME support.

Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Reviewed-by: Rob Clark <robdclark@gmail.com>
parent bda3fdaa
Loading
Loading
Loading
Loading
+48 −35
Original line number Diff line number Diff line
@@ -32,62 +32,73 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
}

/*
 * drm_gem_cma_buf_destroy - Release the DMA memory backing a GEM CMA object
 * @drm: The drm device the buffer was allocated for
 * @cma_obj: The GEM CMA object whose backing buffer is freed
 *
 * Frees the write-combined DMA buffer (vaddr/paddr pair) previously obtained
 * from dma_alloc_writecombine(). Only the backing memory is released; the
 * drm_gem_cma_object itself and its GEM base remain valid and must be torn
 * down separately by the caller.
 */
static void drm_gem_cma_buf_destroy(struct drm_device *drm,
		struct drm_gem_cma_object *cma_obj)
{
	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
			cma_obj->paddr);
}

/*
 * drm_gem_cma_create - allocate an object with the given size
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: The drm device
 * @size: The GEM object size
 *
 * returns a struct drm_gem_cma_object* on success or ERR_PTR values
 * on failure.
 * This function creates and initializes a GEM CMA object of the given size, but
 * doesn't allocate any memory to back the object.
 *
 * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
		unsigned int size)
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!cma_obj)
		return ERR_PTR(-ENOMEM);

	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	gem_obj = &cma_obj->base;

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto err_obj_init;
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret)
		goto err_create_mmap_offset;
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

err_create_mmap_offset:
	drm_gem_object_release(gem_obj);
error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}

/*
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: The drm device
 * @size: The GEM object size; rounded up to a multiple of PAGE_SIZE here
 *
 * Creates a GEM CMA object via __drm_gem_cma_create() and backs it with a
 * write-combined DMA buffer.
 *
 * returns a struct drm_gem_cma_object* on success or ERR_PTR values
 * on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
		unsigned int size)
{
	struct drm_gem_cma_object *cma_obj;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %d\n",
			size);
		/*
		 * free_object handles the vaddr == NULL case, releasing only
		 * the GEM object state set up by __drm_gem_cma_create().
		 */
		drm_gem_cma_free_object(&cma_obj->base);
		return ERR_PTR(-ENOMEM);
	}

	return cma_obj;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);

@@ -143,11 +154,13 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);

	drm_gem_object_release(gem_obj);

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
	if (cma_obj->vaddr)
		dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
				      cma_obj->vaddr, cma_obj->paddr);

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}