Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ef5be4af authored by Gerd Hoffmann, committed by Alistair Delva
Browse files

UPSTREAM: drm/virtio: rework resource creation workflow.



This patch moves the virtio_gpu_cmd_create_resource() call (which
notifies the host about the new resource created) into the
virtio_gpu_object_create() function.  That way we can call
virtio_gpu_cmd_create_resource() before ttm_bo_init(), so the host
already knows about the object when ttm initializes the object and calls
our driver callbacks.

Specifically the object is already created when the
virtio_gpu_ttm_tt_bind() callback invokes virtio_gpu_object_attach(),
so the extra virtio_gpu_object_attach() calls done after
virtio_gpu_object_create() are not needed any more.

The fence support for the create ioctl becomes a bit more tricky though.
The code moved into virtio_gpu_object_create() too.  We first submit the
(fenced) virtio_gpu_cmd_create_resource() command, then initialize the
ttm object, and finally attach just created object to the fence for the
command in case it didn't finish yet.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Noralf Trønnes <noralf@tronnes.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20190318113332.10900-6-kraxel@redhat.com


(cherry picked from commit 530b28426a94b822b3c03491cde5c9a961d80e7f)
Signed-off-by: Greg Hartman <ghartman@google.com>
BUG: 139386237
Change-Id: I08729493e57f858304d8e0986706fd88814f33e3
parent aa39f5f5
Loading
Loading
Loading
Loading
+14 −4
Original line number Original line Diff line number Diff line
@@ -58,7 +58,9 @@ struct virtio_gpu_object_params {
	uint32_t width;
	uint32_t width;
	uint32_t height;
	uint32_t height;
	unsigned long size;
	unsigned long size;
	bool dumb;
	/* 3d */
	/* 3d */
	bool virgl;
	uint32_t target;
	uint32_t target;
	uint32_t bind;
	uint32_t bind;
	uint32_t depth;
	uint32_t depth;
@@ -225,6 +227,9 @@ struct virtio_gpu_fpriv {
/* virtio_ioctl.c */
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
				    struct list_head *head);
void virtio_gpu_unref_list(struct list_head *head);


/* virtio_kms.c */
/* virtio_kms.c */
int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
@@ -247,7 +252,8 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file);
				 struct drm_file *file);
struct virtio_gpu_object*
struct virtio_gpu_object*
virtio_gpu_alloc_object(struct drm_device *dev,
virtio_gpu_alloc_object(struct drm_device *dev,
			struct virtio_gpu_object_params *params);
			struct virtio_gpu_object_params *params,
			struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args);
				struct drm_mode_create_dumb *args);
@@ -264,7 +270,8 @@ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params);
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id);
				   uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
@@ -323,7 +330,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
void
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params);
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -351,6 +359,7 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);


/* virtio_gpu_fence.c */
/* virtio_gpu_fence.c */
bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
	struct virtio_gpu_device *vgdev);
	struct virtio_gpu_device *vgdev);
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -362,7 +371,8 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
/* virtio_gpu_object */
/* virtio_gpu_object */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr);
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence);
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
+2 −2
Original line number Original line Diff line number Diff line
@@ -36,7 +36,7 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
	return "controlq";
	return "controlq";
}
}


static bool virtio_signaled(struct dma_fence *f)
bool virtio_fence_signaled(struct dma_fence *f)
{
{
	struct virtio_gpu_fence *fence = to_virtio_fence(f);
	struct virtio_gpu_fence *fence = to_virtio_fence(f);


@@ -62,7 +62,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
static const struct dma_fence_ops virtio_fence_ops = {
static const struct dma_fence_ops virtio_fence_ops = {
	.get_driver_name     = virtio_get_driver_name,
	.get_driver_name     = virtio_get_driver_name,
	.get_timeline_name   = virtio_get_timeline_name,
	.get_timeline_name   = virtio_get_timeline_name,
	.signaled            = virtio_signaled,
	.signaled            = virtio_fence_signaled,
	.fence_value_str     = virtio_fence_value_str,
	.fence_value_str     = virtio_fence_value_str,
	.timeline_value_str  = virtio_timeline_value_str,
	.timeline_value_str  = virtio_timeline_value_str,
};
};
+5 −14
Original line number Original line Diff line number Diff line
@@ -36,13 +36,14 @@ void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)


struct virtio_gpu_object*
struct virtio_gpu_object*
virtio_gpu_alloc_object(struct drm_device *dev,
virtio_gpu_alloc_object(struct drm_device *dev,
			struct virtio_gpu_object_params *params)
			struct virtio_gpu_object_params *params,
			struct virtio_gpu_fence *fence)
{
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	struct virtio_gpu_object *obj;
	int ret;
	int ret;


	ret = virtio_gpu_object_create(vgdev, params, &obj);
	ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
	if (ret)
	if (ret)
		return ERR_PTR(ret);
		return ERR_PTR(ret);


@@ -59,7 +60,7 @@ int virtio_gpu_gem_create(struct drm_file *file,
	int ret;
	int ret;
	u32 handle;
	u32 handle;


	obj = virtio_gpu_alloc_object(dev, params);
	obj = virtio_gpu_alloc_object(dev, params, NULL);
	if (IS_ERR(obj))
	if (IS_ERR(obj))
		return PTR_ERR(obj);
		return PTR_ERR(obj);


@@ -82,9 +83,7 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
				struct drm_mode_create_dumb *args)
{
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_gem_object *gobj;
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	int ret;
	uint32_t pitch;
	uint32_t pitch;
@@ -97,20 +96,12 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
	params.width = args->width;
	params.width = args->width;
	params.height = args->height;
	params.height = args->height;
	params.size = args->size;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
				    &args->handle);
	if (ret)
	if (ret)
		goto fail;
		goto fail;


	obj = gem_to_virtio_gpu_obj(gobj);
	virtio_gpu_cmd_create_resource(vgdev, obj, &params);

	/* attach the object to the resource */
	ret = virtio_gpu_object_attach(vgdev, obj, NULL);
	if (ret)
		goto fail;

	obj->dumb = true;
	args->pitch = pitch;
	args->pitch = pitch;
	return ret;
	return ret;


+10 −62
Original line number Original line Diff line number Diff line
@@ -54,7 +54,7 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
					 &virtio_gpu_map->offset);
					 &virtio_gpu_map->offset);
}
}


static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
				    struct list_head *head)
				    struct list_head *head)
{
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_operation_ctx ctx = { false, false };
@@ -79,7 +79,7 @@ static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
	return 0;
	return 0;
}
}


static void virtio_gpu_unref_list(struct list_head *head)
void virtio_gpu_unref_list(struct list_head *head)
{
{
	struct ttm_validate_buffer *buf;
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *bo;
@@ -275,14 +275,11 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
{
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	int ret;
	struct virtio_gpu_object *qobj;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t handle = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_object_params params = { 0 };


	if (vgdev->has_virgl_3d == false) {
	if (vgdev->has_virgl_3d == false) {
@@ -298,14 +295,12 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
			return -EINVAL;
			return -EINVAL;
	}
	}


	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	params.format = rc->format;
	params.format = rc->format;
	params.width = rc->width;
	params.width = rc->width;
	params.height = rc->height;
	params.height = rc->height;
	params.size = rc->size;
	params.size = rc->size;
	if (vgdev->has_virgl_3d) {
	if (vgdev->has_virgl_3d) {
		params.virgl = true;
		params.target = rc->target;
		params.target = rc->target;
		params.bind = rc->bind;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.depth = rc->depth;
@@ -318,72 +313,25 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
	if (params.size == 0)
	if (params.size == 0)
		params.size = PAGE_SIZE;
		params.size = PAGE_SIZE;


	qobj = virtio_gpu_alloc_object(dev, &params);
	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	qobj = virtio_gpu_alloc_object(dev, &params, fence);
	dma_fence_put(&fence->f);
	if (IS_ERR(qobj))
	if (IS_ERR(qobj))
		return PTR_ERR(qobj);
		return PTR_ERR(qobj);
	obj = &qobj->gem_base;
	obj = &qobj->gem_base;


	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, qobj, &params);

		ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_get(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence) {
			ret = -ENOMEM;
			goto fail_backoff;
		}

		virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &params);
		ret = virtio_gpu_object_attach(vgdev, qobj, fence);
		if (ret) {
			dma_fence_put(&fence->f);
			goto fail_backoff;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
	if (ret) {

		drm_gem_object_release(obj);
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			dma_fence_put(&fence->f);
		}
		return ret;
		return ret;
	}
	}
	drm_gem_object_put_unlocked(obj);
	drm_gem_object_put_unlocked(obj);


	rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
	rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
	rc->bo_handle = handle;
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
	return 0;
	return 0;
fail_backoff:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
//fail_obj:
//	drm_gem_object_handle_unreference_unlocked(obj);
	return ret;
}
}


static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
+44 −3
Original line number Original line Diff line number Diff line
@@ -23,6 +23,8 @@
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
 */


#include <drm/ttm/ttm_execbuf_util.h>

#include "virtgpu_drv.h"
#include "virtgpu_drv.h"


static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
@@ -90,7 +92,8 @@ static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)


int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr)
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object *bo;
	size_t acc_size;
	size_t acc_size;
@@ -116,9 +119,15 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
		kfree(bo);
		kfree(bo);
		return ret;
		return ret;
	}
	}
	bo->dumb = false;
	bo->dumb = params->dumb;
	virtio_gpu_init_ttm_placement(bo);

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
	}


	virtio_gpu_init_ttm_placement(bo);
	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
			  ttm_bo_type_device, &bo->placement, 0,
			  ttm_bo_type_device, &bo->placement, 0,
			  true, acc_size, NULL, NULL,
			  true, acc_size, NULL, NULL,
@@ -127,6 +136,38 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
	if (ret != 0)
	if (ret != 0)
		return ret;
		return ret;


	if (fence) {
		struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
		struct list_head validate_list;
		struct ttm_validate_buffer mainbuf;
		struct ww_acquire_ctx ticket;
		unsigned long irq_flags;
		bool signaled;

		INIT_LIST_HEAD(&validate_list);
		memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

		/* use a gem reference since unref list undoes them */
		drm_gem_object_get(&bo->gem_base);
		mainbuf.bo = &bo->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret == 0) {
			spin_lock_irqsave(&drv->lock, irq_flags);
			signaled = virtio_fence_signaled(&fence->f);
			if (!signaled)
				/* virtio create command still in flight */
				ttm_eu_fence_buffer_objects(&ticket, &validate_list,
							    &fence->f);
			spin_unlock_irqrestore(&drv->lock, irq_flags);
			if (signaled)
				/* virtio create command finished */
				ttm_eu_backoff_reservation(&ticket, &validate_list);
		}
		virtio_gpu_unref_list(&validate_list);
	}

	*bo_ptr = bo;
	*bo_ptr = bo;
	return 0;
	return 0;
}
}
Loading