Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 30f82d81 authored by Thomas Hellstrom
Browse files

drm/vmwgfx: Reemit context bindings when necessary v2



When a context is first referenced in the command stream, make sure that all
scrubbed (as a result of eviction) bindings are re-emitted. Also make sure that
all bound resources are put on the resource validate list.

This is needed for legacy emulation, since legacy user-space drivers will
typically not re-emit shader bindings. It also removes the requirement for
user-space drivers to re-emit render-target- and texture bindings.

Makes suspend and hibernate now also work with legacy user-space drivers on
guest-backed devices.

v2: Don't rebind on legacy devices.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
parent a6fc955f
Loading
Loading
Loading
Loading
+123 −21
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@ struct vmw_user_context {



typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;

@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)

	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		mutex_lock(&dev_priv->binding_mutex);
		(void) vmw_context_binding_state_kill
			(&container_of(res, struct vmw_user_context, res)->cbs);
		(void) vmw_gb_context_destroy(res);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->binding_mutex);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_state_kill(&uctx->cbs);
	vmw_context_binding_state_scrub(&uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	BUG_ON(!list_empty(&uctx->cbs.list));

	if (likely(res->id == -1))
		return 0;
@@ -528,8 +530,9 @@ out_unlock:
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid = SVGA3D_INVALID_ID;
	cmd->body.shid =
		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid = SVGA3D_INVALID_ID;
	cmd->body.target.sid =
		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
	cmd->body.s1.value =
		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	loc->bi.scrubbed = false;
	list_add_tail(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);

@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	if (bi->res != NULL) {
		loc->bi = *bi;
		list_add_tail(&loc->ctx_list, &cbs->list);
	if (bi->res != NULL)
		list_add_tail(&loc->res_list, &bi->res->binding_head);
	else
		INIT_LIST_HEAD(&loc->res_list);
	}
}

/**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
	if (!cb->bi.scrubbed) {
		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
	vmw_context_binding_drop(cb);
}

@@ -767,6 +780,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits scrub commands for every binding tracked by @cbs that has not
 * already been scrubbed, and marks each one scrubbed afterwards. The
 * bindings themselves stay on the tracker's list.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *cb;

	list_for_each_entry(cb, &cbs->list, ctx_list) {
		if (cb->bi.scrubbed)
			continue;

		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
@@ -784,6 +818,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrub all bindings associated with a specific resource. Typically
 * called before the resource is evicted. Bindings already scrubbed
 * are left untouched; the rest are scrubbed and flagged as such.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_binding *cb;

	list_for_each_entry(cb, head, res_list) {
		if (cb->bi.scrubbed)
			continue;

		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}

/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and re-emits a bind command
 * for every binding that was previously scrubbed (e.g. by eviction).
 * Returns 0 on success, or the first error returned by a scrub
 * function invoked in rebind mode.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry;
	int ret;

	list_for_each_entry(entry, &uctx->cbs.list, ctx_list) {
		struct vmw_ctx_bindinfo *bi = &entry->bi;

		if (likely(!bi->scrubbed))
			continue;

		/* A scrubbed binding should still reference a valid resource. */
		if (WARN_ON(bi->res == NULL ||
			    bi->res->id == SVGA3D_INVALID_ID))
			continue;

		ret = vmw_scrub_funcs[bi->bt](bi, true);
		if (unlikely(ret != 0))
			return ret;

		bi->scrubbed = false;
	}

	return 0;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
+6 −0
Original line number Diff line number Diff line
@@ -276,6 +276,7 @@ struct vmw_ctx_bindinfo {
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	enum vmw_ctx_binding_type bt;
	bool scrubbed;
	union {
		SVGA3dShaderType shader_type;
		SVGA3dRenderTargetType rt_type;
@@ -574,6 +575,8 @@ struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -962,6 +965,9 @@ extern void
vmw_context_binding_state_transfer(struct vmw_resource *res,
				   struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);

/*
 * Surface management - vmwgfx_surface.c
+81 −4
Original line number Diff line number Diff line
@@ -179,6 +179,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
	return 0;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 * Returns 0 on success or the first error from vmw_resource_val_add().
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct vmw_ctx_binding *entry;
	struct list_head *bindings;
	int ret = 0;

	/* The binding list is only stable while binding_mutex is held. */
	mutex_lock(&dev_priv->binding_mutex);
	bindings = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, bindings, ctx_list) {
		struct vmw_resource *res =
			vmw_resource_reference_unless_doomed(entry->bi.res);

		/* Skip resources that are already on their way out. */
		if (unlikely(res == NULL))
			continue;

		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
@@ -470,7 +508,11 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
	if (p_val)
		*p_val = node;

	if (node->first_usage && res_type == vmw_res_context) {
	if (dev_priv->has_mob && node->first_usage &&
	    res_type == vmw_res_context) {
		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
		if (unlikely(ret != 0))
			goto out_no_reloc;
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
@@ -516,6 +558,34 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
					converter, *id_loc, id_loc, p_val);
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 * Returns 0 on success, or the error propagated from
 * vmw_context_rebind_all().
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		int ret;

		/* Only context nodes carry staged binding state. */
		if (likely(val->staged_bindings == NULL))
			continue;

		ret = vmw_context_rebind_all(val->res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
@@ -1640,6 +1710,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
		struct vmw_resource_val_node *res_node;
		u32 shid = cmd->body.shid;

		if (shid != SVGA3D_INVALID_ID)
			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
							cmd->body.type,
							&shid);
@@ -2395,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_err;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
+10 −1
Original line number Diff line number Diff line
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
	return res;
}

/**
 * vmw_resource_reference_unless_doomed - Reference a resource unless it
 * is being destroyed.
 *
 * @res: The resource to reference.
 *
 * Returns @res with an additional reference held, or NULL if the
 * refcount had already dropped to zero and the resource is on its way
 * to destruction.
 */
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	if (!kref_get_unless_zero(&res->kref))
		return NULL;

	return res;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL))
	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	id = res->id;
	if (res->res_free != NULL)
+1 −1
Original line number Diff line number Diff line
@@ -304,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_kill(&res->binding_head);
	vmw_context_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
Loading