
Commit 695fbc08 authored by Tina Zhang, committed by Zhenyu Wang

drm/i915/gvt: replace gvt_err with gvt_vgpu_err



gvt_err should be used only for the very few critical error messages
printed during host i915 driver initialization. This patch
1. removes the redundant gvt_err calls;
2. creates a new gvt_vgpu_err macro to report errors caused by a vgpu;
3. replaces most of the gvt_err calls with gvt_vgpu_err;
4. leaves only a few gvt_err calls for reporting gvt errors during host
   gvt initialization.

v2. change name to gvt_vgpu_err and add vgpu id to the message. (Kevin)
    add gpu id to gvt_vgpu_err. (Zhi)
v3. remove gpu id from gvt_vgpu_err caller. (Zhi)
v4. add vgpu check to the gvt_vgpu_err macro. (Zhiyuan)
v5. add comments for v3 and v4.
v6. split the big patch into two, with this patch only for checking
    gvt_vgpu_err. (Zhenyu)
v7. rebase to staging branch
v8. rebase to fix branch

Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 4938ca90
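
For readers outside the kernel tree, a minimal self-contained sketch of the
calling convention that item 2 above introduces. The gvt_vgpu_err definition is
reproduced from the debug.h hunk further down; everything else (the simplified
struct intel_vgpu, the DRM_DEBUG_DRIVER and IS_ERR_OR_NULL stand-ins,
example_caller() and main()) is illustrative scaffolding, not part of the patch.

/* sketch.c -- illustrative only, builds with gcc/clang outside the kernel */
#include <stdio.h>

struct intel_vgpu { int id; };			/* stand-in for the real type */

/* stand-ins for the kernel helpers the macro relies on */
#define DRM_DEBUG_DRIVER(fmt, ...)	fprintf(stderr, fmt, ##__VA_ARGS__)
#define IS_ERR_OR_NULL(p)		((p) == NULL)

/* New macro from the patch: it expects a variable literally named "vgpu"
 * to be in scope at the call site and prefixes the message with its id. */
#define gvt_vgpu_err(fmt, args...)					\
do {									\
	if (IS_ERR_OR_NULL(vgpu))					\
		DRM_DEBUG_DRIVER("gvt: "fmt, ##args);			\
	else								\
		DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
} while (0)

/* Before the patch, callers formatted the id by hand:
 *	gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
 * After the patch, the id comes from the implicit "vgpu" in scope: */
static void example_caller(struct intel_vgpu *vgpu, const char *item)
{
	gvt_vgpu_err("fail to allocate resource %s\n", item);
}

int main(void)
{
	struct intel_vgpu v = { .id = 3 };

	example_caller(&v, "high GM space");	/* prints "gvt: vgpu 3: ..." */
	return 0;
}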
+4 −4
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
	const char *item;

	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
		gvt_err("Invalid vGPU creation params\n");
		gvt_vgpu_err("Invalid vGPU creation params\n");
		return -EINVAL;
	}

@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
	return 0;

no_enough_resource:
	gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
	gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
		vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
	gvt_vgpu_err("fail to allocate resource %s\n", item);
	gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
		BYTES_TO_MB(request), BYTES_TO_MB(avail),
		BYTES_TO_MB(max), BYTES_TO_MB(taken));
	return -ENOSPC;
}
+48 −36
@@ -843,20 +843,19 @@ static int cmd_reg_handler(struct parser_exec_state *s,
	struct intel_gvt *gvt = vgpu->gvt;

	if (offset + 4 > gvt->device_info.mmio_size) {
		gvt_err("%s access to (%x) outside of MMIO range\n",
		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
				cmd, offset);
		return -EINVAL;
	}

	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
		gvt_err("vgpu%d: %s access to non-render register (%x)\n",
				s->vgpu->id, cmd, offset);
		gvt_vgpu_err("%s access to non-render register (%x)\n",
				cmd, offset);
		return 0;
	}

	if (is_shadowed_mmio(offset)) {
		gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
				s->vgpu->id, offset);
		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
		return 0;
	}

@@ -1152,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 dword0 = cmd_val(s, 0);
	u32 dword1 = cmd_val(s, 1);
	u32 dword2 = cmd_val(s, 2);
@@ -1190,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
		break;

	default:
		gvt_err("unknown plane code %d\n", plane);
		gvt_vgpu_err("unknown plane code %d\n", plane);
		return -EINVAL;
	}

@@ -1297,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
	struct mi_display_flip_command_info info;
	struct intel_vgpu *vgpu = s->vgpu;
	int ret;
	int i;
	int len = cmd_length(s);

	ret = decode_mi_display_flip(s, &info);
	if (ret) {
		gvt_err("fail to decode MI display flip command\n");
		gvt_vgpu_err("fail to decode MI display flip command\n");
		return ret;
	}

	ret = check_mi_display_flip(s, &info);
	if (ret) {
		gvt_err("invalid MI display flip command\n");
		gvt_vgpu_err("invalid MI display flip command\n");
		return ret;
	}

	ret = update_plane_mmio_from_mi_display_flip(s, &info);
	if (ret) {
		gvt_err("fail to update plane mmio\n");
		gvt_vgpu_err("fail to update plane mmio\n");
		return ret;
	}

@@ -1373,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
	int ret;

	if (op_size > max_surface_size) {
		gvt_err("command address audit fail name %s\n", s->info->name);
		gvt_vgpu_err("command address audit fail name %s\n",
			s->info->name);
		return -EINVAL;
	}

@@ -1390,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
	}
	return 0;
err:
	gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
			s->info->name, guest_gma, op_size);

	pr_err("cmd dump: ");
@@ -1435,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)

static inline int unexpected_cmd(struct parser_exec_state *s)
{
	gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
			s->vgpu->id, s->info->name);
	struct intel_vgpu *vgpu = s->vgpu;

	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);

	return -EINVAL;
}

@@ -1539,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
	while (gma != end_gma) {
		gpa = intel_vgpu_gma_to_gpa(mm, gma);
		if (gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_err("invalid gma address: %lx\n", gma);
			gvt_vgpu_err("invalid gma address: %lx\n", gma);
			return -EFAULT;
		}

@@ -1580,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
	uint32_t bb_size = 0;
	uint32_t cmd_len = 0;
	bool met_bb_end = false;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 cmd;

	/* get the start gm address of the batch buffer */
@@ -1588,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)

	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
	if (info == NULL) {
		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
				cmd, get_opcode(cmd, s->ring_id));
		return -EINVAL;
	}
@@ -1597,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
				gma, gma + 4, &cmd);
		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
		if (info == NULL) {
			gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
				cmd, get_opcode(cmd, s->ring_id));
			return -EINVAL;
		}
@@ -1622,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
static int perform_bb_shadow(struct parser_exec_state *s)
{
	struct intel_shadow_bb_entry *entry_obj;
	struct intel_vgpu *vgpu = s->vgpu;
	unsigned long gma = 0;
	uint32_t bb_size;
	void *dst = NULL;
@@ -1656,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)

	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
	if (ret) {
		gvt_err("failed to set shadow batch to CPU\n");
		gvt_vgpu_err("failed to set shadow batch to CPU\n");
		goto unmap_src;
	}

@@ -1668,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
			      gma, gma + bb_size,
			      dst);
	if (ret) {
		gvt_err("fail to copy guest ring buffer\n");
		gvt_vgpu_err("fail to copy guest ring buffer\n");
		goto unmap_src;
	}

@@ -1699,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
{
	bool second_level;
	int ret = 0;
	struct intel_vgpu *vgpu = s->vgpu;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
		return -EINVAL;
	}

	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
		gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
		return -EINVAL;
	}

@@ -1725,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
	if (batch_buffer_needs_scan(s)) {
		ret = perform_bb_shadow(s);
		if (ret < 0)
			gvt_err("invalid shadow batch buffer\n");
			gvt_vgpu_err("invalid shadow batch buffer\n");
	} else {
		/* emulate a batch buffer end to do return right */
		ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2452,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
	int ret = 0;
	cycles_t t0, t1, t2;
	struct parser_exec_state s_before_advance_custom;
	struct intel_vgpu *vgpu = s->vgpu;

	t0 = get_cycles();

@@ -2459,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)

	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
	if (info == NULL) {
		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
				cmd, get_opcode(cmd, s->ring_id));
		return -EINVAL;
	}
@@ -2475,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
	if (info->handler) {
		ret = info->handler(s);
		if (ret < 0) {
			gvt_err("%s handler error\n", info->name);
			gvt_vgpu_err("%s handler error\n", info->name);
			return ret;
		}
	}
@@ -2486,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
		ret = cmd_advance_default(s);
		if (ret) {
			gvt_err("%s IP advance error\n", info->name);
			gvt_vgpu_err("%s IP advance error\n", info->name);
			return ret;
		}
	}
@@ -2509,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,

	unsigned long gma_head, gma_tail, gma_bottom;
	int ret = 0;
	struct intel_vgpu *vgpu = s->vgpu;

	gma_head = rb_start + rb_head;
	gma_tail = rb_start + rb_tail;
@@ -2520,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
			if (!(s->ip_gma >= rb_start) ||
				!(s->ip_gma < gma_bottom)) {
				gvt_err("ip_gma %lx out of ring scope."
				gvt_vgpu_err("ip_gma %lx out of ring scope."
					"(base:0x%lx, bottom: 0x%lx)\n",
					s->ip_gma, rb_start,
					gma_bottom);
@@ -2528,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
				return -EINVAL;
			}
			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
				gvt_err("ip_gma %lx out of range."
				gvt_vgpu_err("ip_gma %lx out of range."
					"base 0x%lx head 0x%lx tail 0x%lx\n",
					s->ip_gma, rb_start,
					rb_head, rb_tail);
@@ -2538,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
		}
		ret = cmd_parser_exec(s);
		if (ret) {
			gvt_err("cmd parser error\n");
			gvt_vgpu_err("cmd parser error\n");
			parser_exec_state_dump(s);
			break;
		}
@@ -2662,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
				gma_head, gma_top,
				workload->shadow_ring_buffer_va);
		if (ret) {
			gvt_err("fail to copy guest ring buffer\n");
			gvt_vgpu_err("fail to copy guest ring buffer\n");
			return ret;
		}
		copy_len = gma_top - gma_head;
@@ -2674,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
			gma_head, gma_tail,
			workload->shadow_ring_buffer_va + copy_len);
	if (ret) {
		gvt_err("fail to copy guest ring buffer\n");
		gvt_vgpu_err("fail to copy guest ring buffer\n");
		return ret;
	}
	ring->tail += workload->rb_len;
@@ -2685,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	int ret;
	struct intel_vgpu *vgpu = workload->vgpu;

	ret = shadow_workload_ring_buffer(workload);
	if (ret) {
		gvt_err("fail to shadow workload ring_buffer\n");
		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
		return ret;
	}

	ret = scan_workload(workload);
	if (ret) {
		gvt_err("scan workload error\n");
		gvt_vgpu_err("scan workload error\n");
		return ret;
	}
	return 0;
@@ -2704,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ctx_size = wa_ctx->indirect_ctx.size;
	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
	struct drm_i915_gem_object *obj;
	int ret = 0;
	void *map;
@@ -2717,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
	/* get the va of the shadow batch buffer */
	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(map)) {
		gvt_err("failed to vmap shadow indirect ctx\n");
		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
		ret = PTR_ERR(map);
		goto put_obj;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	if (ret) {
		gvt_err("failed to set shadow indirect ctx to CPU\n");
		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
		goto unmap_src;
	}

@@ -2733,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
				guest_gma, guest_gma + ctx_size,
				map);
	if (ret) {
		gvt_err("fail to copy guest indirect ctx\n");
		gvt_vgpu_err("fail to copy guest indirect ctx\n");
		goto unmap_src;
	}

@@ -2767,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ret;
	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	ret = shadow_indirect_ctx(wa_ctx);
	if (ret) {
		gvt_err("fail to shadow indirect ctx\n");
		gvt_vgpu_err("fail to shadow indirect ctx\n");
		return ret;
	}

@@ -2781,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)

	ret = scan_wa_ctx(wa_ctx);
	if (ret) {
		gvt_err("scan wa ctx error\n");
		gvt_vgpu_err("scan wa ctx error\n");
		return ret;
	}

+8 −0
@@ -27,6 +27,14 @@
#define gvt_err(fmt, args...) \
	DRM_ERROR("gvt: "fmt, ##args)

#define gvt_vgpu_err(fmt, args...)					\
do {									\
	if (IS_ERR_OR_NULL(vgpu))					\
		DRM_DEBUG_DRIVER("gvt: "fmt, ##args);			\
	else								\
		DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
} while (0)

#define gvt_dbg_core(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)

+6 −7
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
	unsigned char chr = 0;

	if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
		gvt_err("Driver tries to read EDID without proper sequence!\n");
		gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
		return 0;
	}
	if (edid->current_edid_read >= EDID_SIZE) {
		gvt_err("edid_get_byte() exceeds the size of EDID!\n");
		gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
		return 0;
	}

	if (!edid->edid_available) {
		gvt_err("Reading EDID but EDID is not available!\n");
		gvt_vgpu_err("Reading EDID but EDID is not available!\n");
		return 0;
	}

@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
		chr = edid_data->edid_block[edid->current_edid_read];
		edid->current_edid_read++;
	} else {
		gvt_err("No EDID available during the reading?\n");
		gvt_vgpu_err("No EDID available during the reading?\n");
	}
	return chr;
}
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
			vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
			break;
		default:
			gvt_err("Unknown/reserved GMBUS cycle detected!\n");
			gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
			break;
		}
		/*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		 */
	} else {
		memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
		gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
				vgpu->id);
		gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
	}
	return 0;
}
+13 −16
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
		struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format *ctx)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
	gvt_dbg_el("schedule out context id %x\n", ctx->context_id);

	if (WARN_ON(!same_context(ctx, execlist->running_context))) {
		gvt_err("schedule out context is not running context,"
		gvt_vgpu_err("schedule out context is not running context,"
				"ctx id %x running ctx id %x\n",
				ctx->context_id,
				execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (status.execlist_queue_full) {
		gvt_err("virtual execlist slots are full\n");
		gvt_vgpu_err("virtual execlist slots are full\n");
		return NULL;
	}

@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,

	struct execlist_ctx_descriptor_format *ctx0, *ctx1;
	struct execlist_context_status_format status;
	struct intel_vgpu *vgpu = execlist->vgpu;

	gvt_dbg_el("emulate schedule-in\n");

	if (!slot) {
		gvt_err("no available execlist slot\n");
		gvt_vgpu_err("no available execlist slot\n");
		return -EINVAL;
	}

@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)

		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
		if (IS_ERR(vma)) {
			gvt_err("Cannot pin\n");
			return;
		}

@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma)) {
		gvt_err("Cannot pin indirect ctx obj\n");
		return;
	}

@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	int page_table_level;
	u32 pdp[8];

@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
	} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
		page_table_level = 4;
	} else {
		gvt_err("Advanced Context mode(SVM) is not supported!\n");
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_err("fail to create mm object.\n");
			gvt_vgpu_err("fail to create mm object.\n");
			return PTR_ERR(mm);
		}
	}
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return -EINVAL;
	}

@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
			continue;

		if (!desc[i]->privilege_access) {
			gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
					vgpu->id);
			gvt_vgpu_err("unexpected GGTT elsp submission\n");
			return -EINVAL;
		}

@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
	}

	if (!valid_desc_bitmap) {
		gvt_err("vgpu%d: no valid desc in a elsp submission\n",
				vgpu->id);
		gvt_vgpu_err("no valid desc in a elsp submission\n");
		return -EINVAL;
	}

	if (!test_bit(0, (void *)&valid_desc_bitmap) &&
			test_bit(1, (void *)&valid_desc_bitmap)) {
		gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
				vgpu->id);
		gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
		return -EINVAL;
	}

@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
		ret = submit_context(vgpu, ring_id, &valid_desc[i],
				emulate_schedule_in);
		if (ret) {
			gvt_err("vgpu%d: fail to schedule workload\n",
					vgpu->id);
			gvt_vgpu_err("fail to schedule workload\n");
			return ret;
		}
		emulate_schedule_in = false;