
Commit 3dd435ef authored by Rodrigo Vivi

Merge tag 'gvt-next-2017-11-16' of https://github.com/intel/gvt-linux into drm-intel-next-queued



gvt-next-2017-11-16

- CSB HWSP update support (Weinan)
- GVT debug helpers, dyndbg and debugfs (Chuanxiao, Shuo)
- fully virtualized opregion (Xiaolin)
- VM health check for sane fallback (Fred)
- workload submission code refactor for future enabling (Zhi)
- Updated repo URL in MAINTAINERS (Zhenyu)
- many other misc fixes

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171116092007.ww5bvfx7rf36bjmn@zhen-hp.sh.intel.com
parents 9672a69c f2880e04
MAINTAINERS +1 −1
@@ -6928,7 +6928,7 @@ M: Zhi Wang <zhi.a.wang@intel.com>
 L:	intel-gvt-dev@lists.freedesktop.org
 L:	intel-gfx@lists.freedesktop.org
 W:	https://01.org/igvt-g
-T:	git https://github.com/01org/gvt-linux.git
+T:	git https://github.com/intel/gvt-linux.git
 S:	Supported
 F:	drivers/gpu/drm/i915/gvt/

drivers/gpu/drm/i915/gvt/Makefile +1 −1
 GVT_DIR := gvt
 GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
 	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
-	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
+	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o
 
 ccflags-y				+= -I$(src) -I$(src)/$(GVT_DIR)
 i915-y					+= $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
drivers/gpu/drm/i915/gvt/cfg_space.c +21 −0
@@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+	u32 new = *(u32 *)(p_data);
+
+	if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
+		/* We don't have rom, return size of 0. */
+		*pval = 0;
+	else
+		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
+	return 0;
+}
+
 static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 	void *p_data, unsigned int bytes)
 {
@@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	}
 
 	switch (rounddown(offset, 4)) {
+	case PCI_ROM_ADDRESS:
+		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+			return -EINVAL;
+		return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
+
 	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
 		if (WARN_ON(!IS_ALIGNED(offset, 4)))
 			return -EINVAL;
@@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
 				pci_resource_len(gvt->dev_priv->drm.pdev, 0);
 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
 				pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+
+	memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
 }
 
 /**
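
[Editor's note: the new emulate_pci_rom_bar_write() answers the standard PCI BAR sizing handshake: the guest writes all 1s to the BAR's address bits, reads back a size mask, and a readback of 0 means "no option ROM". A minimal guest-side sketch of that probe, assuming hypothetical pci_cfg_read32()/pci_cfg_write32() config-space accessors:]

#include <stdint.h>

#define PCI_ROM_ADDRESS		0x30
#define PCI_ROM_ADDRESS_MASK	(~0x7ffu)

/* hypothetical config-space accessors for the device under test */
extern uint32_t pci_cfg_read32(unsigned int offset);
extern void pci_cfg_write32(unsigned int offset, uint32_t val);

static uint32_t rom_bar_size(void)
{
	uint32_t saved = pci_cfg_read32(PCI_ROM_ADDRESS);
	uint32_t probe;

	/* write all 1s to the address bits; the device answers with a size mask */
	pci_cfg_write32(PCI_ROM_ADDRESS, PCI_ROM_ADDRESS_MASK);
	probe = pci_cfg_read32(PCI_ROM_ADDRESS) & PCI_ROM_ADDRESS_MASK;
	pci_cfg_write32(PCI_ROM_ADDRESS, saved);	/* restore original value */

	/* readback of 0 means no option ROM, which is what the vGPU now reports */
	return probe ? ~probe + 1 : 0;
}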
drivers/gpu/drm/i915/gvt/cmd_parser.c +124 −101
@@ -709,18 +709,13 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 
 	print_opcode(cmd_val(s, 0), s->ring_id);
 
-	/* print the whole page to trace */
-	pr_err("    ip_va=%p: %08x %08x %08x %08x\n",
-			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
-			cmd_val(s, 2), cmd_val(s, 3));
-
 	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
 
 	while (cnt < 1024) {
-		pr_err("ip_va=%p: ", s->ip_va);
+		gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
 		for (i = 0; i < 8; i++)
-			pr_err("%08x ", cmd_val(s, i));
-		pr_err("\n");
+			gvt_dbg_cmd("%08x ", cmd_val(s, i));
+		gvt_dbg_cmd("\n");
 
 		s->ip_va += 8 * sizeof(u32);
 		cnt += 8;
@@ -825,7 +820,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
 		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
 			offset, data);
-		return -EINVAL;
+		return -EPERM;
 	}
 	return 0;
 }
@@ -839,7 +834,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	if (offset + 4 > gvt->device_info.mmio_size) {
 		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
 				cmd, offset);
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
@@ -855,7 +850,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 
 	if (is_force_nonpriv_mmio(offset) &&
 		force_nonpriv_reg_handler(s, offset, index))
-		return -EINVAL;
+		return -EPERM;
 
 	if (offset == i915_mmio_reg_offset(DERRMR) ||
 		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
@@ -894,11 +889,14 @@ static int cmd_handler_lri(struct parser_exec_state *s)
 					i915_mmio_reg_offset(DERRMR))
 				ret |= 0;
 			else
-				ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+				ret |= (cmd_reg_inhibit(s, i)) ?
+					-EBADRQC : 0;
 		}
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -912,11 +910,15 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
 		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
 			ret |= ((cmd_reg_inhibit(s, i) ||
 					(cmd_reg_inhibit(s, i + 1)))) ?
-				-EINVAL : 0;
+				-EBADRQC : 0;
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+		if (ret)
+			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -934,15 +936,19 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
 
 	for (i = 1; i < cmd_len;) {
 		if (IS_BROADWELL(gvt->dev_priv))
-			ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+		if (ret)
+			break;
 		if (cmd_val(s, 0) & (1 << 22)) {
 			gma = cmd_gma(s, i + 1);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, i + 2)) << 32;
 			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+			if (ret)
+				break;
 		}
 		i += gmadr_dw_number(s) + 1;
 	}
@@ -958,11 +964,15 @@ static int cmd_handler_srm(struct parser_exec_state *s)
 
 	for (i = 1; i < cmd_len;) {
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+		if (ret)
+			break;
 		if (cmd_val(s, 0) & (1 << 22)) {
 			gma = cmd_gma(s, i + 1);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, i + 2)) << 32;
 			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+			if (ret)
+				break;
 		}
 		i += gmadr_dw_number(s) + 1;
 	}
@@ -1116,7 +1126,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 
 	v = (dword0 & GENMASK(21, 19)) >> 19;
 	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
-		return -EINVAL;
+		return -EBADRQC;
 
 	info->pipe = gen8_plane_code[v].pipe;
 	info->plane = gen8_plane_code[v].plane;
@@ -1136,7 +1146,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 		info->surf_reg = SPRSURF(info->pipe);
 	} else {
 		WARN_ON(1);
-		return -EINVAL;
+		return -EBADRQC;
 	}
 	return 0;
 }
@@ -1185,7 +1195,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 
 	default:
 		gvt_vgpu_err("unknown plane code %d\n", plane);
-		return -EINVAL;
+		return -EBADRQC;
 	}
 
 	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
@@ -1348,10 +1358,13 @@ static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
 {
 	unsigned long addr;
 	unsigned long gma_high, gma_low;
-	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	struct intel_vgpu *vgpu = s->vgpu;
+	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
 
-	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
+		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
 		return INTEL_GVT_INVALID_ADDR;
+	}
 
 	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
 	if (gmadr_bytes == 4) {
@@ -1374,16 +1387,16 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	if (op_size > max_surface_size) {
 		gvt_vgpu_err("command address audit fail name %s\n",
 			s->info->name);
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	if (index_mode)	{
-		if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
-			ret = -EINVAL;
+		if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+			ret = -EFAULT;
 			goto err;
 		}
 	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
-		ret = -EINVAL;
+		ret = -EFAULT;
 		goto err;
 	}
 
@@ -1439,7 +1452,7 @@ static inline int unexpected_cmd(struct parser_exec_state *s)
 
 	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
 
-	return -EINVAL;
+	return -EBADRQC;
 }
 
 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
@@ -1545,10 +1558,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 			return -EFAULT;
 		}
 
-		offset = gma & (GTT_PAGE_SIZE - 1);
+		offset = gma & (I915_GTT_PAGE_SIZE - 1);
 
-		copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
-			GTT_PAGE_SIZE - offset : end_gma - gma;
+		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
+			I915_GTT_PAGE_SIZE - offset : end_gma - gma;
 
 		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
 
@@ -1576,110 +1589,113 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 	return 1;
 }
 
-static int find_bb_size(struct parser_exec_state *s)
+static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 {
 	unsigned long gma = 0;
 	struct cmd_info *info;
-	int bb_size = 0;
 	uint32_t cmd_len = 0;
-	bool met_bb_end = false;
+	bool bb_end = false;
 	struct intel_vgpu *vgpu = s->vgpu;
 	u32 cmd;
 
+	*bb_size = 0;
+
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
-	cmd = cmd_val(s, 0);
+	if (gma == INTEL_GVT_INVALID_ADDR)
+		return -EFAULT;
 
+	cmd = cmd_val(s, 0);
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-		return -EINVAL;
+		return -EBADRQC;
 	}
 	do {
-		copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
-				gma, gma + 4, &cmd);
+		if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+				gma, gma + 4, &cmd) < 0)
+			return -EFAULT;
 		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 		if (info == NULL) {
 			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-			return -EINVAL;
+			return -EBADRQC;
 		}
 
 		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
-			met_bb_end = true;
+			bb_end = true;
 		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
-			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
 				/* chained batch buffer */
-				met_bb_end = true;
-			}
+				bb_end = true;
 		}
 		cmd_len = get_cmd_length(info, cmd) << 2;
-		bb_size += cmd_len;
+		*bb_size += cmd_len;
 		gma += cmd_len;
+	} while (!bb_end);
 
-	} while (!met_bb_end);
-
-	return bb_size;
+	return 0;
 }
 
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
-	struct intel_shadow_bb_entry *entry_obj;
 	struct intel_vgpu *vgpu = s->vgpu;
+	struct intel_vgpu_shadow_bb *bb;
 	unsigned long gma = 0;
-	uint32_t bb_size;
-	void *dst = NULL;
+	unsigned long bb_size;
 	int ret = 0;
 
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
+	if (gma == INTEL_GVT_INVALID_ADDR)
+		return -EFAULT;
 
-	/* get the size of the batch buffer */
-	bb_size = find_bb_size(s);
-	if (bb_size < 0)
-		return -EINVAL;
+	ret = find_bb_size(s, &bb_size);
+	if (ret)
+		return ret;
 
 	/* allocate shadow batch buffer */
-	entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
-	if (entry_obj == NULL)
+	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
+	if (!bb)
 		return -ENOMEM;
 
-	entry_obj->obj =
-		i915_gem_object_create(s->vgpu->gvt->dev_priv,
+	bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
 					 roundup(bb_size, PAGE_SIZE));
-	if (IS_ERR(entry_obj->obj)) {
-		ret = PTR_ERR(entry_obj->obj);
-		goto free_entry;
+	if (IS_ERR(bb->obj)) {
+		ret = PTR_ERR(bb->obj);
+		goto err_free_bb;
 	}
-	entry_obj->len = bb_size;
-	INIT_LIST_HEAD(&entry_obj->list);
 
-	dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
-	if (IS_ERR(dst)) {
-		ret = PTR_ERR(dst);
-		goto put_obj;
-	}
+	ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+	if (ret)
+		goto err_free_obj;
 
-	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
-	if (ret) {
-		gvt_vgpu_err("failed to set shadow batch to CPU\n");
-		goto unmap_src;
+	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
+	if (IS_ERR(bb->va)) {
+		ret = PTR_ERR(bb->va);
+		goto err_finish_shmem_access;
 	}
 
-	entry_obj->va = dst;
-	entry_obj->bb_start_cmd_va = s->ip_va;
+	if (bb->clflush & CLFLUSH_BEFORE) {
+		drm_clflush_virt_range(bb->va, bb->obj->base.size);
+		bb->clflush &= ~CLFLUSH_BEFORE;
+	}
 
 	/* copy batch buffer to shadow batch buffer*/
 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
 			      gma, gma + bb_size,
-			      dst);
+			      bb->va);
 	if (ret < 0) {
 		gvt_vgpu_err("fail to copy guest ring buffer\n");
-		goto unmap_src;
+		ret = -EFAULT;
+		goto err_unmap;
 	}
 
-	list_add(&entry_obj->list, &s->workload->shadow_bb);
+	INIT_LIST_HEAD(&bb->list);
+	list_add(&bb->list, &s->workload->shadow_bb);
 
+	bb->accessing = true;
+	bb->bb_start_cmd_va = s->ip_va;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
@@ -1688,17 +1704,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	 * buffer's gma in pair. After all, we don't want to pin the shadow
 	 * buffer here (too early).
 	 */
-	s->ip_va = dst;
+	s->ip_va = bb->va;
 	s->ip_gma = gma;
 
 	return 0;
 
-unmap_src:
-	i915_gem_object_unpin_map(entry_obj->obj);
-put_obj:
-	i915_gem_object_put(entry_obj->obj);
-free_entry:
-	kfree(entry_obj);
+err_unmap:
+	i915_gem_object_unpin_map(bb->obj);
+err_finish_shmem_access:
+	i915_gem_obj_finish_shmem_access(bb->obj);
+err_free_obj:
+	i915_gem_object_put(bb->obj);
+err_free_bb:
+	kfree(bb);
 	return ret;
 }

@@ -1710,13 +1726,13 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 
 	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
 		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
 	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
 		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	s->saved_buf_addr_type = s->buf_addr_type;
@@ -1740,7 +1756,6 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 		if (ret < 0)
 			return ret;
 	}
-
 	return ret;
 }

@@ -2430,7 +2445,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (info == NULL) {
 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-		return -EINVAL;
+		return -EBADRQC;
 	}
 
 	s->info = info;
@@ -2465,6 +2480,10 @@ static inline bool gma_out_of_range(unsigned long gma,
 		return (gma > gma_tail) && (gma < gma_head);
 }
 
+/* Keep the consistent return type, e.g EBADRQC for unknown
+ * cmd, EFAULT for invalid address, EPERM for nonpriv. later
+ * works as the input of VM healthy status.
+ */
 static int command_scan(struct parser_exec_state *s,
 		unsigned long rb_head, unsigned long rb_tail,
 		unsigned long rb_start, unsigned long rb_len)
@@ -2487,7 +2506,7 @@ static int command_scan(struct parser_exec_state *s,
 					s->ip_gma, rb_start,
 					gma_bottom);
 				parser_exec_state_dump(s);
-				return -EINVAL;
+				return -EFAULT;
 			}
 			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
 				gvt_vgpu_err("ip_gma %lx out of range."
@@ -2516,7 +2535,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	int ret = 0;
 
 	/* ring base is page aligned */
-	if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
 		return -EINVAL;
 
 	gma_head = workload->rb_start + workload->rb_head;
@@ -2565,7 +2584,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 				wa_ctx);
 
 	/* ring base is page aligned */
-	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
+					I915_GTT_PAGE_SIZE)))
 		return -EINVAL;
 
 	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
@@ -2604,6 +2624,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
 	void *shadow_ring_buffer_va;
 	int ring_id = workload->ring_id;
@@ -2619,19 +2640,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_tail = workload->rb_start + workload->rb_tail;
 	gma_top = workload->rb_start + guest_rb_size;
 
-	if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
-		void *va = vgpu->reserve_ring_buffer_va[ring_id];
+	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+		void *p;
 
 		/* realloc the new ring buffer if needed */
-		vgpu->reserve_ring_buffer_va[ring_id] =
-			krealloc(va, workload->rb_len, GFP_KERNEL);
-		if (!vgpu->reserve_ring_buffer_va[ring_id]) {
-			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
+				GFP_KERNEL);
+		if (!p) {
+			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
 			return -ENOMEM;
 		}
-		vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
+		s->ring_scan_buffer[ring_id] = p;
+		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
 	}
 
-	shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
+	shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
 
 	/* get shadow ring buffer va */
 	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
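
[Editor's note: the comment added before command_scan() fixes the convention that the -EINVAL conversions in this file follow: -EBADRQC for an unknown or inhibited command, -EFAULT for an out-of-range address, -EPERM for a non-whitelisted force-nonpriv write. A sketch of how a caller might fold these codes into the VM health check mentioned in the tag message; handle_scan_result() and the enter_failsafe_mode() recovery hook are hypothetical names, not part of this patch:]

#include <errno.h>

/* hypothetical recovery hook; in GVT-g the VM health check plays this role */
extern void enter_failsafe_mode(int vgpu_id, int reason);

static void handle_scan_result(int vgpu_id, int ret)
{
	switch (ret) {
	case -EBADRQC:	/* unknown or register-inhibited command */
	case -EFAULT:	/* address outside the valid GGTT/MMIO range */
	case -EPERM:	/* non-whitelisted force-nonpriv register write */
		enter_failsafe_mode(vgpu_id, ret);
		break;
	default:
		break;	/* 0: scan passed; other codes handled elsewhere */
	}
}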
drivers/gpu/drm/i915/gvt/debug.h +12 −12
@@ -25,41 +25,41 @@
 #define __GVT_DEBUG_H__
 
 #define gvt_err(fmt, args...) \
-	DRM_ERROR("gvt: "fmt, ##args)
+	pr_err("gvt: "fmt, ##args)
 
 #define gvt_vgpu_err(fmt, args...)					\
 do {									\
 	if (IS_ERR_OR_NULL(vgpu))					\
-		DRM_DEBUG_DRIVER("gvt: "fmt, ##args);			\
+		pr_err("gvt: "fmt, ##args);			\
 	else								\
-		DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+		pr_err("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
 } while (0)
 
 #define gvt_dbg_core(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
+	pr_debug("gvt: core: "fmt, ##args)
 
 #define gvt_dbg_irq(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
+	pr_debug("gvt: irq: "fmt, ##args)
 
 #define gvt_dbg_mm(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
+	pr_debug("gvt: mm: "fmt, ##args)
 
 #define gvt_dbg_mmio(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
+	pr_debug("gvt: mmio: "fmt, ##args)
 
 #define gvt_dbg_dpy(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
+	pr_debug("gvt: dpy: "fmt, ##args)
 
 #define gvt_dbg_el(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
+	pr_debug("gvt: el: "fmt, ##args)
 
 #define gvt_dbg_sched(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
+	pr_debug("gvt: sched: "fmt, ##args)
 
 #define gvt_dbg_render(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
+	pr_debug("gvt: render: "fmt, ##args)
 
 #define gvt_dbg_cmd(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
+	pr_debug("gvt: cmd: "fmt, ##args)
 
 #endif
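
[Editor's note: with the gvt_dbg_* macros now built on pr_debug(), a kernel with CONFIG_DYNAMIC_DEBUG can enable individual categories at runtime through the standard dynamic-debug control file, for example (illustrative invocation):

	echo 'format "gvt: cmd:" +p' > /sys/kernel/debug/dynamic_debug/control

gvt_err() and gvt_vgpu_err() move to pr_err(), so errors always reach the log regardless of the drm.debug mask.]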