
Commit 5e2d0967 authored by Jani Nikula

Merge tag 'gvt-fixes-2017-08-07' of https://github.com/01org/gvt-linux into drm-intel-fixes



gvt-fixes-2017-08-07

- two regression fixes for 65f9f6fe: one for the initial display MMIO
  values (Tina), the other for 64-bit MMIO access (Xiong)
- two reset fixes from Chuanxiao (per-engine reset tracking; a sketch
  of the pattern follows)
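
For orientation, the reset fixes below replace the vGPU-wide "resetting"
flag with a per-engine bitmask, so only workloads on engines actually
under reset are skipped. A minimal sketch of the check, assuming i915's
usual ENGINE_MASK(id) == BIT(id) helper (the helper name here is
hypothetical; the field and mask usage match the diff):

/* Sketch: per-engine reset tracking. resetting_eng holds one bit per
 * engine; a workload is skipped only if its own engine is resetting.
 */
static bool vgpu_engine_is_resetting(struct intel_vgpu *vgpu, int ring_id)
{
	return vgpu->resetting_eng & ENGINE_MASK(ring_id);
}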

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170807080716.qljcvws6opydnotk@zhen-hp.sh.intel.com
parents 5279fc77 d6086598
+22 −5
@@ -46,6 +46,8 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_execlist *execlist =
-		&vgpu->execlist[workload->ring_id];
+	int ring_id = workload->ring_id;
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
 	int ret;
 
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
-	if (workload->status || vgpu->resetting)
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+		/* If workload->status is not successful, the HW GPU hit
+		 * a hang or something went wrong in i915/GVT, and GVT
+		 * won't inject a context switch interrupt to the guest.
+		 * To the guest, this error is therefore a vGPU hang, so
+		 * we should emulate one: if there are pending workloads
+		 * already submitted by the guest, clean them up the way
+		 * the HW GPU would.
+		 *
+		 * If the engine is in the middle of a reset, the pending
+		 * workloads won't be submitted to the HW GPU and will be
+		 * cleaned up later during the reset, so doing the cleanup
+		 * here as well has no ill effect.
+		 */
+		clean_workloads(vgpu, ENGINE_MASK(ring_id));
 		goto out;
+	}
 
-	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
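
The new error path leans on clean_workloads() (declared in the first
hunk) to drop every queued workload on the hung engine, the way real
hardware would discard them. A simplified sketch of such a helper,
assuming a per-ring workload queue; the list member name and the
free_workload() destructor are assumptions for illustration:

/* Sketch: flush all pending workloads on the engines in engine_mask,
 * emulating what the HW GPU does after a hang.
 */
static void clean_workloads_sketch(struct intel_vgpu *vgpu,
				   unsigned long engine_mask)
{
	struct intel_vgpu_workload *pos, *n;
	unsigned int ring_id;

	for_each_set_bit(ring_id, &engine_mask, I915_NUM_ENGINES) {
		list_for_each_entry_safe(pos, n,
				workload_q_head(vgpu, ring_id), list) {
			list_del_init(&pos->list);
			free_workload(pos);	/* hypothetical destructor */
		}
	}
}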
+10 −1
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 	struct intel_gvt_mmio_info *e;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
 	unsigned long size, crc32_start;
-	int i;
+	int i, j;
 	int ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
 		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
+	for (i = 0; i < num; i++, block++) {
+		for (j = 0; j < block->size; j += 4)
+			*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+				I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+							block->offset) + j));
+	}
+
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
 	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
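
The added loop complements the hash-table walk just above it: registers
tracked individually are snapshotted via mmio_info_table, while the
special block ranges are copied dword by dword. The same loop with the
offset hoisted into a local, purely for readability (a sketch, not a
proposed change):

/* Sketch: snapshot each special MMIO block into the firmware image.
 * INTEL_GVT_MMIO_OFFSET() unwraps an i915_reg_t into a byte offset.
 */
for (i = 0; i < num; i++, block++) {
	u32 start = INTEL_GVT_MMIO_OFFSET(block->offset);

	for (j = 0; j < block->size; j += 4)
		*(u32 *)(p + start + j) =
			I915_READ_NOTRACE(_MMIO(start + j));
}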
+13 −1
@@ -149,7 +149,7 @@ struct intel_vgpu {
 	bool active;
 	bool pv_notified;
 	bool failsafe;
-	bool resetting;
+	unsigned int resetting_eng;
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
 
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
 	unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t   offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
 
+	struct gvt_mmio_block *mmio_block;
+	unsigned int num_mmio_block;
+
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
 	unsigned int num_tracked_mmio;
 };
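
Hoisting the block table behind struct intel_gvt_mmio is what lets both
the firmware snapshot code above and the MMIO handlers below reach it
through gvt->mmio. A sketch of how a caller might dispatch through a
block's optional hooks, assuming gvt_mmio_func's usual
(vgpu, offset, data, bytes) shape; default_rw() is a hypothetical
stand-in for the pass-through path (in this table only the PVINFO page
installs real handlers):

/* Sketch: use a block's read/write hook when present, else fall back. */
static int block_rw_sketch(struct intel_vgpu *vgpu,
			   struct gvt_mmio_block *block,
			   unsigned int offset, void *pdata,
			   unsigned int bytes, bool is_read)
{
	gvt_mmio_func func = is_read ? block->read : block->write;

	if (func)
		return func(vgpu, offset, pdata, bytes);
	return default_rw(vgpu, offset, pdata, bytes, is_read);
}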
+18 −20
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-	unsigned int device;
-	i915_reg_t   offset;
-	unsigned int size;
-	gvt_mmio_func read;
-	gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-		pvinfo_mmio_read, pvinfo_mmio_write},
-	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
 					      unsigned int offset)
 {
 	unsigned long device = intel_gvt_get_device_type(gvt);
-	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+	for (i = 0; i < num; i++, block++) {
 		if (!(device & block->device))
 			continue;
 		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
 	gvt->mmio.mmio_attribute = NULL;
 }
 
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 			goto err;
 	}
 
+	gvt->mmio.mmio_block = mmio_blocks;
+	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
 	gvt_dbg_mmio("traced %u virtual mmio registers\n",
 		     gvt->mmio.num_tracked_mmio);
 	return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 	gvt_mmio_func func;
 	int ret;
 
-	if (WARN_ON(bytes > 4))
+	if (WARN_ON(bytes > 8))
 		return -EINVAL;
 
 	/*
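
The relaxed WARN_ON admits the 8-byte accesses that 65f9f6fe's rework
made reachable. Purely as an illustration of why bytes == 8 is a
legitimate access size (this is not the GVT code path), a 64-bit MMIO
read is just two adjacent dword reads:

/* Illustration: compose a qword MMIO read from two dword reads of
 * adjacent registers. Hypothetical helper, not GVT code.
 */
static u64 mmio_read_qword(void __iomem *base, unsigned int offset)
{
	u64 lo = readl(base + offset);		/* low dword */
	u64 hi = readl(base + offset + 4);	/* high dword */

	return lo | (hi << 32);
}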
+2 −1
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 		i915_gem_request_put(fetch_and_zero(&workload->req));
 
-		if (!workload->status && !vgpu->resetting) {
+		if (!workload->status && !(vgpu->resetting_eng &
+					   ENGINE_MASK(ring_id))) {
 			update_guest_context(workload);
 
 			for_each_set_bit(event, workload->pending_events,