Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 736a1494 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge tag 'drm-intel-fixes-2017-01-26' of...

Merge tag 'drm-intel-fixes-2017-01-26' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

More fixes than I'd like at this stage, but I think the holidays and
conferences have delayed finding and fixing the stuff a bit. Almost all
of them have Fixes: tags, so it's not just random fixes, we can point
fingers at the commits that broke stuff.

There's an ABI fix to GVT from Alex, before we go on and release a kernel
with the wrong attribute name.

* tag 'drm-intel-fixes-2017-01-26' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: reinstate call to trace_i915_vma_bind
  drm/i915: Move atomic state free from out of fence release
  drm/i915: Check for NULL atomic state in intel_crtc_disable_noatomic()
  drm/i915: Fix calculation of rotated x and y offsets for planar formats
  drm/i915: Don't init hpd polling for vlv and chv from runtime_suspend()
  drm/i915: Don't leak edid in intel_crt_detect_ddc()
  drm/i915: Release temporary load-detect state upon switching
  drm/i915: prevent crash with .disable_display parameter
  drm/i915: Avoid drm_atomic_state_put(NULL) in intel_display_resume
  MAINTAINERS: update new mail list for intel gvt driver
  drm/i915/gvt: Fix kmem_cache_create() name
  drm/i915/gvt/kvmgt: mdev ABI is available_instances, not available_instance
  drm/i915/gvt: Fix relocation of shadow bb
  drm/i915/gvt: Enable the shadow batch buffer
parents 15266ae3 45d9f439
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -4153,7 +4153,7 @@ F: Documentation/gpu/i915.rst
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M:      Zhenyu Wang <zhenyuw@linux.intel.com>
M:      Zhi Wang <zhi.a.wang@intel.com>
L:      igvt-g-dev@lists.01.org
L:      intel-gvt-dev@lists.freedesktop.org
L:      intel-gfx@lists.freedesktop.org
W:      https://01.org/igvt-g
T:      git https://github.com/01org/gvt-linux.git
+0 −4
Original line number Diff line number Diff line
@@ -481,7 +481,6 @@ struct parser_exec_state {
	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask = 0;
static bool bypass_batch_buffer_scan = true;

/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;

	if (bypass_batch_buffer_scan)
		return 0;

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
		/* BDW decides privilege based on address space */
		if (cmd_val(s, 0) & (1 << 8))
+19 −47
Original line number Diff line number Diff line
@@ -364,43 +364,16 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))


#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
			     unsigned long add, int gmadr_bytes)
{
	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
		return -1;

	*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
		BATCH_BUFFER_ADDR_MASK;
	if (gmadr_bytes == 8) {
		*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
			add & BATCH_BUFFER_ADDR_HIGH_MASK;
	}

	return 0;
}

static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_shadow_bb_entry *entry_obj;

	/* pin the gem object to ggtt */
	if (!list_empty(&workload->shadow_bb)) {
		struct intel_shadow_bb_entry *entry_obj =
			list_first_entry(&workload->shadow_bb,
					 struct intel_shadow_bb_entry,
					 list);
		struct intel_shadow_bb_entry *temp;

		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
				list) {
	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
		struct i915_vma *vma;

			vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
						       4, 0);
		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
		if (IS_ERR(vma)) {
			gvt_err("Cannot pin\n");
			return;
@@ -412,10 +385,9 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
		 */

		/* update the relocate gma with shadow batch buffer*/
			set_gma_to_bb_cmd(entry_obj,
					  i915_ggtt_offset(vma),
					  gmadr_bytes);
		}
		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
		if (gmadr_bytes == 8)
			entry_obj->bb_start_cmd_va[2] = 0;
	}
}

@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
	}

	vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
			sizeof(struct intel_vgpu_workload), 0,
			SLAB_HWCACHE_ALIGN,
			NULL);
+4 −4
Original line number Diff line number Diff line
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
	return NULL;
}

static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
		char *buf)
static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct intel_vgpu_type *type;
	unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
				type->fence);
}

static MDEV_TYPE_ATTR_RO(available_instance);
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *type_attrs[] = {
	&mdev_type_attr_available_instance.attr,
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_description.attr,
	NULL,
+1 −1
Original line number Diff line number Diff line
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
	struct drm_i915_gem_object *obj;
	void *va;
	unsigned long len;
	void *bb_start_cmd_va;
	u32 *bb_start_cmd_va;
};

#define workload_q_head(vgpu, ring_id) \
Loading