
Commit d92116a0 authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.14-rc6' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Standard fixes pull for rc6: one regression fix for amdgpu, a bunch of
  nouveau fixes that I'd missed a pull req for from Ben last week, some
  exynos regression fixes, and a few fixes for i915"

* tag 'drm-fixes-for-v4.14-rc6' of git://people.freedesktop.org/~airlied/linux:
  drm/nouveau/fbcon: fix oops without fbdev emulation
  Revert "drm/amdgpu: discard commands of killed processes"
  drm/i915: Use a mask when applying WaProgramL3SqcReg1Default
  drm/i915: Report -EFAULT before pwrite fast path into shmemfs
  drm/i915/cnl: Fix PLL initialization for HDMI.
  drm/i915/cnl: Fix PLL mapping.
  drm/i915: Use bdw_ddi_translations_fdi for Broadwell
  drm/i915: Fix eviction when the GGTT is idle but full
  drm/i915/gvt: Fix GPU hang after reusing vGPU instance across different guest OS
  drm/exynos: Clear drvdata after component unbind
  drm/exynos: Fix potential NULL pointer dereference in suspend/resume paths
  drm/nouveau/kms/nv50: fix oops during DP IRQ handling on non-MST boards
  drm/nouveau/bsp/g92: disable by default
  drm/nouveau/mmu: flush tlbs before deleting page tables
parents eb62722a 2cb3a34a
+4 −19
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity)
 {
 	struct amd_sched_rq *rq = entity->rq;
-	int r;
 
 	if (!amd_sched_entity_is_initialized(sched, entity))
 		return;
 
 	/**
 	 * The client will not queue more IBs during this fini, consume existing
-	 * queued IBs or discard them on SIGKILL
+	 * queued IBs
 	*/
-	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-		r = -ERESTARTSYS;
-	else
-		r = wait_event_killable(sched->job_scheduled,
-					amd_sched_entity_is_idle(entity));
-	amd_sched_rq_remove_entity(rq, entity);
-	if (r) {
-		struct amd_sched_job *job;
-		/* Park the kernel for a moment to make sure it isn't processing
-		 * our enity.
-		 */
-		kthread_park(sched->thread);
-		kthread_unpark(sched->thread);
-		while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
-			sched->ops->free_job(job);
-
-	}
+	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
+	amd_sched_rq_remove_entity(rq, entity);
 	kfifo_free(&entity->job_queue);
 }
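
For context on the behavioral difference the revert undoes: wait_event_killable() puts the task in TASK_KILLABLE state and returns -ERESTARTSYS if a fatal signal arrives, which is what let the discarded-commands path skip waiting, while plain wait_event() sleeps until the condition becomes true, unconditionally. A minimal kernel-style sketch of the two wait patterns (the queue and flag names below are illustrative, not from the patch):

	#include <linux/wait.h>
	#include <linux/errno.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
	static bool demo_idle;

	/* Pre-revert shape: the sleep can be cut short by SIGKILL, so the
	 * caller must handle -ERESTARTSYS and drain queued jobs itself. */
	static int teardown_killable(void)
	{
		int r = wait_event_killable(demo_wq, demo_idle);

		if (r)		/* fatal signal: free pending jobs here */
			return r;
		return 0;
	}

	/* Post-revert shape: block until the scheduler reports idle. */
	static void teardown(void)
	{
		wait_event(demo_wq, demo_idle);
	}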

+6 −2
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct exynos_drm_private *private = drm_dev->dev_private;
+	struct exynos_drm_private *private;
 
 	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;
 
+	private = drm_dev->dev_private;
+
 	drm_kms_helper_poll_disable(drm_dev);
 	exynos_drm_fbdev_suspend(drm_dev);
 	private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
 static int exynos_drm_resume(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct exynos_drm_private *private = drm_dev->dev_private;
+	struct exynos_drm_private *private;
 
 	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;
 
+	private = drm_dev->dev_private;
 	drm_atomic_helper_resume(drm_dev, private->suspend_state);
 	exynos_drm_fbdev_resume(drm_dev);
 	drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
 
 	kfree(drm->dev_private);
 	drm->dev_private = NULL;
+	dev_set_drvdata(dev, NULL);
 
 	drm_dev_unref(drm);
 }
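
The exynos change is a classic check-before-dereference fix: the old code read drm_dev->dev_private in the variable initializer, before the !drm_dev guard ever ran, so NULL drvdata would oops in the suspend/resume paths. A standalone C sketch of the buggy and fixed orderings (the struct and field names are stand-ins, not the driver's):

	#include <stdio.h>

	struct priv { int suspend_state; };
	struct device { struct priv *dev_private; };

	/* Buggy shape: 'dev' is dereferenced in the initializer, so the
	 * NULL check below comes too late to help. */
	static int suspend_buggy(struct device *dev)
	{
		struct priv *p = dev->dev_private;	/* oops if dev == NULL */

		if (!dev)
			return 0;
		return p->suspend_state;
	}

	/* Fixed shape: take the early exits first, dereference after. */
	static int suspend_fixed(struct device *dev)
	{
		struct priv *p;

		if (!dev)
			return 0;

		p = dev->dev_private;
		return p->suspend_state;
	}

	int main(void)
	{
		struct priv pr = { .suspend_state = 1 };
		struct device dev = { .dev_private = &pr };

		printf("%d %d\n", suspend_buggy(&dev), suspend_fixed(NULL));
		return 0;
	}

The companion dev_set_drvdata(dev, NULL) in exynos_drm_unbind is what makes the guard meaningful: after unbind, dev_get_drvdata() yields NULL instead of a stale pointer.
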
+10 −12
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-	int ring_id;
-
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
-
-	spin_lock_bh(&scheduler->mmio_context_lock);
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-		if (scheduler->engine_owner[ring_id] == vgpu) {
-			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-			scheduler->engine_owner[ring_id] = NULL;
-		}
-	}
-	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
+	int ring_id;
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		scheduler->need_reschedule = true;
 		scheduler->current_vgpu = NULL;
 	}
+
+	spin_lock_bh(&scheduler->mmio_context_lock);
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		if (scheduler->engine_owner[ring_id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+	}
+	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
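
What moved here: releasing per-ring MMIO-context ownership now happens when the vGPU stops being scheduled rather than only at final policy cleanup, so a vGPU instance that is stopped and reused by a different guest OS never leaves stale engine_owner pointers behind. A userspace analogue of the release-ownership-under-lock pattern (the names and the pthread mutex are illustrative stand-ins; the driver uses spin_lock_bh):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NUM_ENGINES 4

	struct vgpu { int id; };

	static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct vgpu *engine_owner[NUM_ENGINES];

	/* Walk every engine and drop any ownership this vGPU still holds,
	 * all under the lock so no one observes a dangling owner. */
	static void stop_schedule(struct vgpu *v)
	{
		pthread_mutex_lock(&owner_lock);
		for (int i = 0; i < NUM_ENGINES; i++) {
			if (engine_owner[i] == v)
				engine_owner[i] = NULL;	/* hand back to host */
		}
		pthread_mutex_unlock(&owner_lock);
	}

	int main(void)
	{
		struct vgpu *v = malloc(sizeof(*v));

		v->id = 1;
		engine_owner[0] = v;
		stop_schedule(v);	/* slot is safe to reuse afterwards */
		free(v);
		printf("engine 0 owner: %p\n", (void *)engine_owner[0]);
		return 0;
	}
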
+3 −0
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	if (READ_ONCE(obj->mm.pages))
 		return -ENODEV;
 
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
 	/* Before the pages are instantiated the object is treated as being
 	 * in the CPU domain. The pages will be clflushed as required before
 	 * use, and we can freely write into the pages directly. If userspace
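
The added madv check makes the pwrite fast path fail the same way the slow path eventually would: an object no longer marked I915_MADV_WILLNEED (i.e. purgeable or purged) must report -EFAULT before any bytes are written. A standalone sketch of that guard ordering (the enum and struct below are stand-ins, not the driver's types):

	#include <errno.h>
	#include <stdio.h>

	enum madv { MADV_WILLNEED, MADV_DONTNEED };

	struct object {
		int has_pages;
		enum madv madv;
	};

	/* Fast-path shape: cheap rejections first, ordered so the error
	 * userspace sees matches what the slow path would report. */
	static int pwrite_fast(const struct object *obj)
	{
		if (obj->has_pages)
			return -ENODEV;		/* defer to another path */

		if (obj->madv != MADV_WILLNEED)
			return -EFAULT;		/* purged object: fail now */

		return 0;			/* proceed with direct write */
	}

	int main(void)
	{
		struct object purged = { .has_pages = 0, .madv = MADV_DONTNEED };

		printf("%d\n", pwrite_fast(&purged));	/* -14, i.e. -EFAULT */
		return 0;
	}
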
+39 −24
@@ -33,17 +33,16 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv, id) {
-		struct intel_timeline *tl;
+       if (i915->gt.active_requests)
+	       return false;
 
-		tl = &ggtt->base.timeline.engine[engine->id];
-		if (i915_gem_active_isset(&tl->last_request))
+       for_each_engine(engine, i915, id) {
+	       if (engine->last_retired_context != i915->kernel_context)
 		       return false;
+       }
 
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, cache_level,
 				    start, end, mode);
 
-	/* Retire before we search the active list. Although we have
+	/*
+	 * Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
 	 * a stray pin (preventing eviction) that can only be resolved by
 	 * retiring.
@@ -182,7 +182,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 		BUG_ON(ret);
 	}
 
-	/* Can we unpin some objects such as idle hw contents,
+	/*
+	 * Can we unpin some objects such as idle hw contents,
 	 * or pending flips? But since only the GGTT has global entries
 	 * such as scanouts, rinbuffers and contexts, we can skip the
 	 * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ i915_gem_evict_something(struct i915_address_space *vm,
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
	/*
	 * Not everything in the GGTT is tracked via VMA using
	 * i915_vma_move_to_active(), otherwise we could evict as required
	 * with minimal stalling. Instead we are forced to idle the GPU and
	 * explicitly retire outstanding requests which will then remove
	 * the pinning for active objects such as contexts and ring,
	 * enabling us to evict them on the next iteration.
	 *
	 * To ensure that all user contexts are evictable, we perform
	 * a switch to the perma-pinned kernel context. This all also gives
	 * us a termination condition, when the last retired context is
	 * the kernel's there is no more we can evict.
	 */
		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
	}

	if (!ggtt_is_idle(dev_priv)) {
		ret = ggtt_flush(dev_priv);
		if (ret)
			return ret;

		goto search_again;
	}

	/*
	 * If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other other operations while
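
The control flow this hunk installs: search; if the GGTT is not yet idle, flush (switch everything to the perma-pinned kernel context and retire) and search again; only when ggtt_is_idle() holds and the search still fails do we report -ENOSPC (or -EAGAIN while scanout unpins are pending). A compact standalone sketch of that flush-and-retry shape and its termination condition (all names are illustrative stand-ins, not the driver's API):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static int flushes;

	/* Stand-ins for the driver state: one flush retires the pins
	 * that were blocking the search. */
	static bool search_space(void)	{ return flushes > 0; }
	static bool ggtt_idle(void)	{ return flushes > 0; }
	static int  flush_ggtt(void)	{ flushes++; return 0; }

	static int evict_something(void)
	{
	search_again:
		if (search_space())
			return 0;		/* found room */

		if (!ggtt_idle()) {
			int ret = flush_ggtt();	/* idle GPU, retire requests */

			if (ret)
				return ret;
			goto search_again;	/* pins are gone, retry */
		}

		/* Idle and still full: nothing further can be evicted. */
		return -ENOSPC;
	}

	int main(void)
	{
		printf("%d\n", evict_something());	/* 0 after one flush+retry */
		return 0;
	}

The idle check doubling as the termination condition is the point of the fix: once the last retired context on every engine is the kernel's, another flush cannot free anything, so the retry loop cannot spin forever.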