Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cb823ed9 authored by Chris Wilson
Browse files

drm/i915/gt: Use intel_gt as the primary object for handling resets



Having taken the first step in encapsulating the functionality by moving
the related files under gt/, the next step is to start encapsulating by
passing around the relevant structs rather than the global
drm_i915_private. In this step, we pass intel_gt to intel_reset.c

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190712192953.9187-1-chris@chris-wilson.co.uk
parent 139ab811
Loading
Loading
Loading
Loading
+13 −9
Original line number Diff line number Diff line
@@ -4271,12 +4271,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
@@ -4322,7 +4323,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
@@ -4362,7 +4363,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}

static void icl_set_pipe_chicken(struct intel_crtc *crtc)
@@ -13873,18 +13874,21 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}

static void intel_atomic_cleanup_work(struct work_struct *work)
+1 −1
Original line number Diff line number Diff line
@@ -2127,7 +2127,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = i915_terminally_wedged(i915);
	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

+1 −1
Original line number Diff line number Diff line
@@ -2130,7 +2130,7 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	err = i915_terminally_wedged(eb->i915);
	err = intel_gt_terminally_wedged(ce->engine->gt);
	if (err)
		return err;

+5 −3
Original line number Diff line number Diff line
@@ -7,6 +7,8 @@
#include <linux/mman.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
@@ -246,7 +248,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)

	wakeref = intel_runtime_pm_get(rpm);

	srcu = i915_reset_trylock(i915);
	srcu = intel_gt_reset_trylock(ggtt->vm.gt);
	if (srcu < 0) {
		ret = srcu;
		goto err_rpm;
@@ -326,7 +328,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_reset:
	i915_reset_unlock(i915, srcu);
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	i915_gem_object_unpin_pages(obj);
@@ -339,7 +341,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(i915))
		if (!intel_gt_is_wedged(ggtt->vm.gt))
			return VM_FAULT_SIGBUS;
		/* else: fall through */
	case -EAGAIN:
+13 −12
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
@@ -106,18 +107,18 @@ static int pm_notifier(struct notifier_block *nb,
	return NOTIFY_OK;
}

static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
	bool result = !i915_terminally_wedged(i915);
	bool result = !intel_gt_is_wedged(gt);

	do {
		if (i915_gem_wait_for_idle(i915,
		if (i915_gem_wait_for_idle(gt->i915,
					   I915_WAIT_LOCKED |
					   I915_WAIT_FOR_IDLE_BOOST,
					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
			/* XXX hide warning from gem_eio */
			if (i915_modparams.reset) {
				dev_err(i915->drm.dev,
				dev_err(gt->i915->drm.dev,
					"Failed to idle engines, declaring wedged!\n");
				GEM_TRACE_DUMP();
			}
@@ -126,18 +127,18 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
			 * Forcibly cancel outstanding work and leave
			 * the gpu quiet.
			 */
			i915_gem_set_wedged(i915);
			intel_gt_set_wedged(gt);
			result = false;
		}
	} while (i915_retire_requests(i915) && result);
	} while (i915_retire_requests(gt->i915) && result);

	GEM_BUG_ON(i915->gt.awake);
	GEM_BUG_ON(gt->awake);
	return result;
}

bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
	return switch_to_kernel_context_sync(i915);
	return switch_to_kernel_context_sync(&i915->gt);
}

void i915_gem_suspend(struct drm_i915_private *i915)
@@ -158,7 +159,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	switch_to_kernel_context_sync(i915);
	switch_to_kernel_context_sync(&i915->gt);

	mutex_unlock(&i915->drm.struct_mutex);

@@ -169,7 +170,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
	GEM_BUG_ON(i915->gt.awake);
	flush_work(&i915->gem.idle_work);

	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&i915->gt.hangcheck.work);

	i915_gem_drain_freed_objects(i915);

@@ -277,10 +278,10 @@ void i915_gem_resume(struct drm_i915_private *i915)
	return;

err_wedged:
	if (!i915_reset_failed(i915)) {
	if (!intel_gt_is_wedged(&i915->gt)) {
		dev_err(i915->drm.dev,
			"Failed to re-initialize GPU, declaring it wedged!\n");
		i915_gem_set_wedged(i915);
		intel_gt_set_wedged(&i915->gt);
	}
	goto out_unlock;
}
Loading