
Commit 33196ded authored by Daniel Vetter

drm/i915: move wedged to the other gpu error handling stuff



And to make Ben Widawsky happier, use the gpu_error instead of
the entire device as the argument in some functions.

Drop the outdated comment on ->wedged for now, a follow-up patch will
change the semantics and add a proper comment again.

Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 99584db3
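
In short, the patch below does two things: it moves the wedged flag out of struct i915_gem_mm and into struct i915_gpu_error, where the rest of the error-handling state already lives, and it narrows several functions to take the error struct instead of the whole device. The following is a minimal userspace C sketch of that refactoring pattern, with hypothetical names; it is an illustration, not the kernel code:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Before the patch, the flag lived in the memory-management struct
	 * and functions took the whole device-private pointer. Afterwards,
	 * the flag lives with the rest of the GPU error state and functions
	 * take only the sub-struct they actually use. */
	struct gpu_error_state {
		atomic_int wedged;	/* set when the GPU appears hung */
	};

	struct device_private {
		/* ... many unrelated fields ... */
		struct gpu_error_state gpu_error;
	};

	/* Narrowed signature: callers pass &dev_priv->gpu_error, making
	 * the function's real dependency explicit. */
	static int check_wedge(struct gpu_error_state *error)
	{
		return atomic_load(&error->wedged) ? -1 /* -EIO in the kernel */ : 0;
	}

	int main(void)
	{
		struct device_private dev_priv = {0};

		atomic_store(&dev_priv.gpu_error.wedged, 1);
		printf("check_wedge: %d\n", check_wedge(&dev_priv.gpu_error));
		return 0;
	}

The narrowing is the point of the "make Ben Widawsky happier" remark: a function that takes only i915_gpu_error cannot quietly grow dependencies on unrelated device state.
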
drivers/gpu/drm/i915/i915_debugfs.c  +1 −1
@@ -1672,7 +1672,7 @@ i915_wedged_read(struct file *filp,

	len = snprintf(buf, sizeof(buf),
		       "wedged :  %d\n",
-		       atomic_read(&dev_priv->mm.wedged));
+		       atomic_read(&dev_priv->gpu_error.wedged));

	if (len > sizeof(buf))
		len = sizeof(buf);
drivers/gpu/drm/i915/i915_drv.h  +3 −10
@@ -744,15 +744,6 @@ struct i915_gem_mm {
	 */
	int suspended;

-	/**
-	 * Flag if the hardware appears to be wedged.
-	 *
-	 * This is set when attempts to idle the device timeout.
-	 * It prevents command submission from occurring and makes
-	 * every pending request fail
-	 */
-	atomic_t wedged;
-
	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
@@ -784,6 +775,8 @@ struct i915_gpu_error {

	unsigned long last_reset;

+	atomic_t wedged;
+
	/* For gpu hang simulation. */
	unsigned int stop_rings;
};
@@ -1548,7 +1541,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)

void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);

void i915_gem_reset(struct drm_device *dev);
drivers/gpu/drm/i915/i915_gem.c  +17 −17
@@ -87,14 +87,13 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
}

static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct completion *x = &dev_priv->gpu_error.completion;
+	struct completion *x = &error->completion;
	unsigned long flags;
	int ret;

-	if (!atomic_read(&dev_priv->mm.wedged))
+	if (!atomic_read(&error->wedged))
		return 0;

	/*
@@ -110,7 +109,7 @@ i915_gem_wait_for_error(struct drm_device *dev)
		return ret;
	}

-	if (atomic_read(&dev_priv->mm.wedged)) {
+	if (atomic_read(&error->wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
@@ -125,9 +124,10 @@ i915_gem_wait_for_error(struct drm_device *dev)

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
+	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

-	ret = i915_gem_wait_for_error(dev);
+	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

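These two hunks cover the wait-for-error path: a thread that finds error->wedged set sleeps on error->completion until the reset worker (see i915_error_work_func further down) signals it. Below is a rough userspace analogue of that flag-plus-completion handshake, with a mutex/condvar pair standing in for the kernel completion and the interruptible-wait and token-bumping details left out; names are invented for the sketch:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Stand-in for the kernel's struct completion. */
	struct completion {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
	};

	static struct completion reset_done = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	static atomic_int wedged;

	/* Analogue of i915_gem_wait_for_error(): return at once if the
	 * GPU is healthy, otherwise block until reset recovery completes. */
	static void wait_for_error(void)
	{
		if (!atomic_load(&wedged))
			return;
		pthread_mutex_lock(&reset_done.lock);
		while (!reset_done.done)
			pthread_cond_wait(&reset_done.cond, &reset_done.lock);
		pthread_mutex_unlock(&reset_done.lock);
	}

	/* Analogue of the error work func: reset, clear the flag, signal. */
	static void *reset_worker(void *arg)
	{
		(void)arg;
		sleep(1);			/* pretend to reset the chip */
		atomic_store(&wedged, 0);
		pthread_mutex_lock(&reset_done.lock);
		reset_done.done = 1;
		pthread_cond_broadcast(&reset_done.cond);
		pthread_mutex_unlock(&reset_done.lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		atomic_store(&wedged, 1);	/* simulate a detected hang */
		pthread_create(&t, NULL, reset_worker, NULL);
		wait_for_error();		/* blocks until reset finishes */
		printf("recovered, wedged=%d\n", atomic_load(&wedged));
		pthread_join(t, NULL);
		return 0;
	}
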
@@ -939,11 +939,11 @@ unlock:
}

int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		struct completion *x = &dev_priv->gpu_error.completion;
+	if (atomic_read(&error->wedged)) {
+		struct completion *x = &error->completion;
		bool recovery_complete;
		unsigned long flags;

@@ -1025,7 +1025,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-	atomic_read(&dev_priv->mm.wedged))
+	atomic_read(&dev_priv->gpu_error.wedged))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
@@ -1035,7 +1035,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

-		ret = i915_gem_check_wedge(dev_priv, interruptible);
+		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);
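
The loop above exits either when the seqno has passed or when the wedged flag goes up, then re-checks i915_gem_check_wedge() so waiters on a hung GPU report an error instead of blocking forever. A compressed userspace sketch of that dual-exit wait follows, busy-waiting in place of wait_event_timeout() and using invented helper names:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <time.h>

	static atomic_uint completed_seqno;
	static atomic_int wedged;

	/* Mirrors the kernel macro: the wait is satisfied either by real
	 * progress (seqno passed) or by the error path raising wedged. */
	#define EXIT_COND(seqno) \
		(atomic_load(&completed_seqno) >= (seqno) || atomic_load(&wedged))

	/* Busy-wait stand-in for wait_event_timeout(); returns nonzero if
	 * EXIT_COND became true before the deadline. */
	static int wait_seqno(unsigned int seqno, int timeout_ms)
	{
		struct timespec ts = { 0, 1000000 };	/* 1 ms */

		for (int elapsed = 0; elapsed < timeout_ms; elapsed++) {
			if (EXIT_COND(seqno))
				return 1;
			nanosleep(&ts, NULL);
		}
		return EXIT_COND(seqno);
	}

	int main(void)
	{
		atomic_store(&wedged, 1);	/* simulate a hang mid-wait */
		printf("woke early: %d, wedged: %d\n",
		       wait_seqno(100, 50), atomic_load(&wedged));
		return 0;
	}
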
@@ -1081,7 +1081,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

-	ret = i915_gem_check_wedge(dev_priv, interruptible);
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

@@ -1146,7 +1146,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
	if (seqno == 0)
		return 0;

-	ret = i915_gem_check_wedge(dev_priv, true);
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

@@ -1379,7 +1379,7 @@ out:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
-		if (!atomic_read(&dev_priv->mm.wedged))
+		if (!atomic_read(&dev_priv->gpu_error.wedged))
			return VM_FAULT_SIGBUS;
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
@@ -3390,7 +3390,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
	u32 seqno = 0;
	int ret;

-	if (atomic_read(&dev_priv->mm.wedged))
+	if (atomic_read(&dev_priv->gpu_error.wedged))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
@@ -3978,9 +3978,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

-	if (atomic_read(&dev_priv->mm.wedged)) {
+	if (atomic_read(&dev_priv->gpu_error.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
-		atomic_set(&dev_priv->mm.wedged, 0);
+		atomic_set(&dev_priv->gpu_error.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
drivers/gpu/drm/i915/i915_irq.c  +3 −3
@@ -871,11 +871,11 @@ static void i915_error_work_func(struct work_struct *work)

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

-	if (atomic_read(&dev_priv->mm.wedged)) {
+	if (atomic_read(&dev_priv->gpu_error.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev)) {
-			atomic_set(&dev_priv->mm.wedged, 0);
+			atomic_set(&dev_priv->gpu_error.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->gpu_error.completion);
@@ -1483,7 +1483,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)

	if (wedged) {
		INIT_COMPLETION(dev_priv->gpu_error.completion);
-		atomic_set(&dev_priv->mm.wedged, 1);
+		atomic_set(&dev_priv->gpu_error.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
drivers/gpu/drm/i915/intel_display.c  +2 −2
@@ -2223,7 +2223,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	wait_event(dev_priv->pending_flip_queue,
-		   atomic_read(&dev_priv->mm.wedged) ||
+		   atomic_read(&dev_priv->gpu_error.wedged) ||
		   atomic_read(&obj->pending_flip) == 0);

	/* Big Hammer, we also need to ensure that any pending
@@ -2871,7 +2871,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
	unsigned long flags;
	bool pending;

-	if (atomic_read(&dev_priv->mm.wedged))
+	if (atomic_read(&dev_priv->gpu_error.wedged))
		return false;

	spin_lock_irqsave(&dev->event_lock, flags);