Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c19ae989 authored by Chris Wilson
Browse files

drm/i915: Hide the atomic_read(reset_counter) behind a helper



This is principally a little bit of syntactic sugar to hide the
atomic_read()s throughout the code to retrieve the current reset_counter.
It also provides the other utility functions to check the reset state on the
already read reset_counter, so that (in later patches) we can read it once
and do multiple tests rather than risk the value changing between tests.

v2: Be more strict on converting existing i915_reset_in_progress() over to
the more verbose i915_reset_in_progress_or_wedged().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1460565315-7748-4-git-send-email-chris@chris-wilson.co.uk
parent d501b1d2
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -4722,7 +4722,7 @@ i915_wedged_get(void *data, u64 *val)
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
	*val = i915_reset_counter(&dev_priv->gpu_error);

	return 0;
}
@@ -4741,7 +4741,7 @@ i915_wedged_set(void *data, u64 val)
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_in_progress(&dev_priv->gpu_error))
	if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error))
		return -EAGAIN;

	intel_runtime_pm_get(dev_priv);
+28 −4
Original line number Diff line number Diff line
@@ -3093,20 +3093,44 @@ void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);

static inline u32 i915_reset_counter(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter);
}

static inline bool __i915_reset_in_progress(u32 reset)
{
	return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
{
	return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline bool __i915_terminally_wedged(u32 reset)
{
	return unlikely(reset & I915_WEDGED);
}

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
	return __i915_reset_in_progress(i915_reset_counter(error));
}

static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
{
	return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
	return __i915_terminally_wedged(i915_reset_counter(error));
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
	return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}

static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
+8 −8
Original line number Diff line number Diff line
@@ -83,7 +83,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
#define EXIT_COND (!i915_reset_in_progress_or_wedged(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;
@@ -1112,7 +1112,7 @@ int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
	if (i915_reset_in_progress_or_wedged(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
@@ -1299,7 +1299,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
		if (reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truely gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
@@ -1474,7 +1474,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
		return ret;

	ret = __i915_wait_request(req,
				  atomic_read(&dev_priv->gpu_error.reset_counter),
				  i915_reset_counter(&dev_priv->gpu_error),
				  interruptible, NULL, NULL);
	if (ret)
		return ret;
@@ -1563,7 +1563,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	reset_counter = i915_reset_counter(&dev_priv->gpu_error);

	if (readonly) {
		struct drm_i915_gem_request *req;
@@ -3179,7 +3179,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	}

	drm_gem_object_unreference(&obj->base);
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	reset_counter = i915_reset_counter(&dev_priv->gpu_error);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (obj->last_read_req[i] == NULL)
@@ -3224,7 +3224,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
	if (!i915_semaphore_is_enabled(obj->base.dev)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		ret = __i915_wait_request(from_req,
					  atomic_read(&i915->gpu_error.reset_counter),
					  i915_reset_counter(&i915->gpu_error),
					  i915->mm.interruptible,
					  NULL,
					  &i915->rps.semaphores);
@@ -4205,7 +4205,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)

		target = request;
	}
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	if (target)
		i915_gem_request_reference(target);
	spin_unlock(&file_priv->mm.lock);
+1 −1
Original line number Diff line number Diff line
@@ -2501,7 +2501,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
	if (i915_reset_in_progress_or_wedged(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);
+11 −7
Original line number Diff line number Diff line
@@ -3200,10 +3200,12 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned reset_counter;
	bool pending;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	if (intel_crtc->reset_counter != reset_counter ||
	    __i915_reset_in_progress_or_wedged(reset_counter))
		return false;

	spin_lock_irq(&dev->event_lock);
@@ -10908,9 +10910,11 @@ static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reset_counter;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	if (crtc->reset_counter != reset_counter ||
	    __i915_reset_in_progress_or_wedged(reset_counter))
		return true;

	/*
@@ -11573,7 +11577,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
		goto cleanup;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
@@ -13419,10 +13423,10 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
	if (!ret && !async && !i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
		u32 reset_counter;

		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
		reset_counter = i915_reset_counter(&dev_priv->gpu_error);
		mutex_unlock(&dev->struct_mutex);

		for_each_plane_in_state(state, plane, plane_state, i) {
Loading