Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7ecc45e3 authored by Christian König's avatar Christian König
Browse files

drm/radeon: add error handling to fence_wait_empty_locked



Instead of returning the error, handle it directly
and while at it fix the comments about the ring lock.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 49099c49
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -239,7 +239,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr);
+21 −12
Original line number Diff line number Diff line
@@ -440,14 +440,11 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
	return 0;
}

/* caller must hold ring lock */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	/* We are not protected by ring lock when reading current seq but
	 * it's ok as worst case is we return to early while we could have
	 * wait.
	 */
	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
@@ -457,15 +454,27 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
/* caller must hold ring lock */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	/* We are not protected by ring lock when reading current seq
	 * but it's ok as wait empty is call from place where no more
	 * activity can be scheduled so there won't be concurrent access
	 * to seq value.
	 */
	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring],
				     ring, false, false);
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

	while(1) {
		int r;
		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
		if (r == -EDEADLK) {
			mutex_unlock(&rdev->ring_lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->ring_lock);
			if (!r)
				continue;
		}
		if (r) {
			dev_err(rdev->dev, "error waiting for ring to become"
				" idle (%d)\n", r);
		}
		return;
	}
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)