Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit be86c606 authored by Chunming Zhou, committed by Alex Deucher
Browse files

drm/amdgpu: cleanup amdgpu_sync_rings V2



No longer needed now that semaphores are gone.

V2: remove the first amdgpu_sync_wait in amdgpu_ib_schedule

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Ken Wang  <Qingqing.Wang@amd.com> (V1)
Reviewed-by: Monk Liu <monk.liu@amd.com> (V2)
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 2f4b9400
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -654,8 +654,6 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+3 −7
Original line number Diff line number Diff line
@@ -141,11 +141,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		dev_err(adev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}
	r = amdgpu_sync_wait(&ibs->sync);
	if (r) {
		dev_err(adev->dev, "IB sync failed (%d).\n", r);
		return r;
	}

	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
@@ -161,10 +157,10 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		}
	}

	r = amdgpu_sync_rings(&ibs->sync, ring);
	r = amdgpu_sync_wait(&ibs->sync);
	if (r) {
		amdgpu_ring_unlock_undo(ring);
		dev_err(adev->dev, "failed to sync rings (%d)\n", r);
		dev_err(adev->dev, "failed to sync wait (%d)\n", r);
		return r;
	}

+0 −42
Original line number Diff line number Diff line
@@ -260,48 +260,6 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
	return 0;
}

/**
 * amdgpu_sync_rings - wait for all registered fences
 *
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all fences registered in @sync are signaled before
 * letting @ring continue.  The caller must hold the ring lock.
 *
 * Returns 0 on success, -EINVAL when a registered fence belongs to a
 * disabled ring, or the error returned by fence_wait().
 */
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *other = adev->rings[i];

		/* nothing registered against this ring, skip it */
		if (!sync->sync_to[i])
			continue;

		/* prevent GPU deadlocks */
		if (!other->ready) {
			dev_err(adev->dev, "Syncing to a disabled ring!\n");
			return -EINVAL;
		}

		if (amdgpu_enable_scheduler) {
			/* with the GPU scheduler enabled we simply
			 * CPU-wait for the fence to signal
			 */
			r = fence_wait(sync->sync_to[i], true);
			if (r)
				return r;
			continue;
		}

	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *