Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a27de35c authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: remove the ring lock v2



It's not needed any more because all access goes through the scheduler now.

v2: Update commit message.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a9a78b32
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -814,7 +814,6 @@ struct amdgpu_ring {
	struct amd_gpu_scheduler 	sched;

	spinlock_t              fence_lock;
	struct mutex		*ring_lock;
	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
@@ -1190,12 +1189,9 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
/* Ring access between begin & end cannot sleep */
void amdgpu_ring_free_size(struct amdgpu_ring *ring);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
@@ -2009,7 +2005,6 @@ struct amdgpu_device {

	/* rings */
	unsigned			fence_context;
	struct mutex			ring_lock;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	bool				ib_pool_ready;
+0 −1
Original line number Diff line number Diff line
@@ -1455,7 +1455,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&adev->ring_lock);
	mutex_init(&adev->vm_manager.lock);
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->gem.mutex);
+0 −6
Original line number Diff line number Diff line
@@ -487,7 +487,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)

	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
		kmem_cache_destroy(amdgpu_fence_slab);
	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

@@ -505,7 +504,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
		del_timer_sync(&ring->fence_drv.fallback_timer);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}

/**
@@ -520,7 +518,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
@@ -537,7 +534,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
@@ -556,7 +552,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
@@ -566,7 +561,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
+5 −5
Original line number Diff line number Diff line
@@ -147,7 +147,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		return -EINVAL;
	}

	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
	r = amdgpu_ring_alloc(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
@@ -155,7 +155,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,

	r = amdgpu_sync_wait(&ibs->sync);
	if (r) {
		amdgpu_ring_unlock_undo(ring);
		amdgpu_ring_undo(ring);
		dev_err(adev->dev, "failed to sync wait (%d)\n", r);
		return r;
	}
@@ -180,7 +180,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,

		if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) {
			ring->current_ctx = old_ctx;
			amdgpu_ring_unlock_undo(ring);
			amdgpu_ring_undo(ring);
			return -EINVAL;
		}
		amdgpu_ring_emit_ib(ring, ib);
@@ -191,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		ring->current_ctx = old_ctx;
		amdgpu_ring_unlock_undo(ring);
		amdgpu_ring_undo(ring);
		return r;
	}

@@ -203,7 +203,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
				       AMDGPU_FENCE_FLAG_64BIT);
	}

	amdgpu_ring_unlock_commit(ring);
	amdgpu_ring_commit(ring);
	return 0;
}

+6 −13
Original line number Diff line number Diff line
@@ -623,14 +623,12 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	mutex_lock(&adev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		goto done;
		return;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);
@@ -667,9 +665,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&adev->ring_lock);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
@@ -802,13 +797,11 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		mutex_lock(&adev->ring_lock);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}
		mutex_unlock(&adev->ring_lock);

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
Loading