
Commit c6a4079b authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: always enable EOP interrupt v2

v2 (chk): always enable EOP interrupt, independent of scheduler,
	  remove now unused delayed_irq handling.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
parent 7f8a5290
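
For context on the diff below: amdgpu interrupt sources are reference counted, so amdgpu_irq_get() enables a source when its use count rises from zero and amdgpu_irq_put() disables it when the count drops back to zero. The following is a minimal sketch of that get/put pattern for readers unfamiliar with it; the struct, field, and function names (irq_src_sketch, enabled_refs, hw_set_irq, irq_get_sketch, irq_put_sketch) are illustrative stand-ins, not the real amdgpu code.

	#include <stdatomic.h>

	struct irq_src_sketch {
		atomic_int enabled_refs;	/* how many users want this IRQ enabled */
	};

	/* Stand-in for programming the hardware interrupt-enable register. */
	static void hw_set_irq(struct irq_src_sketch *src, int on)
	{
		(void)src;
		(void)on;
	}

	static void irq_get_sketch(struct irq_src_sketch *src)
	{
		/* The first reference actually enables the interrupt. */
		if (atomic_fetch_add(&src->enabled_refs, 1) == 0)
			hw_set_irq(src, 1);
	}

	static void irq_put_sketch(struct irq_src_sketch *src)
	{
		/* Dropping the last reference disables it again. */
		if (atomic_fetch_sub(&src->enabled_refs, 1) == 1)
			hw_set_irq(src, 0);
	}

With this commit, the fence driver takes exactly one such reference per ring in amdgpu_fence_driver_start_ring() and drops it in amdgpu_fence_driver_fini(), so the EOP interrupt stays enabled for the ring's whole lifetime instead of being toggled around every fence wait.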
drivers/gpu/drm/amd/amdgpu/amdgpu.h  +0 −1
@@ -383,7 +383,6 @@ struct amdgpu_fence_driver {
 	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
 	atomic64_t			last_seq;
 	bool				initialized;
-	bool				delayed_irq;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
 	struct delayed_work             lockup_work;
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  +6 −36
@@ -164,8 +164,6 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
 		else
 			FENCE_TRACE(&fence->base, "was already signaled\n");
 
-		amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
-				fence->ring->fence_drv.irq_type);
 		__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
 		fence_put(&fence->base);
 	} else
@@ -267,12 +265,6 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 		return;
 	}
 
-	if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
-		fence_drv->delayed_irq = false;
-		amdgpu_irq_update(ring->adev, fence_drv->irq_src,
-				fence_drv->irq_type);
-	}
-
 	if (amdgpu_fence_activity(ring))
 		wake_up_all(&ring->adev->fence_queue);
 	else if (amdgpu_ring_is_lockup(ring)) {
@@ -420,29 +412,6 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
 		return false;
 
-	if (down_read_trylock(&adev->exclusive_lock)) {
-		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-			ring->fence_drv.irq_type);
-		if (amdgpu_fence_activity(ring))
-			wake_up_all_locked(&adev->fence_queue);
-
-		/* did fence get signaled after we enabled the sw irq? */
-		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				ring->fence_drv.irq_type);
-			up_read(&adev->exclusive_lock);
-			return false;
-		}
-
-		up_read(&adev->exclusive_lock);
-	} else {
-		/* we're probably in a lockup, lets not fiddle too much */
-		if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
-			ring->fence_drv.irq_type))
-			ring->fence_drv.delayed_irq = true;
-		amdgpu_fence_schedule_check(ring);
-	}
-
 	fence->fence_wake.flags = 0;
 	fence->fence_wake.private = NULL;
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
@@ -541,8 +510,6 @@ static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
 
 			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
 			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
-			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
 		}
 
 		if (intr) {
@@ -561,8 +528,6 @@ static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
 			if (!ring || !target_seq[i])
 				continue;
 
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
 			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
 		}
 
@@ -901,9 +866,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
-	ring->fence_drv.initialized = true;
+	amdgpu_irq_get(adev, irq_src, irq_type);
+
 	ring->fence_drv.irq_src = irq_src;
 	ring->fence_drv.irq_type = irq_type;
+	ring->fence_drv.initialized = true;
+
 	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
 		 "cpu addr 0x%p\n", ring->idx,
 		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
@@ -980,6 +948,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 			amdgpu_fence_driver_force_completion(adev);
 		}
 		wake_up_all(&adev->fence_queue);
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);