
Commit 5ceb54c6 authored by Alex Deucher

drm/amdgpu: add fence suspend/resume functions



Added in order to:
- handle draining the ring on suspend
- properly enable/disable interrupts on suspend and resume

Fix breakages from:
commit 467ee3be53d240d08beed2e82a941e820c1ac323
Author: Chunming Zhou <david1.zhou@amd.com>
Date:   Mon Jun 1 14:14:32 2015 +0800

    drm/amdgpu: always enable EOP interrupt v2

Tested-by: Audrey Grodzovsky <audrey.grodzovsky@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c6a4079b
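Background on why the interrupt half of this matters: amdgpu interrupt sources are reference counted. Roughly, amdgpu_irq_get() programs the source on in hardware when the first user takes it, and amdgpu_irq_put() programs it off when the last user drops it. After the "always enable EOP interrupt" change referenced above, the fence driver holds such a reference for a ring's whole lifetime, so a suspend path that never puts that reference leaves the interrupt armed across the power transition. A minimal user-space model of that counting (struct irq_src, irq_get(), irq_put() and the printf markers are illustrative stand-ins, not driver code):

/*
 * Toy model of a refcounted interrupt source, in the spirit of
 * amdgpu_irq_get()/amdgpu_irq_put().  Build with: cc -o irq irq.c
 */
#include <stdio.h>

struct irq_src {
	int enabled_refcount;	/* how many users want this IRQ on */
};

/* enable the source in "hardware" when the first user grabs it */
static void irq_get(struct irq_src *src)
{
	if (src->enabled_refcount++ == 0)
		printf("IRQ enabled in hardware\n");
}

/* disable the source when the last user drops it */
static void irq_put(struct irq_src *src)
{
	if (--src->enabled_refcount == 0)
		printf("IRQ disabled in hardware\n");
}

int main(void)
{
	struct irq_src eop = { 0 };

	irq_get(&eop);	/* fence driver start: EOP interrupt held on   */
	irq_put(&eop);	/* fence driver suspend: balance it before sleep */
	irq_get(&eop);	/* fence driver resume: re-enable after wake    */
	irq_put(&eop);	/* fence driver fini                            */
	return 0;
}

The new amdgpu_fence_driver_suspend()/amdgpu_fence_driver_resume() below are the put and get halves of exactly this pairing, applied per ring.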
drivers/gpu/drm/amd/amdgpu/amdgpu.h: +2 −0
@@ -426,6 +426,8 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 		      struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
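These two declarations are the entire new interface: device-level entry points meant to bracket the hardware suspend/resume that amdgpu_suspend()/amdgpu_resume() perform. A sketch of the intended ordering, with stub types and printf bodies standing in for the real driver (only the two amdgpu_fence_driver_* names come from this commit; hw_suspend()/hw_resume() are hypothetical placeholders):

/* Stub model of the ordering the new entry points rely on. */
#include <stdio.h>

struct amdgpu_device { const char *name; };

/* stand-ins for the real functions; bodies are placeholders */
static void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	printf("%s: drain rings, disable fence interrupts\n", adev->name);
}

static void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	printf("%s: re-enable fence interrupts\n", adev->name);
}

static void hw_suspend(struct amdgpu_device *adev) { printf("%s: IP blocks down\n", adev->name); }
static void hw_resume(struct amdgpu_device *adev)  { printf("%s: IP blocks up\n", adev->name); }

int main(void)
{
	struct amdgpu_device adev = { "gpu0" };

	/* suspend: quiesce fences while the hardware can still signal them */
	amdgpu_fence_driver_suspend(&adev);
	hw_suspend(&adev);

	/* resume: mirror order - hardware first, then interrupts */
	hw_resume(&adev);
	amdgpu_fence_driver_resume(&adev);
	return 0;
}

Draining has to happen first on the way down, while the hardware can still signal outstanding fences, and interrupt re-enable has to happen last on the way up, once the IP blocks are alive again; the next file's diff wires the calls in at exactly those points.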
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c: +4 −17
@@ -1627,8 +1627,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
-	int i, r;
-	bool force_completion = false;
+	int r;
 
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
@@ -1667,21 +1666,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
-	/* wait for gpu to finish processing current batch */
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring)
-			continue;
-
-		r = amdgpu_fence_wait_empty(ring);
-		if (r) {
-			/* delay GPU reset to resume */
-			force_completion = true;
-		}
-	}
-	if (force_completion) {
-		amdgpu_fence_driver_force_completion(adev);
-	}
+	amdgpu_fence_driver_suspend(adev);
 
 	r = amdgpu_suspend(adev);

@@ -1739,6 +1724,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)

 	r = amdgpu_resume(adev);
 
+	amdgpu_fence_driver_resume(adev);
+
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
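A note on the removed open-coded loop and its replacement: an amdgpu fence is a per-ring sequence number, and waiting for a ring to drain can fail if the GPU hangs. Both the old and the new code then fall back to amdgpu_fence_driver_force_completion(), which reports every emitted fence as signaled so that sleeping waiters are released, and defers the actual GPU reset until resume. A toy model of that drain-or-force decision (struct ring, wait_empty(), force_completion() are simplified stand-ins for the driver's fence bookkeeping):

/* Toy model of draining a ring's fences with a force-completion fallback. */
#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned emitted;	/* last fence sequence number emitted   */
	unsigned signaled;	/* last sequence the hardware completed */
	bool hung;
};

/* wait until every emitted fence has signaled; -1 models a hung ring */
static int wait_empty(struct ring *ring)
{
	if (ring->hung)
		return -1;
	ring->signaled = ring->emitted;
	return 0;
}

/* pretend everything finished so waiters wake up; defer reset to resume */
static void force_completion(struct ring *ring)
{
	ring->signaled = ring->emitted;
}

int main(void)
{
	struct ring rings[2] = {
		{ .emitted = 10, .signaled = 7,  .hung = false },
		{ .emitted = 42, .signaled = 40, .hung = true  },
	};

	for (int i = 0; i < 2; i++) {
		if (wait_empty(&rings[i]))
			force_completion(&rings[i]);
		printf("ring %d drained: %u/%u\n",
		       i, rings[i].signaled, rings[i].emitted);
	}
	return 0;
}

Folding the loop into amdgpu_fence_driver_suspend() also moves the drain under adev->ring_lock and pairs it with the interrupt disable, which the open-coded version never did.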
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c: +61 −0
@@ -955,6 +955,67 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 	mutex_unlock(&adev->ring_lock);
 }
 
+/**
+ * amdgpu_fence_driver_suspend - suspend the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Suspend the fence driver for all possible rings (all asics).
+ */
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* wait for gpu to finish processing current batch */
+		r = amdgpu_fence_wait_empty(ring);
+		if (r) {
+			/* delay GPU reset to resume */
+			amdgpu_fence_driver_force_completion(adev);
+		}
+
+		/* disable the interrupt */
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
+/**
+ * amdgpu_fence_driver_resume - resume the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Resume the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * amdgpu_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+{
+	int i;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* enable the interrupt */
+		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
 /**
  * amdgpu_fence_driver_force_completion - force all fence waiter to complete
  *