
Commit 1ffd2652 authored by Christian König, committed by Alex Deucher

drm/amdgpu: fix waiting for all fences before flipping



Otherwise we might see corruption.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 4127a59e
drivers/gpu/drm/amd/amdgpu/amdgpu.h +3 −1
@@ -828,7 +828,9 @@ struct amdgpu_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_rbo;
-	struct fence			*fence;
+	struct fence			*excl;
+	unsigned			shared_count;
+	struct fence			**shared;
 };
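
The header change above is the core of the fix: a single fence pointer can only remember the buffer's exclusive fence, while the buffer's reservation object may also carry any number of shared fences. Tracking excl together with a counted shared array lets the flip worker wait on all of them. Below is a rough, self-contained C model of the ownership rules the new fields imply; the mock fence type and the flip_work_model_* helpers are illustrative stand-ins, not the kernel API.

#include <stdlib.h>

/* Mock stand-ins for the kernel's struct fence and its refcounting. */
struct fence {
	int refcount;
};

static void fence_get(struct fence *f)
{
	if (f)
		f->refcount++;
}

static void fence_put(struct fence *f)
{
	if (f && --f->refcount == 0)
		free(f);
}

/* Mirrors the patched amdgpu_flip_work bookkeeping: one exclusive
 * fence plus a counted, heap-allocated array of shared fences. */
struct flip_work_model {
	struct fence *excl;
	unsigned int shared_count;
	struct fence **shared;
};

/* Acquisition step, as in the patch: take one reference per pointer
 * the work item keeps (fence_get(work->excl) plus one fence_get()
 * per shared fence). */
static void flip_work_model_adopt(struct flip_work_model *w)
{
	unsigned int i;

	fence_get(w->excl);
	for (i = 0; i < w->shared_count; ++i)
		fence_get(w->shared[i]);
}

/* Teardown must drop every reference and free the array itself; the
 * patch splits this between the error path in amdgpu_crtc_page_flip
 * and kfree(work->shared) in amdgpu_unpin_work_func. */
static void flip_work_model_cleanup(struct flip_work_model *w)
{
	unsigned int i;

	fence_put(w->excl);
	for (i = 0; i < w->shared_count; ++i)
		fence_put(w->shared[i]);
	free(w->shared);
	w->excl = NULL;
	w->shared = NULL;
	w->shared_count = 0;
}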




drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +53 −28
@@ -35,22 +35,16 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
-static void amdgpu_flip_work_func(struct work_struct *__work)
+static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
+				   struct fence **f)
 {
-	struct amdgpu_flip_work *work =
-		container_of(__work, struct amdgpu_flip_work, flip_work);
-	struct amdgpu_device *adev = work->adev;
-	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
-
-	struct drm_crtc *crtc = &amdgpuCrtc->base;
 	struct amdgpu_fence *fence;
-	unsigned long flags;
-	int r;
+	long r;
 
-	down_read(&adev->exclusive_lock);
-	if (work->fence) {
-		fence = to_amdgpu_fence(work->fence);
-		if (fence) {
-			r = fence_wait(&fence->base, false);
-			if (r == -EDEADLK) {
+	if (*f == NULL)
+		return;
+
+	fence = to_amdgpu_fence(*f);
+	if (fence) {
+		r = fence_wait(&fence->base, false);
+		if (r == -EDEADLK) {
@@ -59,20 +53,35 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
-				down_read(&adev->exclusive_lock);
-			}
-		} else
-			r = fence_wait(work->fence, false);
+			down_read(&adev->exclusive_lock);
+		}
+	} else
+		r = fence_wait(*f, false);
 
-		if (r)
-			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
+	if (r)
+		DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
 
-		/* We continue with the page flip even if we failed to wait on
-		 * the fence, otherwise the DRM core and userspace will be
-		 * confused about which BO the CRTC is scanning out
-		 */
+	/* We continue with the page flip even if we failed to wait on
+	 * the fence, otherwise the DRM core and userspace will be
+	 * confused about which BO the CRTC is scanning out
+	 */
 
-		fence_put(work->fence);
-		work->fence = NULL;
-	}
+	fence_put(*f);
+	*f = NULL;
+}
+
+static void amdgpu_flip_work_func(struct work_struct *__work)
+{
+	struct amdgpu_flip_work *work =
+		container_of(__work, struct amdgpu_flip_work, flip_work);
+	struct amdgpu_device *adev = work->adev;
+	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+
+	struct drm_crtc *crtc = &amdgpuCrtc->base;
+	unsigned long flags;
+	unsigned i;
+
+	down_read(&adev->exclusive_lock);
+	amdgpu_flip_wait_fence(adev, &work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		amdgpu_flip_wait_fence(adev, &work->shared[i]);
 
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
@@ -108,6 +117,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	kfree(work->shared);
 	kfree(work);
 }
 
@@ -127,7 +137,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
-	int r;
+	int i, r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
@@ -167,7 +177,19 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+					      &work->shared_count,
+					      &work->shared);
+	if (unlikely(r != 0)) {
+		amdgpu_bo_unreserve(new_rbo);
+		DRM_ERROR("failed to get fences for buffer\n");
+		goto cleanup;
+	}
+
+	fence_get(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_get(work->shared[i]);
+
 	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
 	amdgpu_bo_unreserve(new_rbo);
 
@@ -212,7 +234,10 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	fence_put(work->fence);
+	fence_put(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_put(work->shared[i]);
+	kfree(work->shared);
 	kfree(work);
 
 	return r;
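
Taken together, the two halves of the patch make amdgpu_crtc_page_flip snapshot every fence attached to the buffer through reservation_object_get_fences_rcu and hold a reference on each, while amdgpu_flip_work_func waits on the exclusive fence and then on each shared fence before programming the flip. Waiting on only the exclusive fence left other outstanding work on the buffer unaccounted for, which is the corruption the commit message refers to. The sketch below models that collect-then-wait pattern as a runnable userspace program; the mock fence type and the wait_all_fences helper are illustrative, not the kernel's interfaces.

#include <stdio.h>

/* Mock fence: "signaled" stands in for the GPU work completing. */
struct fence {
	int signaled;
};

static void fence_wait_model(struct fence *f)
{
	/* A real fence_wait() blocks until the fence signals; the
	 * model just records that the wait happened. */
	printf("waiting on fence %p (signaled=%d)\n", (void *)f, f->signaled);
	f->signaled = 1;
}

/* The pattern the patch introduces: wait on the exclusive fence and
 * then on every shared fence before touching the buffer, instead of
 * waiting on the exclusive fence alone. */
static void wait_all_fences(struct fence *excl,
			    unsigned int shared_count, struct fence **shared)
{
	unsigned int i;

	if (excl)	/* mirrors the NULL check in amdgpu_flip_wait_fence() */
		fence_wait_model(excl);
	for (i = 0; i < shared_count; ++i)
		fence_wait_model(shared[i]);
}

int main(void)
{
	struct fence excl = { 0 };
	struct fence a = { 0 }, b = { 0 };
	struct fence *shared[] = { &a, &b };

	wait_all_fences(&excl, 2, shared);
	return 0;
}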