Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9021d2ed authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: mitigate workaround for i915



To be able to use DRI_PRIME with amdgpu and i915 we add all our fences
only as exclusive ones.

Disable that behavior when sharing between amdgpu devices themselves,
because it hinders concurrent execution.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5a13761f
Loading
Loading
Loading
Loading
+20 −11
Original line number Original line Diff line number Diff line
@@ -30,6 +30,8 @@
#include <drm/amdgpu_drm.h>
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-buf.h>


static const struct dma_buf_ops amdgpu_dmabuf_ops;

struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -129,20 +131,27 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
	if (unlikely(r != 0))
	if (unlikely(r != 0))
		goto error_detach;
		goto error_detach;



	if (dma_buf->ops != &amdgpu_dmabuf_ops) {
		/*
		/*
		 * Wait for all shared fences to complete before we switch to future
		 * Wait for all shared fences to complete before we switch to future
		 * use of exclusive fence on this prime shared bo.
		 * use of exclusive fence on this prime shared bo.
		 */
		 */
	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							MAX_SCHEDULE_TIMEOUT);
							MAX_SCHEDULE_TIMEOUT);
		if (unlikely(r < 0)) {
		if (unlikely(r < 0)) {
			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
			goto error_unreserve;
			goto error_unreserve;
		}
		}
	}


	/* pin buffer into GTT */
	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (likely(r == 0))
	if (r)
		goto error_unreserve;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count++;
		bo->prime_shared_count++;


error_unreserve:
error_unreserve:
@@ -166,7 +175,7 @@ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
		goto error;
		goto error;


	amdgpu_bo_unpin(bo);
	amdgpu_bo_unpin(bo);
	if (bo->prime_shared_count)
	if (dma_buf->ops != &amdgpu_dmabuf_ops && bo->prime_shared_count)
		bo->prime_shared_count--;
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unreserve(bo);