
Commit 03507c4f authored by Christian König, committed by Alex Deucher

drm/amdgpu: recreate fence from user seq



And use common fence infrastructure for the wait.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 7cebc728
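
In short, the CS wait ioctl no longer fills a per-ring sequence array and calls the driver-private wait helper; it rebuilds an amdgpu_fence from the user-visible sequence number and hands its embedded struct fence to the generic fence_wait_timeout(). A minimal sketch of the resulting wait path, condensed from the amdgpu_cs_wait_ioctl hunk below (ring/context lookup and ioctl plumbing omitted):

	struct amdgpu_fence *fence = NULL;
	long r;

	/* Rebuild a fence object for the user-supplied sequence number. */
	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
	if (r)
		return r;

	/* fence->base embeds a struct fence, so the common fence API can wait on it. */
	r = fence_wait_timeout(&fence->base, true, timeout);
	amdgpu_fence_unref(&fence);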
+2 −3
@@ -425,6 +425,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   unsigned irq_type);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 		      struct amdgpu_fence **fence);
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+			  uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -435,9 +437,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
 int amdgpu_fence_wait_any(struct amdgpu_device *adev,
 			  struct amdgpu_fence **fences,
 			  bool intr);
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-				   u64 *target_seq, bool intr,
-				   long timeout);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);

+7 −4
@@ -739,9 +739,9 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
 	union drm_amdgpu_wait_cs *wait = data;
 	struct amdgpu_device *adev = dev->dev_private;
-	uint64_t seq[AMDGPU_MAX_RINGS] = {0};
-	struct amdgpu_ring *ring = NULL;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
+	struct amdgpu_fence *fence = NULL;
+	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
 	long r;

@@ -754,9 +754,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	if (r)
 		return r;

-	seq[ring->idx] = wait->in.handle;
+	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
+	if (r)
+		return r;

-	r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
+	r = fence_wait_timeout(&fence->base, true, timeout);
+	amdgpu_fence_unref(&fence);
 	amdgpu_ctx_put(ctx);
 	if (r < 0)
 		return r;
+35 −2
@@ -135,6 +135,38 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	return 0;
 }

+/**
+ * amdgpu_fence_recreate - recreate a fence from an user fence
+ *
+ * @ring: ring the fence is associated with
+ * @owner: creator of the fence
+ * @seq: user fence sequence number
+ * @fence: resulting amdgpu fence object
+ *
+ * Recreates a fence command from the user fence sequence number (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+			  uint64_t seq, struct amdgpu_fence **fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (seq > ring->fence_drv.sync_seq[ring->idx])
+		return -EINVAL;
+
+	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+	if ((*fence) == NULL)
+		return -ENOMEM;
+
+	(*fence)->seq = seq;
+	(*fence)->ring = ring;
+	(*fence)->owner = owner;
+	fence_init(&(*fence)->base, &amdgpu_fence_ops,
+		&adev->fence_queue.lock, adev->fence_context + ring->idx,
+		(*fence)->seq);
+	return 0;
+}
+
 /**
  * amdgpu_fence_check_signaled - callback from fence_queue
  *
@@ -517,8 +549,9 @@ static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
  * the wait timeout, or an error for all other cases.
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
-				   bool intr, long timeout)
+static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
+					  u64 *target_seq, bool intr,
+					  long timeout)
 {
 	uint64_t last_seq[AMDGPU_MAX_RINGS];
 	bool signaled;