Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1aa4051b authored by Junwei Zhang's avatar Junwei Zhang Committed by Alex Deucher
Browse files

drm/amdgpu: modify amdgpu_fence_wait_any() to amdgpu_fence_wait_multiple()



Rename the function and update the related callers accordingly.
Add a new bool parameter, wait_all.

If wait_all is true, it will return when all fences are signaled or timeout.
If wait_all is false, it will return when any fence is signaled or timeout.

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 52293c67
Loading
Loading
Loading
Loading
+6 −3
Original line number Diff line number Diff line
@@ -440,9 +440,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
			  struct amdgpu_fence **fences,
			  bool intr, long t);
signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
				       struct amdgpu_fence **array,
				       uint32_t count,
				       bool wait_all,
				       bool intr,
				       signed long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);

+61 −18
Original line number Diff line number Diff line
@@ -836,13 +836,12 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
static bool amdgpu_test_signaled_any(struct amdgpu_fence **fences, uint32_t count)
{
	int idx;
	struct amdgpu_fence *fence;

	idx = 0;
	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
	for (idx = 0; idx < count; ++idx) {
		fence = fences[idx];
		if (fence) {
			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
@@ -852,6 +851,22 @@ static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
	return false;
}

/* Return true only when every non-NULL fence in the array has
 * FENCE_FLAG_SIGNALED_BIT set. NULL slots are skipped, i.e. they
 * count as signaled for the purposes of a wait-all.
 */
static bool amdgpu_test_signaled_all(struct amdgpu_fence **fences, uint32_t count)
{
	int idx;
	struct amdgpu_fence *fence;

	for (idx = 0; idx < count; ++idx) {
		fence = fences[idx];
		if (fence) {
			if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
				return false;
		}
	}

	return true;
}

struct amdgpu_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
@@ -867,33 +882,56 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_device *adev = fence->ring->adev;

	memset(&array[0], 0, sizeof(array));
	array[0] = fence;

	return amdgpu_fence_wait_any(adev, array, intr, t);
	return amdgpu_fence_wait_multiple(adev, &fence, 1, false, intr, t);
}

/* wait until any fence in array signaled */
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
				struct amdgpu_fence **array, bool intr, signed long t)
/**
 * Wait the fence array with timeout
 *
 * @adev:     amdgpu device
 * @array:    the fence array with amdgpu fence pointer
 * @count:    the number of the fence array
 * @wait_all: the flag of wait all(true) or wait any(false)
 * @intr:     whether the sleeping task is set interruptible
 * @t:        timeout to wait
 *
 * If wait_all is true, it will return when all fences are signaled or timeout.
 * If wait_all is false, it will return when any fence is signaled or timeout.
 */
signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
				       struct amdgpu_fence **array,
				       uint32_t count,
				       bool wait_all,
				       bool intr,
				       signed long t)
{
	long idx = 0;
	struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
	struct amdgpu_wait_cb *cb;
	struct amdgpu_fence *fence;

	BUG_ON(!array);

	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		t = -ENOMEM;
		goto err_free_cb;
	}

	for (idx = 0; idx < count; ++idx) {
		fence = array[idx];
		if (fence) {
			cb[idx].task = current;
			if (fence_add_callback(&fence->base,
					&cb[idx].base, amdgpu_fence_wait_cb))
				return t; /* return if fence is already signaled */
					&cb[idx].base, amdgpu_fence_wait_cb)) {
				/* The fence is already signaled */
				if (wait_all)
					continue;
				else
					goto fence_rm_cb;
			}
		}
	}

@@ -907,7 +945,9 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
		 * amdgpu_test_signaled_any must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (amdgpu_test_signaled_any(array))
		if (!wait_all && amdgpu_test_signaled_any(array, count))
			break;
		if (wait_all && amdgpu_test_signaled_all(array, count))
			break;

		if (adev->needs_reset) {
@@ -923,13 +963,16 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,

	__set_current_state(TASK_RUNNING);

	idx = 0;
	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
fence_rm_cb:
	for (idx = 0; idx < count; ++idx) {
		fence = array[idx];
		if (fence)
			fence_remove_callback(&fence->base, &cb[idx].base);
	}

err_free_cb:
	kfree(cb);

	return t;
}

+2 −1
Original line number Diff line number Diff line
@@ -352,7 +352,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		spin_unlock(&sa_manager->wq.lock);
		t = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT);
		t = amdgpu_fence_wait_multiple(adev, fences, AMDGPU_MAX_RINGS, false, false,
						MAX_SCHEDULE_TIMEOUT);
		r = (t > 0) ? 0 : t;
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for block */