Donate to the e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0875dc9e authored by Chunming Zhou, committed by Alex Deucher
Browse files

drm/amdgpu: block scheduler when gpu reset

parent eafc9c27
Loading
Loading
Loading
Loading
+13 −2
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
@@ -1895,6 +1896,14 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)

	atomic_inc(&adev->gpu_reset_counter);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;
		kthread_park(ring->sched.thread);
	}
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

@@ -1928,7 +1937,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
			struct amdgpu_ring *ring = adev->rings[i];
			if (!ring)
				continue;

			kthread_unpark(ring->sched.thread);
			amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
@@ -1946,10 +1955,12 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
	} else {
		amdgpu_fence_driver_force_completion(adev);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i])
			if (adev->rings[i]) {
				kthread_unpark(adev->rings[i]->sched.thread);
				kfree(ring_data[i]);
			}
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

+14 −3
Original line number Diff line number Diff line
@@ -476,6 +476,16 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
	wake_up_interruptible(&sched->wake_up_worker);
}

/*
 * amd_sched_blocked - honor a pending park request from a GPU reset
 *
 * @sched: scheduler instance (currently unused; kept for interface symmetry)
 *
 * If the reset path has asked this scheduler kthread to park (via
 * kthread_park()), park here and report true so the caller knows job
 * selection was blocked; otherwise report false.
 */
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (!kthread_should_park())
		return false;

	/* Sleep in the parked state until the reset path unparks us. */
	kthread_parkme();
	return true;
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
@@ -485,13 +495,14 @@ static int amd_sched_main(void *param)
	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
			(entity = amd_sched_select_entity(sched)) ||
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)