Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0c418f10 authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: remove the exclusive lock



Finally getting rid of it.

Signed-off-by: Christian König <christian.koenig@amd.com>
parent b7e4dad3
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -1955,7 +1955,6 @@ struct amdgpu_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;
	struct rw_semaphore		exclusive_lock;

	/* ASIC */
	enum amd_asic_type		asic_type;
+2 −8
Original line number Diff line number Diff line
@@ -831,11 +831,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	bool reserved_buffers = false;
	int i, r;

	down_read(&adev->exclusive_lock);
	if (!adev->accel_working) {
		up_read(&adev->exclusive_lock);
	if (!adev->accel_working)
		return -EBUSY;
	}

	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
	if (!parser)
@@ -843,8 +840,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	r = amdgpu_cs_parser_init(parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		kfree(parser);
		up_read(&adev->exclusive_lock);
		amdgpu_cs_parser_fini(parser, r, false);
		r = amdgpu_cs_handle_lockup(adev, r);
		return r;
	}
@@ -915,14 +911,12 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)

		mutex_unlock(&job->job_lock);
		amdgpu_cs_parser_fini_late(parser);
		up_read(&adev->exclusive_lock);
		return 0;
	}

	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
	up_read(&adev->exclusive_lock);
	r = amdgpu_cs_handle_lockup(adev, r);
	return r;
}
+0 −4
Original line number Diff line number Diff line
@@ -1418,7 +1418,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	init_rwsem(&adev->exclusive_lock);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

@@ -1814,8 +1813,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
	int i, r;
	int resched;

	down_write(&adev->exclusive_lock);

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
@@ -1879,7 +1876,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
		dev_info(adev->dev, "GPU reset failed\n");
	}

	up_write(&adev->exclusive_lock);
	return r;
}

+1 −6
Original line number Diff line number Diff line
@@ -47,11 +47,8 @@ static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
	fence = to_amdgpu_fence(*f);
	if (fence) {
		r = fence_wait(&fence->base, false);
		if (r == -EDEADLK) {
			up_read(&adev->exclusive_lock);
		if (r == -EDEADLK)
			r = amdgpu_gpu_reset(adev);
			down_read(&adev->exclusive_lock);
		}
	} else
		r = fence_wait(*f, false);

@@ -77,7 +74,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
	unsigned long flags;
	unsigned i;

	down_read(&adev->exclusive_lock);
	amdgpu_flip_wait_fence(adev, &work->excl);
	for (i = 0; i < work->shared_count; ++i)
		amdgpu_flip_wait_fence(adev, &work->shared[i]);
@@ -93,7 +89,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
	amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	up_read(&adev->exclusive_lock);
}

/*
+5 −16
Original line number Diff line number Diff line
@@ -260,17 +260,9 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
				lockup_work.work);
	ring = fence_drv->ring;

	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		amdgpu_fence_schedule_check(ring);
		return;
	}

	if (amdgpu_fence_activity(ring)) {
	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
}
	up_read(&ring->adev->exclusive_lock);
}

/**
 * amdgpu_fence_process - process a fence
@@ -317,18 +309,15 @@ static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	if (down_read_trylock(&adev->exclusive_lock)) {
	amdgpu_fence_process(ring);
		up_read(&adev->exclusive_lock);

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;
	}

	return false;
}

Loading