Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit be3a67ed authored by Prakash Kamliya's avatar Prakash Kamliya
Browse files

msm: kgsl: Avoid race conditions with GPU halt variable



1. Take the device mutex before we check for GPU halt.
It is quite possible that, after checking the GPU halt
value, this thread is preempted by the kernel and
another thread updates the halt. That would lead to a
situation where one thread has requested that command
submission stop while this thread is still sending
commands to the ringbuffer.

2. Handle multiple threads changing the halt variable.
It is possible that one thread sets the halt value
and another thread then clears it.

3. init_completion() cannot be used to reinitialize a
completion object that is being reused; the
INIT_COMPLETION macro should be used instead.

CRs-Fixed: 667179
Change-Id: Ie41c69be35bfd3ca14f3ce782ab45efb70aaf46a
Signed-off-by: default avatarPrakash Kamliya <pkamliya@codeaurora.org>
parent 394c9c0d
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -1914,6 +1914,8 @@ static int _adreno_start(struct adreno_device *adreno_dev)
	if (status)
		goto error_irq_off;

	adreno_clear_gpu_halt(adreno_dev);

	/* Start the dispatcher */
	adreno_dispatcher_start(device);

+41 −23
Original line number Diff line number Diff line
@@ -1053,18 +1053,6 @@ static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
	return atomic_read(&adreno_dev->dispatcher.fault);
}

/**
 * adreno_gpu_halt() - Return the halt status of GPU
 * @adreno_dev: A pointer to the adreno_device to query
 *
 * Return the halt request value
 */
static inline unsigned int adreno_gpu_halt(struct adreno_device *adreno_dev)
{
	/* read barrier — presumably pairs with the smp_wmb() in
	 * adreno_set_gpu_halt(); confirm against the writer side */
	smp_rmb();
	return atomic_read(&adreno_dev->halt);
}

/**
 * adreno_set_gpu_fault() - Set the current fault status of the GPU
 * @adreno_dev: A pointer to the adreno_device to set
@@ -1079,17 +1067,6 @@ static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
	smp_wmb();
}

/**
 * adreno_set_gpu_halt() - Set the halt request
 * @adreno_dev: A pointer to the adreno_device to set
 * @state: Value to set
 */
static inline void adreno_set_gpu_halt(struct adreno_device *adreno_dev,
	int state)
{
	atomic_set(&adreno_dev->halt, state);
	/* write barrier so other CPUs observe the new halt value */
	smp_wmb();
}

/**
 * adreno_clear_gpu_fault() - Clear the GPU fault register
@@ -1104,6 +1081,47 @@ static inline void adreno_clear_gpu_fault(struct adreno_device *adreno_dev)
	smp_wmb();
}

/**
 * adreno_gpu_halt() - Return the GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 *
 * Return: the current halt refcount; a non-zero value means a halt
 * has been requested and command submission must be refused (see the
 * check in sendcmd()).
 */
static inline int adreno_gpu_halt(struct adreno_device *adreno_dev)
{
	/* read barrier before sampling the refcount — presumably pairs
	 * with the smp_wmb() in adreno_clear_gpu_halt(); confirm */
	smp_rmb();
	return atomic_read(&adreno_dev->halt);
}


/**
 * adreno_clear_gpu_halt() - Clear the GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 *
 * Forces the refcount back to zero (e.g. on device restart) rather
 * than decrementing it, discarding any outstanding halt requests.
 */
static inline void adreno_clear_gpu_halt(struct adreno_device *adreno_dev)
{
	atomic_set(&adreno_dev->halt, 0);
	/* make the cleared value visible to readers on other CPUs */
	smp_wmb();
}

/**
 * adreno_get_gpu_halt() - Increment GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 *
 * NOTE(review): unlike adreno_clear_gpu_halt() there is no smp_wmb()
 * after the update here — presumably atomic_inc() ordering is deemed
 * sufficient on the target architecture; confirm.
 */
static inline void adreno_get_gpu_halt(struct adreno_device *adreno_dev)
{
	atomic_inc(&adreno_dev->halt);
}

/**
 * adreno_put_gpu_halt() - Decrement GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 *
 * Drops one halt reference. The refcount must never go negative;
 * an underflow indicates an unbalanced get/put pair and is fatal.
 */
static inline void adreno_put_gpu_halt(struct adreno_device *adreno_dev)
{
	int refcount = atomic_dec_return(&adreno_dev->halt);

	BUG_ON(refcount < 0);
}


/*
 * adreno_vbif_start() - Program VBIF registers, called in device start
 * @device: Pointer to device whose vbif data is to be programmed
+9 −8
Original line number Diff line number Diff line
@@ -368,13 +368,14 @@ static int sendcmd(struct adreno_device *adreno_dev,
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
	int ret;

	if (0 != adreno_gpu_halt(adreno_dev))
	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	if (adreno_gpu_halt(adreno_dev) != 0) {
		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
		return -EINVAL;
	}

	dispatcher->inflight++;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	if (dispatcher->inflight == 1 &&
			!test_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv)) {
		/* Time to make the donuts.  Turn on the GPU */
@@ -401,7 +402,7 @@ static int sendcmd(struct adreno_device *adreno_dev,

			if (!test_and_set_bit(ADRENO_DISPATCHER_ACTIVE,
				&dispatcher->priv))
				init_completion(&dispatcher->idle_gate);
				INIT_COMPLETION(dispatcher->idle_gate);
		} else {
			kgsl_active_count_put(device);
			clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
@@ -1432,7 +1433,7 @@ replay:
	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	/* make sure halt is not set during recovery */
	halt = adreno_gpu_halt(adreno_dev);
	adreno_set_gpu_halt(adreno_dev, 0);
	adreno_clear_gpu_halt(adreno_dev);
	ret = adreno_reset(device);
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	/* if any other fault got in until reset then ignore */
@@ -1492,7 +1493,7 @@ replay:

	kfree(replay);
	/* restore halt indicator */
	adreno_set_gpu_halt(adreno_dev, halt);
	atomic_add(halt, &adreno_dev->halt);

	return 1;
}
@@ -2033,7 +2034,7 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev)
		dispatcher->mutex.owner == current)
		BUG_ON(1);

	adreno_set_gpu_halt(adreno_dev, 1);
	adreno_get_gpu_halt(adreno_dev);

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

@@ -2048,7 +2049,7 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev)
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	adreno_set_gpu_halt(adreno_dev, 0);
	adreno_put_gpu_halt(adreno_dev);
	/*
	 * requeue dispatcher work to resubmit pending commands
	 * that may have been blocked due to this idling request