Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e2d7f1c1 authored by Jordan Crouse
Browse files

msm: kgsl: Clean up preemption



Move the preemption timer into the generic adreno code so we don't need
to destroy it in target-specific code, and generally get rid of a bunch
of static assumptions in the target-specific code.

Change-Id: Ic0dedbad4fff2c85dcb9d5c1300c45ee659675bd
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 8b9da3dd
Loading
Loading
Loading
Loading
+15 −1
Original line number Diff line number Diff line
@@ -769,7 +769,6 @@ struct adreno_gpudev {
	struct adreno_coresight *coresight[2];

	struct adreno_irq *irq;
	int num_prio_levels;
	unsigned int vbif_xin_halt_ctrl0_mask;
	unsigned int gbif_client_halt_mask;
	unsigned int gbif_arb_halt_mask;
@@ -1683,6 +1682,21 @@ static inline void adreno_deassert_gbif_halt(struct adreno_device *adreno_dev)
				ADRENO_REG_RBBM_GBIF_HALT, 0x0);
	}
}

/**
 * adreno_move_preempt_state - Atomically transition the preemption state
 * @adreno_dev: An Adreno GPU device handle
 * @old: The preemption state we expect to be current
 * @new: The preemption state to transition to
 *
 * Return: True if the state was @old and was updated to @new, false if the
 * current state was something else and no change was made
 */
static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
	enum adreno_preempt_states old, enum adreno_preempt_states new)
{
	int prev = atomic_cmpxchg(&adreno_dev->preempt.state, old, new);

	return prev == old;
}

void adreno_gmu_clear_and_unmask_irqs(struct adreno_device *adreno_dev);
void adreno_gmu_mask_and_clear_irqs(struct adreno_device *adreno_dev);
int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
+0 −1
Original line number Diff line number Diff line
@@ -1402,7 +1402,6 @@ struct adreno_gpudev adreno_a3xx_gpudev = {
	.perfcounters = &a3xx_perfcounters,
	.irq = &a3xx_irq,
	.irq_trace = trace_kgsl_a3xx_irq_status,
	.num_prio_levels = 1,
	.vbif_xin_halt_ctrl0_mask = A30X_VBIF_XIN_HALT_CTRL0_MASK,
	.platform_setup = a3xx_platform_setup,
	.rb_start = a3xx_rb_start,
+0 −2
Original line number Diff line number Diff line
@@ -3013,7 +3013,6 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
	.snapshot = a5xx_snapshot,
	.irq = &a5xx_irq,
	.irq_trace = trace_kgsl_a5xx_irq_status,
	.num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
	.platform_setup = a5xx_platform_setup,
	.init = a5xx_init,
	.remove = a5xx_remove,
@@ -3033,7 +3032,6 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
	.preemption_post_ibsubmit =
			a5xx_preemption_post_ibsubmit,
	.preemption_init = a5xx_preemption_init,
	.preemption_close = a5xx_preemption_close,
	.preemption_schedule = a5xx_preemption_schedule,
	.clk_set_options = a5xx_clk_set_options,
	.read_alwayson = a5xx_read_alwayson,
+0 −1
Original line number Diff line number Diff line
@@ -231,7 +231,6 @@ void a5xx_preemption_trigger(struct adreno_device *adreno_dev);
void a5xx_preemption_schedule(struct adreno_device *adreno_dev);
void a5xx_preemption_start(struct adreno_device *adreno_dev);
int a5xx_preemption_init(struct adreno_device *adreno_dev);
void a5xx_preemption_close(struct adreno_device *adreno_dev);
int a5xx_preemption_yield_enable(unsigned int *cmds);

unsigned int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
+12 −66
Original line number Diff line number Diff line
@@ -41,12 +41,6 @@ static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer)
	spin_unlock_irqrestore(&rb->preempt_lock, flags);
}

static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
	enum adreno_preempt_states old, enum adreno_preempt_states new)
{
	return (atomic_cmpxchg(&adreno_dev->preempt.state, old, new) == old);
}

static void _a5xx_preemption_done(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -150,21 +144,6 @@ static void _a5xx_preemption_worker(struct work_struct *work)
	mutex_unlock(&device->mutex);
}

static void _a5xx_preemption_timer(struct timer_list *t)
{
	struct adreno_preemption *preempt = from_timer(preempt, t, timer);
	struct adreno_device *adreno_dev = container_of(preempt,
						struct adreno_device, preempt);

	/* We should only be here from a triggered state */
	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_FAULTED))
		return;

	/* Schedule the worker to take care of the details */
	queue_work(system_unbound_wq, &adreno_dev->preempt.work);
}

/* Find the highest priority active ringbuffer */
static struct adreno_ringbuffer *a5xx_next_ringbuffer(
		struct adreno_device *adreno_dev)
@@ -532,45 +511,10 @@ static int a5xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
	return 0;
}

#if IS_ENABLED(CONFIG_ARM_SMMU)
/*
 * Make sure the global buffer holding the preemption SMMU record exists.
 * An existing valid allocation is reused; a NULL or error-pointer slot is
 * (re)allocated. Returns 0 on success or a negative error code.
 */
static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);

	if (!IS_ERR_OR_NULL(iommu->smmu_info))
		return 0;

	/* Allocate mem for storing preemption smmu record */
	iommu->smmu_info = kgsl_allocate_global(device, PAGE_SIZE,
		KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
		"smmu_info");

	return PTR_ERR_OR_ZERO(iommu->smmu_info);
}
#else
/* No SMMU support compiled in: preemption setup cannot proceed */
static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
	return -ENODEV;
}
#endif

/* Stop the preemption timer for this device */
static void _preemption_close(struct adreno_device *adreno_dev)
{
	del_timer(&adreno_dev->preempt.timer);
}

/*
 * Tear down preemption resources, but only when preemption was actually
 * brought up on this device (ADRENO_DEVICE_PREEMPTION bit set).
 */
void a5xx_preemption_close(struct adreno_device *adreno_dev)
{
	if (test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv))
		_preemption_close(adreno_dev);
}

int a5xx_preemption_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
	struct adreno_preemption *preempt = &adreno_dev->preempt;
	struct adreno_ringbuffer *rb;
	int ret;
@@ -583,8 +527,6 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)

	INIT_WORK(&preempt->work, _a5xx_preemption_worker);

	timer_setup(&preempt->timer, _a5xx_preemption_timer, 0);

	/* Allocate mem for storing preemption counters */
	if (IS_ERR_OR_NULL(preempt->counters))
		preempt->counters = kgsl_allocate_global(device,
@@ -594,7 +536,7 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)

	ret = PTR_ERR_OR_ZERO(preempt->counters);
	if (ret)
		goto err;
		return ret;

	addr = preempt->counters->gpuaddr;

@@ -602,16 +544,20 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
		if (ret)
			goto err;
			return ret;

		addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
	}

	ret = a5xx_preemption_iommu_init(adreno_dev);
	/* Allocate mem for storing preemption smmu record */
	if (IS_ERR_OR_NULL(iommu->smmu_info))
		iommu->smmu_info = kgsl_allocate_global(device, PAGE_SIZE,
			KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
			"smmu_info");

err:
	if (ret)
		_preemption_close(adreno_dev);
	if (IS_ERR(iommu->smmu_info))
		return PTR_ERR(iommu->smmu_info);

	return ret;
	set_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
	return 0;
}
Loading