Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 929330b5 authored by Carter Cooper's avatar Carter Cooper Committed by Jordan Crouse
Browse files

msm: kgsl: Use the GPU to write the RPTR



The memstore shared between the CPU and GPU is old but cannot be
changed. Rather than stealing values from it where available,
add a new block of shared memory that is exclusive to the driver
and GPU. This block can be used more freely than the old
memstore block.

Program the GPU to write the RPTR out to an address the CPU can read rather
than having the CPU read a GPU register directly. There are some very
small but very real conditions where different blocks on the GPU have
outdated values for the RPTR. When scheduling preemption, the value read
from the register may not reflect the actual value of the RPTR in the CP.
This can cause the save/restore from preemption to give back incorrect RPTR
values causing much confusion between the GPU and CPU.

Remove the ringbuffers copy of the read pointer shadow.
Now that the GPU will update a shared memory address with the
value of the read pointer, there is no need to poll the register
to get the value and then keep a local copy of it.

Change-Id: Ic44759d1a5c6e48b2f0f566ea8c153f01cf68279
Signed-off-by: default avatarCarter Cooper <ccooper@codeaurora.org>
Signed-off-by: default avatarJordan Crouse <jcrouse@codeaurora.org>
parent 905de01d
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -60,6 +60,8 @@
#define A5XX_CP_RB_BASE                  0x800
#define A5XX_CP_RB_BASE_HI               0x801
#define A5XX_CP_RB_CNTL                  0x802
#define A5XX_CP_RB_RPTR_ADDR_LO          0x804
#define A5XX_CP_RB_RPTR_ADDR_HI          0x805
#define A5XX_CP_RB_RPTR                  0x806
#define A5XX_CP_RB_WPTR                  0x807
#define A5XX_CP_PFP_STAT_ADDR            0x808
+28 −8
Original line number Diff line number Diff line
@@ -170,6 +170,30 @@ void adreno_writereg64(struct adreno_device *adreno_dev,
			gpudev->reg_offsets->offsets[hi], upper_32_bits(val));
}

/**
 * adreno_get_rptr() - Get the current ringbuffer read pointer
 * @rb: Pointer to the ringbuffer to query
 *
 * Get the latest rptr. The value is read from the scratch memory copy
 * that the GPU writes out, rather than the CP_RB_RPTR register, except
 * on a3xx targets which read the register directly (presumably a3xx
 * lacks the scratch RPTR write-out - only a4xx/a5xx are wired up here).
 *
 * Return: The current read pointer for @rb
 */
unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
	unsigned int rptr = 0;

	/* Kernel style: brace both branches when either needs braces */
	if (adreno_is_a3xx(adreno_dev)) {
		adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
				&rptr);
	} else {
		struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

		kgsl_sharedmem_readl(&device->scratch, &rptr,
				SCRATCH_RPTR_OFFSET(rb->id));
	}

	return rptr;
}

/**
 * adreno_of_read_property() - Adreno read property
 * @node: Device node
@@ -2147,8 +2171,6 @@ bool adreno_isidle(struct kgsl_device *device)
	if (!kgsl_state_is_awake(device))
		return true;

	adreno_get_rptr(ADRENO_CURRENT_RINGBUFFER(adreno_dev));

	/*
	 * wptr is updated when we add commands to ringbuffer, add a barrier
	 * to make sure updated wptr is compared to rptr
@@ -2159,15 +2181,13 @@ bool adreno_isidle(struct kgsl_device *device)
	 * ringbuffer is truly idle when all ringbuffers read and write
	 * pointers are equal
	 */

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		if (rb->rptr != rb->wptr)
			break;
		if (!adreno_rb_empty(rb))
			return false;
	}

	if (i == adreno_dev->num_ringbuffers)
	return adreno_hw_isidle(adreno_dev);

	return false;
}

/**
+9 −18
Original line number Diff line number Diff line
@@ -456,6 +456,8 @@ enum adreno_regs {
	ADRENO_REG_CP_WFI_PEND_CTR,
	ADRENO_REG_CP_RB_BASE,
	ADRENO_REG_CP_RB_BASE_HI,
	ADRENO_REG_CP_RB_RPTR_ADDR_LO,
	ADRENO_REG_CP_RB_RPTR_ADDR_HI,
	ADRENO_REG_CP_RB_RPTR,
	ADRENO_REG_CP_RB_WPTR,
	ADRENO_REG_CP_CNTL,
@@ -1270,24 +1272,6 @@ static inline unsigned int adreno_preempt_state(
		state;
}

/**
 * adreno_get_rptr() - Get the current ringbuffer read pointer
 * @rb: Pointer to the ringbuffer to query
 *
 * Get the current read pointer from the GPU register. The register is
 * only read when @rb is the currently active ringbuffer and preemption
 * is in the CLEAR state; otherwise the cached rb->rptr shadow is
 * returned as-is (it may be stale in that case - see the commit text
 * above about blocks holding outdated RPTR values).
 */
static inline unsigned int
adreno_get_rptr(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
	/* Refresh the shadow copy straight from CP_RB_RPTR when safe */
	if (adreno_dev->cur_rb == rb &&
		adreno_preempt_state(adreno_dev,
			ADRENO_DISPATCHER_PREEMPT_CLEAR))
		adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &(rb->rptr));

	return rb->rptr;
}

static inline bool adreno_is_preemption_enabled(
				struct adreno_device *adreno_dev)
{
@@ -1369,6 +1353,13 @@ void adreno_readreg64(struct adreno_device *adreno_dev,
void adreno_writereg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t val);

unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb);

/*
 * adreno_rb_empty() - Check if the ringbuffer has been fully consumed.
 * The ringbuffer is empty when the GPU read pointer has caught up with
 * the CPU write pointer.
 */
static inline bool adreno_rb_empty(struct adreno_ringbuffer *rb)
{
	unsigned int rptr = adreno_get_rptr(rb);

	return rptr == rb->wptr;
}

static inline bool adreno_soft_fault_detect(struct adreno_device *adreno_dev)
{
	return adreno_dev->fast_hang_detect &&
+38 −26
Original line number Diff line number Diff line
@@ -196,9 +196,10 @@ static void a4xx_preemption_start(struct adreno_device *adreno_dev,
	/* scratch REG9 corresponds to CP_RB_CNTL register */
	kgsl_regwrite(device, A4XX_CP_SCRATCH_REG9, val);
	/* scratch REG10 corresponds to rptr address */
	kgsl_regwrite(device, A4XX_CP_SCRATCH_REG10, 0);
	kgsl_regwrite(device, A4XX_CP_SCRATCH_REG10,
			SCRATCH_RPTR_GPU_ADDR(device, rb->id));
	/* scratch REG11 corresponds to rptr */
	kgsl_regwrite(device, A4XX_CP_SCRATCH_REG11, rb->rptr);
	kgsl_regwrite(device, A4XX_CP_SCRATCH_REG11, adreno_get_rptr(rb));
	/* scratch REG12 corresponds to wptr */
	kgsl_regwrite(device, A4XX_CP_SCRATCH_REG12, rb->wptr);
	/*
@@ -222,7 +223,6 @@ static void a4xx_preemption_save(struct adreno_device *adreno_dev,
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_regread(device, A4XX_CP_SCRATCH_REG18, &rb->rptr);
	kgsl_regread(device, A4XX_CP_SCRATCH_REG23, &rb->gpr11);
}

@@ -255,8 +255,7 @@ static int a4xx_preemption_pre_ibsubmit(
	int exec_ib = 0;

	cmds += a4xx_preemption_token(adreno_dev, rb, cmds,
				device->memstore.gpuaddr +
				KGSL_MEMSTORE_OFFSET(context->id, preempted));
			MEMSTORE_ID_GPU_ADDR(device, context->id, preempted));

	if (ib)
		exec_ib = 1;
@@ -839,6 +838,7 @@ static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A4XX_CP_WFI_PEND_CTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A4XX_CP_RB_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, ADRENO_REG_SKIP),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO, A4XX_CP_RB_RPTR_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A4XX_CP_RB_RPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A4XX_CP_RB_WPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A4XX_CP_CNTL),
@@ -1634,8 +1634,15 @@ static int a4xx_rb_start(struct adreno_device *adreno_dev,
			 unsigned int start_type)
{
	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
	struct kgsl_device *device = &adreno_dev->dev;
	uint64_t addr;
	int ret;

	addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);

	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
@@ -1644,8 +1651,8 @@ static int a4xx_rb_start(struct adreno_device *adreno_dev,
	 */

	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
		(1 << 27));
			((ilog2(4) << 8) & 0x1F00) |
			(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F));

	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
			  rb->buffer_desc.gpuaddr);
@@ -1897,7 +1904,7 @@ static int a4xx_submit_preempt_token(struct adreno_ringbuffer *rb,

	ringcmds += gpudev->preemption_token(adreno_dev, rb, ringcmds,
				device->memstore.gpuaddr +
				KGSL_MEMSTORE_RB_OFFSET(rb, preempted));
				MEMSTORE_RB_OFFSET(rb, preempted));

	if ((uint)(ringcmds - start) > total_sizedwords) {
		KGSL_DRV_ERR(device, "Insufficient rb size allocated\n");
@@ -1960,7 +1967,9 @@ static void a4xx_preempt_trig_state(
			KGSL_DRV_INFO(device,
			"Preemption completed without interrupt\n");
			trace_adreno_hw_preempt_trig_to_comp(adreno_dev->cur_rb,
					adreno_dev->next_rb);
					adreno_dev->next_rb,
					adreno_get_rptr(adreno_dev->cur_rb),
					adreno_get_rptr(adreno_dev->next_rb));
			atomic_set(&dispatcher->preemption_state,
				ADRENO_DISPATCHER_PREEMPT_COMPLETE);
			adreno_dispatcher_schedule(device);
@@ -1988,9 +1997,7 @@ static void a4xx_preempt_trig_state(
	 * commands that got submitted to current RB after triggering preemption
	 * then submit them as those commands may have a preempt token in them
	 */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
			&adreno_dev->cur_rb->rptr);
	if (adreno_dev->cur_rb->rptr != adreno_dev->cur_rb->wptr) {
	if (!adreno_rb_empty(adreno_dev->cur_rb)) {
		/*
		 * Memory barrier before informing the
		 * hardware of new commands
@@ -2011,7 +2018,9 @@ static void a4xx_preempt_trig_state(
	dispatcher->preempt_token_submit = 1;
	adreno_dev->cur_rb->wptr_preempt_end = adreno_dev->cur_rb->wptr;
	trace_adreno_hw_preempt_token_submit(adreno_dev->cur_rb,
						adreno_dev->next_rb);
			adreno_dev->next_rb,
			adreno_get_rptr(adreno_dev->cur_rb),
			adreno_get_rptr(adreno_dev->next_rb));
}

/**
@@ -2035,10 +2044,6 @@ static void a4xx_preempt_clear_state(
	if (!kgsl_state_is_awake(device))
		return;

	/* keep updating the current rptr when preemption is clear */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
			&(adreno_dev->cur_rb->rptr));

	highest_busy_rb = adreno_dispatcher_get_highest_busy_rb(adreno_dev);
	if (!highest_busy_rb)
		return;
@@ -2056,7 +2061,7 @@ static void a4xx_preempt_clear_state(
		 * if switching to lower priority make sure that the rptr and
		 * wptr are equal, when the lower rb is not starved
		 */
		if (adreno_dev->cur_rb->rptr != adreno_dev->cur_rb->wptr)
		if (!adreno_rb_empty(adreno_dev->cur_rb))
			return;
		/*
		 * switch to default context because when we switch back
@@ -2095,7 +2100,9 @@ static void a4xx_preempt_clear_state(
		msecs_to_jiffies(ADRENO_DISPATCH_PREEMPT_TIMEOUT));

	trace_adreno_hw_preempt_clear_to_trig(adreno_dev->cur_rb,
						adreno_dev->next_rb);
			adreno_dev->next_rb,
			adreno_get_rptr(adreno_dev->cur_rb),
			adreno_get_rptr(adreno_dev->next_rb));
	/* issue PREEMPT trigger */
	adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);
	/*
@@ -2146,6 +2153,7 @@ static void a4xx_preempt_complete_state(
	struct adreno_dispatcher_cmdqueue *dispatch_q;
	unsigned int wptr, rbbase;
	unsigned int val, val1;
	unsigned int prevrptr;

	del_timer_sync(&dispatcher->preempt_timer);

@@ -2176,12 +2184,15 @@ static void a4xx_preempt_complete_state(
	dispatch_q = &(adreno_dev->cur_rb->dispatch_q);
	/* new RB is the current RB */
	trace_adreno_hw_preempt_comp_to_clear(adreno_dev->next_rb,
						adreno_dev->cur_rb);
			adreno_dev->cur_rb,
			adreno_get_rptr(adreno_dev->next_rb),
			adreno_get_rptr(adreno_dev->cur_rb));
	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->cur_rb->preempted_midway = 0;
	adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
	adreno_dev->next_rb = NULL;

	if (adreno_disp_preempt_fair_sched) {
		/* starved rb is now scheduled so unhalt dispatcher */
		if (ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED ==
@@ -2194,7 +2205,7 @@ static void a4xx_preempt_complete_state(
		 * If the outgoing RB has commands then set the
		 * busy time for it
		 */
		if (adreno_dev->prev_rb->rptr != adreno_dev->prev_rb->wptr) {
		if (!adreno_rb_empty(adreno_dev->prev_rb)) {
			adreno_dev->prev_rb->starve_timer_state =
				ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
			adreno_dev->prev_rb->sched_timer = jiffies;
@@ -2205,15 +2216,16 @@ static void a4xx_preempt_complete_state(
	}
	atomic_set(&dispatcher->preemption_state,
		ADRENO_DISPATCHER_PREEMPT_CLEAR);

	prevrptr = adreno_get_rptr(adreno_dev->prev_rb);

	if (adreno_compare_prio_level(adreno_dev->prev_rb->id,
				adreno_dev->cur_rb->id) < 0) {
		if (adreno_dev->prev_rb->wptr_preempt_end !=
			adreno_dev->prev_rb->rptr)
		if (adreno_dev->prev_rb->wptr_preempt_end != prevrptr)
			adreno_dev->prev_rb->preempted_midway = 1;
	} else if (adreno_dev->prev_rb->wptr_preempt_end !=
		adreno_dev->prev_rb->rptr) {
	} else if (adreno_dev->prev_rb->wptr_preempt_end != prevrptr)
		BUG();
	}

	/* submit wptr if required for new rb */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
	if (adreno_dev->cur_rb->wptr != wptr) {
+37 −43
Original line number Diff line number Diff line
@@ -194,6 +194,8 @@ static void a5xx_preemption_start(struct adreno_device *adreno_dev,

	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(wptr), rb->wptr);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(rptr), adreno_get_rptr(rb));
	kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
		lower_32_bits(rb->preemption_desc.gpuaddr));
	kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
@@ -225,16 +227,8 @@ static void a5xx_preemption_start(struct adreno_device *adreno_dev,
		offsetof(struct a5xx_cp_smmu_info, context_idr), contextidr);
}

/*
 * a5xx_preemption_save() - Save the state after preemption is done.
 * Reads the rptr recorded by the CP in the per-ringbuffer context
 * record (preemption_desc) back into the driver's rb->rptr shadow.
 * NOTE(review): @adreno_dev is unused here - presumably kept to match
 * the preemption callback signature; verify against the gpudev ops.
 */
static void a5xx_preemption_save(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb)
{
	/* save the rptr from ctxrecord here */
	kgsl_sharedmem_readl(&rb->preemption_desc, &rb->rptr,
		PREEMPT_RECORD(rptr));
}
#define _CP_CNTL (((ilog2(4) << 8) & 0x1F00) | \
			(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))

#ifdef CONFIG_MSM_KGSL_IOMMU
static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
@@ -290,7 +284,10 @@ static int a5xx_preemption_init(struct adreno_device *adreno_dev)
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
			PREEMPT_RECORD(data), 0);
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
			PREEMPT_RECORD(cntl), 0x0800000C);
			PREEMPT_RECORD(cntl), _CP_CNTL);
		kgsl_sharedmem_writeq(device, &rb->preemption_desc,
				PREEMPT_RECORD(rptr_addr),
				SCRATCH_RPTR_GPU_ADDR(device, i));
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
			PREEMPT_RECORD(rptr), 0);
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
@@ -445,9 +442,7 @@ static int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
	unsigned int ctx_id = context ? context->id : 0;

	return a5xx_preemption_token(adreno_dev, rb, cmds,
				device->memstore.gpuaddr +
				KGSL_MEMSTORE_OFFSET(ctx_id, preempted));

			MEMSTORE_ID_GPU_ADDR(device, ctx_id, preempted));
}

static void a5xx_platform_setup(struct adreno_device *adreno_dev)
@@ -2327,8 +2322,8 @@ static int _preemption_init(
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int *cmds_orig = cmds;
	uint64_t gpuaddr = rb->preemption_desc.gpuaddr;
	uint64_t gpuaddr_token = device->memstore.gpuaddr +
				KGSL_MEMSTORE_OFFSET(0, preempted);
	uint64_t gpuaddr_token = MEMSTORE_ID_GPU_ADDR(device,
			KGSL_MEMSTORE_GLOBAL, preempted);

	/* Turn CP protection OFF */
	*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
@@ -2694,8 +2689,15 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
			 unsigned int start_type)
{
	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
	struct kgsl_device *device = &adreno_dev->dev;
	uint64_t addr;
	int ret;

	addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);

	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
@@ -2703,9 +2705,7 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
	 * in certain circumstances.
	 */

	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
		(1 << 27));
	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL, _CP_CNTL);

	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
			rb->buffer_desc.gpuaddr);
@@ -3246,6 +3246,10 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A5XX_CP_WFI_PEND_CTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A5XX_CP_RB_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A5XX_CP_RB_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			A5XX_CP_RB_RPTR_ADDR_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
			A5XX_CP_RB_RPTR_ADDR_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A5XX_CP_RB_RPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A5XX_CP_RB_WPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A5XX_CP_CNTL),
@@ -3904,7 +3908,9 @@ static void a5xx_preempt_trig_state(
				"Preemption completed without interrupt\n");
				trace_adreno_hw_preempt_trig_to_comp(
					adreno_dev->cur_rb,
					adreno_dev->next_rb);
					adreno_dev->next_rb,
					adreno_get_rptr(adreno_dev->cur_rb),
					adreno_get_rptr(adreno_dev->next_rb));
				atomic_set(&dispatcher->preemption_state,
					ADRENO_DISPATCHER_PREEMPT_COMPLETE);
			} else {
@@ -3966,10 +3972,6 @@ static void a5xx_preempt_clear_state(
	if (!kgsl_state_is_awake(device))
		return;

	/* keep updating the current rptr when preemption is clear */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
			&(adreno_dev->cur_rb->rptr));

	highest_busy_rb = adreno_dispatcher_get_highest_busy_rb(adreno_dev);
	if (!highest_busy_rb)
		return;
@@ -3992,7 +3994,7 @@ static void a5xx_preempt_clear_state(
		 * if switching to lower priority make sure that the rptr and
		 * wptr are equal, when the lower rb is not starved
		 */
		if (adreno_dev->cur_rb->rptr != adreno_dev->cur_rb->wptr)
		if (!adreno_rb_empty(adreno_dev->cur_rb))
			return;
		/*
		 * switch to default context because when we switch back
@@ -4011,10 +4013,6 @@ static void a5xx_preempt_clear_state(
			return;
	}

	/* rptr could be updated in drawctxt switch above, update it here */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
			&(adreno_dev->cur_rb->rptr));

	/* turn on IOMMU as the preemption may trigger pt switch */
	kgsl_mmu_enable_clk(&device->mmu);

@@ -4032,7 +4030,9 @@ static void a5xx_preempt_clear_state(
		msecs_to_jiffies(ADRENO_DISPATCH_PREEMPT_TIMEOUT));

	trace_adreno_hw_preempt_clear_to_trig(adreno_dev->cur_rb,
						adreno_dev->next_rb);
			adreno_dev->next_rb,
			adreno_get_rptr(adreno_dev->cur_rb),
			adreno_get_rptr(adreno_dev->next_rb));
	/* issue PREEMPT trigger */
	adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);

@@ -4092,12 +4092,12 @@ static void a5xx_preempt_complete_state(
		return;
	}

	a5xx_preemption_save(adreno_dev, adreno_dev->cur_rb);

	dispatch_q = &(adreno_dev->cur_rb->dispatch_q);
	/* new RB is the current RB */
	trace_adreno_hw_preempt_comp_to_clear(adreno_dev->next_rb,
						adreno_dev->cur_rb);
			adreno_dev->cur_rb,
			adreno_get_rptr(adreno_dev->next_rb),
			adreno_get_rptr(adreno_dev->cur_rb));
	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->cur_rb->preempted_midway = 0;
@@ -4116,7 +4116,8 @@ static void a5xx_preempt_complete_state(
		 * If the outgoing RB has commands then set the
		 * busy time for it
		 */
		if (adreno_dev->prev_rb->rptr != adreno_dev->prev_rb->wptr) {

		if (!adreno_rb_empty(adreno_dev->prev_rb)) {
			adreno_dev->prev_rb->starve_timer_state =
				ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
			adreno_dev->prev_rb->sched_timer = jiffies;
@@ -4142,13 +4143,10 @@ static void a5xx_preempt_complete_state(
	adreno_preempt_process_dispatch_queue(adreno_dev, dispatch_q);
}

static void a5xx_preemption_schedule(
				struct adreno_device *adreno_dev)
static void a5xx_preemption_schedule(struct adreno_device *adreno_dev)
{
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	int i = 0;
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;

	if (!adreno_is_preemption_enabled(adreno_dev))
		return;
@@ -4161,10 +4159,6 @@ static void a5xx_preemption_schedule(
	 */
	smp_mb();

	if (KGSL_STATE_ACTIVE == device->state)
		FOR_EACH_RINGBUFFER(adreno_dev, rb, i)
			rb->rptr = adreno_get_rptr(rb);

	switch (atomic_read(&dispatcher->preemption_state)) {
	case ADRENO_DISPATCHER_PREEMPT_CLEAR:
		a5xx_preempt_clear_state(adreno_dev);
Loading