Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit df9deb33, authored by Linux Build Service Account and committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Correct the order of preemption packets"

parents 5000c433 f14b7735
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -711,6 +711,7 @@ struct adreno_gpudev {
				struct adreno_ringbuffer *, unsigned int *,
				struct kgsl_context *, uint64_t cond_addr,
				struct kgsl_memobj_node *);
	int (*preemption_yield_enable)(unsigned int *);
	int (*preemption_post_ibsubmit)(struct adreno_device *,
				struct adreno_ringbuffer *, unsigned int *,
				struct kgsl_context *);
+23 −16
Original line number Diff line number Diff line
@@ -317,10 +317,6 @@ static int a5xx_preemption_token(struct adreno_device *adreno_dev,
{
	unsigned int *cmds_orig = cmds;

	/* Enable yield in RB only */
	*cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
	*cmds++ = 1;

	*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
	cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
	*cmds++ = 1;
@@ -411,18 +407,11 @@ static int a5xx_preemption_pre_ibsubmit(
}

/*
 * a5xx_preemption_post_ibsubmit() - Below PM4 commands are
 * a5xx_preemption_yield_enable() - Below PM4 commands are
 * added after every cmdbatch submission.
 */
static int a5xx_preemption_post_ibsubmit(
			struct adreno_device *adreno_dev,
			struct adreno_ringbuffer *rb, unsigned int *cmds,
			struct kgsl_context *context)
static int a5xx_preemption_yield_enable(unsigned int *cmds)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int *cmds_orig = cmds;
	unsigned int ctx_id = context ? context->id : 0;

	/*
	 * SRM -- set render mode (ex binning, direct render etc)
	 * SRM is set by UMD usually at start of IB to tell CP the type of
@@ -437,11 +426,27 @@ static int a5xx_preemption_post_ibsubmit(
	*cmds++ = 0;
	*cmds++ = 0;

	cmds += a5xx_preemption_token(adreno_dev, rb, cmds,
	*cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
	*cmds++ = 1;

	return 8;
}

/*
 * a5xx_preemption_post_ibsubmit() - Below PM4 commands are
 * added after every cmdbatch submission.
 */
static int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
			struct adreno_ringbuffer *rb, unsigned int *cmds,
			struct kgsl_context *context)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	/* Fall back to the global (id 0) memstore slot when no context */
	unsigned int ctx_id = context ? context->id : 0;

	/*
	 * Emit the preemption token pointing at this context's "preempted"
	 * memstore field. a5xx_preemption_token() returns the number of
	 * dwords it wrote, which is what callers add to their command
	 * pointer, so return it directly. (The stale
	 * "return cmds - cmds_orig;" line was unreachable diff residue and
	 * referenced a variable not declared in this function.)
	 */
	return a5xx_preemption_token(adreno_dev, rb, cmds,
				device->memstore.gpuaddr +
				KGSL_MEMSTORE_OFFSET(ctx_id, preempted));
}

static void a5xx_platform_setup(struct adreno_device *adreno_dev)
@@ -4182,6 +4187,8 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
	.regulator_disable = a5xx_regulator_disable,
	.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
	.preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit,
	.preemption_yield_enable =
				a5xx_preemption_yield_enable,
	.preemption_post_ibsubmit =
			a5xx_preemption_post_ibsubmit,
	.preemption_token = a5xx_preemption_token,
+12 −3
Original line number Diff line number Diff line
@@ -520,7 +520,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,

	if (gpudev->preemption_post_ibsubmit &&
				adreno_is_preemption_enabled(adreno_dev))
		total_sizedwords += 13;
		total_sizedwords += 5;

	/*
	 * a5xx uses 64 bit memory address. pm4 commands that involve read/write
@@ -707,8 +707,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,

	if (gpudev->preemption_post_ibsubmit &&
				adreno_is_preemption_enabled(adreno_dev))
		ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
					rb, ringcmds, &drawctxt->base);
		ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, rb,
					ringcmds, &drawctxt->base);

	/*
	 * If we have more ringbuffer commands than space reserved
@@ -865,6 +865,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
		struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_memobj_node *ib;
	unsigned int numibs = 0;
	unsigned int *link;
@@ -983,6 +984,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
			dwords += 2;
	}

	if (gpudev->preemption_yield_enable &&
				adreno_is_preemption_enabled(adreno_dev))
		dwords += 8;

	link = kzalloc(sizeof(unsigned int) *  dwords, GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
@@ -1033,6 +1038,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
		}
	}

	if (gpudev->preemption_yield_enable &&
				adreno_is_preemption_enabled(adreno_dev))
		cmds += gpudev->preemption_yield_enable(cmds);

	if (cmdbatch_kernel_profiling) {
		cmds += _get_alwayson_counter(adreno_dev, cmds,
			adreno_dev->cmdbatch_profile_buffer.gpuaddr +