Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ec83a168 authored by Shrenuj Bansal, committed by Tarun Karra
Browse files

msm: kgsl: Use SET_SECURE_MODE packet to put GPU in unsecure mode



Access to the SECVID_TRUST_CNTL register is protected from HLOS.
Instead of writing to this register, use the SET_SECURE_MODE
packet to bring up the GPU in unsecure mode so that we also
execute the zap shader firmware.

Change-Id: I3ada2e94cd24fc060c742e3fbe44249505e17290
Signed-off-by: Shrenuj Bansal <shrenujb@codeaurora.org>
parent 3659eff5
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -654,6 +654,8 @@ struct adreno_gpudev {
	void (*gpudev_init)(struct adreno_device *);
	int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
	int (*hw_init)(struct adreno_device *);
	int (*switch_to_unsecure_mode)(struct adreno_device *,
				struct adreno_ringbuffer *);
	int (*microcode_read)(struct adreno_device *);
	int (*microcode_load)(struct adreno_device *, unsigned int start_type);
	void (*perfcounter_init)(struct adreno_device *);
+19 −3
Original line number Diff line number Diff line
@@ -23,6 +23,7 @@
#include "adreno_trace.h"
#include "adreno_pm4types.h"
#include "adreno_perfcounter.h"
#include "adreno_ringbuffer.h"
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
@@ -1554,9 +1555,6 @@ static void a5xx_start(struct adreno_device *adreno_dev)
	adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
			ARRAY_SIZE(a5xx_vbif_platforms));

	/* GPU comes up in secured mode, make it unsecured by default */
	kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	kgsl_regwrite(device, A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

@@ -1903,6 +1901,23 @@ static int a5xx_rb_init(struct adreno_device *adreno_dev,
	return 0;
}

/*
 * a5xx_switch_to_unsecure_mode() - Submit a SET_SECURE_MODE packet that
 * takes the GPU out of secure mode.
 * @adreno_dev: Pointer to the Adreno device
 * @rb: Ringbuffer to emit the packet on
 *
 * Reserves two dwords in @rb, writes a secure-mode-off packet via
 * cp_secure_mode() and submits the ringbuffer.
 *
 * Return: 0 on success, -ENOSPC if no ringbuffer space was available,
 * or the error code propagated from adreno_ringbuffer_allocspace().
 */
int a5xx_switch_to_unsecure_mode(struct adreno_device *adreno_dev,
				struct adreno_ringbuffer *rb)
{
	unsigned int *ringcmds = adreno_ringbuffer_allocspace(rb, 2);

	if (IS_ERR(ringcmds))
		return PTR_ERR(ringcmds);
	if (!ringcmds)
		return -ENOSPC;

	/* set = 0 selects unsecure mode; returns the number of dwords written */
	ringcmds += cp_secure_mode(adreno_dev, ringcmds, 0);

	adreno_ringbuffer_submit(rb, NULL);
	return 0;
}

static int _load_firmware(struct adreno_device *adreno_dev, const char *fwfile,
			  struct kgsl_memdesc *ucode, size_t *ucode_size,
			  unsigned int *ucode_version)
@@ -3284,6 +3299,7 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
	.gpudev_init = a5xx_gpudev_init,
	.rb_init = a5xx_rb_init,
	.hw_init = a5xx_hw_init,
	.switch_to_unsecure_mode = a5xx_switch_to_unsecure_mode,
	.microcode_read = a5xx_microcode_read,
	.microcode_load = a5xx_microcode_load,
	.perfcounters = &a5xx_perfcounters,
+14 −1
Original line number Diff line number Diff line
@@ -324,6 +324,18 @@ static int _ringbuffer_start_common(struct adreno_ringbuffer *rb)
		"ringbuffer initialization failed to idle\n");
		kgsl_device_snapshot(device, NULL);
	}

	if (gpudev->switch_to_unsecure_mode) {
		status = gpudev->switch_to_unsecure_mode(adreno_dev, rb);
		if (status)
			return status;

		status = adreno_spin_idle(device);
		if (status)
			KGSL_DRV_ERR(rb->device,
			"switching to unsecure mode failed to idle\n");
	}

	return status;
}

@@ -477,7 +489,8 @@ void adreno_ringbuffer_close(struct adreno_device *adreno_dev)
 * Add commands to the ringbuffer to put the GPU in secure mode
 * or unsecure mode based on the variable set.
 */
static int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set)
int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds,
				int set)
{
	uint *start = cmds;

+2 −0
Original line number Diff line number Diff line
@@ -136,6 +136,8 @@ struct adreno_ringbuffer {
#define KGSL_MEMSTORE_RB_OFFSET(rb, field)	\
	KGSL_MEMSTORE_OFFSET((rb->id + KGSL_MEMSTORE_MAX), field)

int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set);

int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
				struct kgsl_context *context,
				struct kgsl_cmdbatch *cmdbatch,