Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2fb5b735 authored by Jordan Crouse
Browse files

msm: kgsl: Update a6xx register access protections



Update the register access protections for a6xx. There are different
sets of spans for each a6xx sub-tier.

Change-Id: Ic0dedbadf21621cbc16f8457a20ea2638b13365b
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent c75e07f6
Loading
Loading
Loading
Loading
+85 −0
Original line number Diff line number Diff line
@@ -740,6 +740,43 @@ static const struct adreno_reglist a630_vbif_regs[] = {
	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
};


/* For a615, a616, a618, a630, a640 and a680 */
static const struct a6xx_protected_regs a630_protected_regs[] = {
	/* Each entry: { CP protect register, start dword, end dword, noaccess } */
	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
	/*
	 * The readable (noaccess = 0) span below overlaps the two noaccess
	 * spans that follow it
	 */
	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
	{ A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
	{ A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
	{ A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
	{ A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
	{ A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
	{ A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
	{ A6XX_CP_PROTECT_REG + 19, 0x09624, 0x097ff, 1 },
	{ A6XX_CP_PROTECT_REG + 20, 0x09e70, 0x09e71, 1 },
	{ A6XX_CP_PROTECT_REG + 21, 0x09e78, 0x09fff, 1 },
	{ A6XX_CP_PROTECT_REG + 22, 0x0a630, 0x0a7ff, 1 },
	{ A6XX_CP_PROTECT_REG + 23, 0x0ae02, 0x0ae02, 1 },
	{ A6XX_CP_PROTECT_REG + 24, 0x0ae50, 0x0b17f, 1 },
	{ A6XX_CP_PROTECT_REG + 25, 0x0b604, 0x0b604, 1 },
	{ A6XX_CP_PROTECT_REG + 26, 0x0be02, 0x0be03, 1 },
	{ A6XX_CP_PROTECT_REG + 27, 0x0be20, 0x0de1f, 1 },
	{ A6XX_CP_PROTECT_REG + 28, 0x0f000, 0x0fbff, 1 },
	{ A6XX_CP_PROTECT_REG + 29, 0x0fc00, 0x11bff, 0 },
	/*
	 * NOTE(review): register 30 is intentionally skipped and +31 is the
	 * last protect register on this tier. The "last span" select bit
	 * written to A6XX_CP_PROTECT_CNTL in a6xx_protect_init() presumably
	 * extends this span from 0x11c00 to the end of the register address
	 * space, which is why the end field is an unused 0x00000 -- confirm
	 * against the hardware documentation.
	 */
	{ A6XX_CP_PROTECT_REG + 31, 0x11c00, 0x00000, 1 },
	/* A zero reg field terminates the list */
	{ 0 },
};

static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
	.base = {
		DEFINE_ADRENO_REV(ADRENO_REV_A630, 6, 3, 0, ANY_ID),
@@ -765,6 +802,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
	.vbif = a630_vbif_regs,
	.vbif_count = ARRAY_SIZE(a630_vbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

/* For a615, a616 and a618 */
@@ -864,6 +902,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
	.vbif = a615_gbif_regs,
	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
@@ -891,6 +930,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
	.vbif = a615_gbif_regs,
	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_reglist a620_hwcg_regs[] = {
@@ -954,6 +994,44 @@ static const struct adreno_reglist a650_gbif_regs[] = {
	{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
};

/* These are for a620 and a650 */
static const struct a6xx_protected_regs a620_protected_regs[] = {
	/* Each entry: { CP protect register, start dword, end dword, noaccess } */
	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
	/*
	 * The readable (noaccess = 0) span below overlaps the two noaccess
	 * spans that follow it
	 */
	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
	{ A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
	{ A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
	{ A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
	{ A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
	{ A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
	{ A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
	{ A6XX_CP_PROTECT_REG + 19, 0x08e80, 0x090ff, 1 },
	{ A6XX_CP_PROTECT_REG + 20, 0x09624, 0x097ff, 1 },
	{ A6XX_CP_PROTECT_REG + 21, 0x09e60, 0x09e71, 1 },
	{ A6XX_CP_PROTECT_REG + 22, 0x09e78, 0x09fff, 1 },
	{ A6XX_CP_PROTECT_REG + 23, 0x0a630, 0x0a7ff, 1 },
	{ A6XX_CP_PROTECT_REG + 24, 0x0ae02, 0x0ae02, 1 },
	{ A6XX_CP_PROTECT_REG + 25, 0x0ae50, 0x0b17f, 1 },
	{ A6XX_CP_PROTECT_REG + 26, 0x0b604, 0x0b604, 1 },
	{ A6XX_CP_PROTECT_REG + 27, 0x0b608, 0x0b60f, 1 },
	{ A6XX_CP_PROTECT_REG + 28, 0x0be02, 0x0be03, 1 },
	{ A6XX_CP_PROTECT_REG + 29, 0x0be20, 0x0de1f, 1 },
	{ A6XX_CP_PROTECT_REG + 30, 0x0f000, 0x0fbff, 1 },
	{ A6XX_CP_PROTECT_REG + 31, 0x0fc00, 0x11bff, 0 },
	/*
	 * On a620/a650 the last ("infinite") span is programmed in protect
	 * register 47; it is also listed in a650_pwrup_reglist so it is
	 * reprogrammed on powerup. The end field is unused for the last span,
	 * hence 0x00000.
	 */
	{ A6XX_CP_PROTECT_REG + 47, 0x11c00, 0x00000, 1 },
	/* A zero reg field terminates the list */
	{ 0 },
};

static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
	.base = {
		DEFINE_ADRENO_REV(ADRENO_REV_A620, 6, 2, 0, 0),
@@ -980,6 +1058,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
	.veto_fal10 = true,
	.hang_detect_cycles = 0x3ffff,
	.protected_regs = a620_protected_regs,
};

static const struct adreno_reglist a640_hwcg_regs[] = {
@@ -1068,6 +1147,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
	.vbif = a640_vbif_regs,
	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_reglist a650_hwcg_regs[] = {
@@ -1147,6 +1227,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
	.veto_fal10 = true,
	.pdc_in_aop = true,
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a620_protected_regs,
};

static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
@@ -1174,6 +1255,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
	.veto_fal10 = true,
	.pdc_in_aop = true,
	.hang_detect_cycles = 0x3ffff,
	.protected_regs = a620_protected_regs,
};

static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
@@ -1199,6 +1281,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
	.vbif = a640_vbif_regs,
	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_reglist a612_hwcg_regs[] = {
@@ -1273,6 +1356,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
	.vbif = a640_vbif_regs,
	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
@@ -1300,6 +1384,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
	.vbif = a615_gbif_regs,
	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_gpu_core *adreno_gpulist[] = {
+31 −83
Original line number Diff line number Diff line
@@ -15,41 +15,6 @@
#include "adreno_trace.h"
#include "kgsl_trace.h"

/*
 * NOTE(review): this is the legacy single table that this change removes in
 * favor of the per-target zero-terminated a6xx_protected_regs tables
 * (a630_protected_regs / a620_protected_regs) referenced from each core
 * definition. Entries are { base dword, count, read_protect }; judging from
 * the single-register entries (e.g. { 0x50E, 0x0, 1 }), count is presumably
 * the span length minus one -- confirm against the old a6xx_protect_init().
 */
static struct a6xx_protected_regs {
	unsigned int base;	/* Dword offset of the first register in the span */
	unsigned int count;	/* Span length encoding (shifted into bits 18+) */
	int read_protect;	/* 1 blocks reads as well as writes (bit 31) */
} a6xx_protected_regs_group[] = {
	{ 0x600, 0x51, 0 },
	{ 0xAE50, 0x2, 1 },
	{ 0x9624, 0x13, 1 },
	{ 0x8630, 0x8, 1 },
	{ 0x9E70, 0x1, 1 },
	{ 0x9E78, 0x187, 1 },
	{ 0xF000, 0x810, 1 },
	{ 0xFC00, 0x3, 0 },
	{ 0x50E, 0x0, 1 },
	{ 0x50F, 0x0, 0 },
	{ 0x510, 0x0, 1 },
	{ 0x0, 0x4F9, 0 },
	{ 0x501, 0xA, 0 },
	{ 0x511, 0x44, 0 },
	{ 0xE00, 0x1, 1 },
	{ 0xE03, 0xB, 1 },
	{ 0x8E00, 0x0, 1 },
	{ 0x8E50, 0xF, 1 },
	{ 0xBE02, 0x0, 1 },
	{ 0xBE20, 0x11F3, 1 },
	{ 0x800, 0x82, 1 },
	{ 0x8A0, 0x8, 1 },
	{ 0x8AB, 0x19, 1 },
	{ 0x900, 0x4D, 1 },
	{ 0x98D, 0x76, 1 },
	{ 0x8D0, 0x23, 0 },
	{ 0x980, 0x4, 0 },
	{ 0xA630, 0x0, 1 },
};

/* IFPC & Preemption static powerup restore list */
static u32 a6xx_pwrup_reglist[] = {
	A6XX_VSC_ADDR_MODE_CNTL,
@@ -123,6 +88,11 @@ static u32 a6xx_ifpc_pwrup_reglist[] = {
	A6XX_CP_AHB_CNTL,
};

/* a620 and a650 need to program A6XX_CP_PROTECT_REG_47 for the infinite span */
static u32 a650_pwrup_reglist[] = {
	/* Added via a6xx_patch_pwrup_reglist() for a650/a620 targets */
	A6XX_CP_PROTECT_REG + 47,
};

/* Extra powerup register for the a615 family (see a6xx_patch_pwrup_reglist) */
static u32 a615_pwrup_reglist[] = {
	A6XX_UCHE_GBIF_GX_CONFIG,
};
@@ -175,58 +145,34 @@ static void a6xx_init(struct adreno_device *adreno_dev)
		"powerup_register_list");
}

/**
 * a6xx_protect_init() - Initializes register protection on a6xx
 * @adreno_dev: Pointer to the adreno device
 *
 * Performs register writes to enable protected access to sensitive registers,
 * using the zero-terminated span list supplied by the target's core
 * definition (a6xx_core->protected_regs).
 */
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
	const struct a6xx_protected_regs *regs = a6xx_core->protected_regs;
	int i;

	/*
	 * Enable access protection to privileged registers, fault on an access
	 * protect violation and select the last span to protect from the start
	 * address all the way to the end of the register address space
	 */
	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL,
		(1 << 0) | (1 << 1) | (1 << 3));

	/* Program each register defined by the core definition */
	for (i = 0; regs[i].reg; i++) {
		u32 count;

		/*
		 * This is the offset of the end register as counted from the
		 * start, i.e. # of registers in the range - 1
		 */
		count = regs[i].end - regs[i].start;

		kgsl_regwrite(device, regs[i].reg,
			regs[i].start | (count << 18) |
			(regs[i].noaccess << 31));
	}
}

@@ -385,6 +331,8 @@ static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
		reglist[items++] = REGLIST(a612_pwrup_reglist);
	else if (adreno_is_a615_family(adreno_dev))
		reglist[items++] = REGLIST(a615_pwrup_reglist);
	else if (adreno_is_a650(adreno_dev) || adreno_is_a620(adreno_dev))
		reglist[items++] = REGLIST(a650_pwrup_reglist);

	/*
	 * For each entry in each of the lists, write the offset and the current
+22 −0
Original line number Diff line number Diff line
@@ -10,6 +10,26 @@

#include "a6xx_reg.h"

/**
 * struct a6xx_protected_regs - container for a protect register span
 *
 * Spans are stored in arrays terminated by an all-zero entry (a zero @reg
 * ends the list, see a6xx_protect_init()).
 */
struct a6xx_protected_regs {
	/** @reg: Physical protected mode register to write to */
	u32 reg;
	/** @start: Dword offset of the starting register in the range */
	u32 start;
	/**
	 * @end: Dword offset of the ending register in the range
	 * (inclusive)
	 */
	u32 end;
	/**
	 * @noaccess: 1 if the register should not be accessible from
	 * userspace, 0 if it can be read (but not written)
	 */
	u32 noaccess;
};

/**
 * struct adreno_a6xx_core - a6xx specific GPU core definitions
 */
@@ -44,6 +64,8 @@ struct adreno_a6xx_core {
	bool pdc_in_aop;
	/** @hang_detect_cycles: Hang detect counter timeout value */
	u32 hang_detect_cycles;
	/** @protected_regs: Array of protected registers for the target */
	const struct a6xx_protected_regs *protected_regs;
};

#define CP_CLUSTER_FE		0x0