
Commit 51ddaca6 authored by Jordan Crouse

msm: kgsl: Fix up the legacy target register address protections



Move all of the register address protection (RAP) support for legacy
targets into the individual GPU target code. By explicitly specifying
each protected mode register instead of relying on vague generic code,
we can do a better job of monitoring the list and making sure it doesn't
get accidentally changed for the worse by developers.

Change-Id: Ic0dedbad22891bb7e14722e6eb7c52ca142a7cea
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 647a772b
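
For context, every protect entry programmed by this patch packs an enable
pattern, a power-of-two range size, and a base dword address into one 32-bit
register value. A minimal sketch of the encoding, taken from the values
written in the diffs below (the meaning of the 0x60000000 enable bits is
hardware-defined):

#include <stdint.h>

/* Pack one CP protect entry: "count" selects a range of 2^count
 * registers starting at dword offset "base" (sketch of the encoding
 * used throughout this patch). */
static uint32_t protect_entry(uint32_t base, uint32_t count)
{
	return 0x60000000 | (count << 24) | (base << 2);
}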
+0 −37
@@ -1455,43 +1455,6 @@ static inline void adreno_put_gpu_halt(struct adreno_device *adreno_dev)
void adreno_reglist_write(struct adreno_device *adreno_dev,
		const struct adreno_reglist *list, u32 count);

/**
 * adreno_set_protected_registers() - Protect the specified range of registers
 * from being accessed by the GPU
 * @adreno_dev: pointer to the Adreno device
 * @index: Pointer to the index of the protect mode register to write to
 * @reg: Starting dword register to write
 * @mask_len: Size of the mask to protect (# of registers = 2 ** mask_len)
 *
 * Add the range of registers to the list of protected mode registers that will
 * cause an exception if the GPU accesses them.  There are 16 available
 * protected mode registers.  Index is used to specify which register to write
 * to - the intent is to call this function multiple times with the same index
 * pointer for each range and the registers will be magically programmed in
 * incremental fashion
 */
static inline void adreno_set_protected_registers(
		struct adreno_device *adreno_dev, unsigned int *index,
		unsigned int reg, int mask_len)
{
	unsigned int val;
	unsigned int base =
		adreno_getreg(adreno_dev, ADRENO_REG_CP_PROTECT_REG_0);
	unsigned int offset = *index;
	unsigned int max_slots = adreno_dev->gpucore->num_protected_regs ?
				adreno_dev->gpucore->num_protected_regs : 16;

	/* Do we have a free slot? */
	if (WARN(*index >= max_slots, "Protected register slots full: %d/%d\n",
					*index, max_slots))
		return;

	val = 0x60000000 | ((mask_len & 0x1F) << 24) | ((reg << 2) & 0xFFFFF);

	kgsl_regwrite(KGSL_DEVICE(adreno_dev), base + offset, val);
	*index = *index + 1;
}

#ifdef CONFIG_DEBUG_FS
void adreno_debugfs_init(struct adreno_device *adreno_dev);
void adreno_context_debugfs_init(struct adreno_device *adreno_dev,
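
For reference, the removed helper kept a caller-shared slot index and sized
each range as 2^mask_len registers. A standalone model of that behavior
(a sketch for illustration, not driver code):

#include <stdio.h>

/* Mimics the removed adreno_set_protected_registers(): pack one entry
 * per call and advance the shared slot index, which is how callers
 * built the protect list incrementally. */
static void set_protected(unsigned int *index, unsigned int reg,
		int mask_len)
{
	unsigned int val = 0x60000000 | ((mask_len & 0x1F) << 24) |
			((reg << 2) & 0xFFFFF);

	printf("slot %u: base 0x%x, 2^%d registers -> 0x%08x\n",
		*index, reg, mask_len, val);
	(*index)++;
}

int main(void)
{
	unsigned int index = 0;

	set_protected(&index, 0x18, 0);	/* one register at 0x18 */
	set_protected(&index, 0x50, 4);	/* 16 registers at 0x50 */
	return 0;
}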
+39 −40
@@ -1070,49 +1070,48 @@ static void a3xx_perfcounter_init(struct adreno_device *adreno_dev)
	}
}

/**
 * a3xx_protect_init() - Initializes register protection on a3xx
 * @adreno_dev: Pointer to the device structure
 * Performs register writes to enable protected access to sensitive
 * registers
 */
static void a3xx_protect_init(struct adreno_device *adreno_dev)
struct {
	u32 reg;
	u32 base;
	u32 count;
} a3xx_protected_blocks[] = {
	/* RBBM */
	{ A3XX_CP_PROTECT_REG_0,      0x0018, 0 },
	{ A3XX_CP_PROTECT_REG_0 + 1,  0x0020, 2 },
	{ A3XX_CP_PROTECT_REG_0 + 2,  0x0033, 0 },
	{ A3XX_CP_PROTECT_REG_0 + 3,  0x0042, 0 },
	{ A3XX_CP_PROTECT_REG_0 + 4,  0x0050, 4 },
	{ A3XX_CP_PROTECT_REG_0 + 5,  0x0063, 0 },
	{ A3XX_CP_PROTECT_REG_0 + 6,  0x0100, 4 },
	/* CP */
	{ A3XX_CP_PROTECT_REG_0 + 7,  0x01c0, 5 },
	{ A3XX_CP_PROTECT_REG_0 + 8,  0x01ec, 1 },
	{ A3XX_CP_PROTECT_REG_0 + 9,  0x01f6, 1 },
	{ A3XX_CP_PROTECT_REG_0 + 10, 0x01f8, 2 },
	{ A3XX_CP_PROTECT_REG_0 + 11, 0x045e, 2 },
	{ A3XX_CP_PROTECT_REG_0 + 12, 0x0460, 4 },
	/* RB */
	{ A3XX_CP_PROTECT_REG_0 + 13, 0x0cc0, 0 },
	/* VBIF */
	{ A3XX_CP_PROTECT_REG_0 + 14, 0x3000, 6 },
	/* SMMU */
	{ A3XX_CP_PROTECT_REG_0 + 15, 0xa000, 12 },
	/* There are no remaining protected mode registers for a3xx */
};

static void a3xx_protect_init(struct kgsl_device *device)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int index = 0;
	struct kgsl_protected_registers *iommu_regs;
	int i;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);

	/* RBBM registers */
	adreno_set_protected_registers(adreno_dev, &index, 0x18, 0);
	adreno_set_protected_registers(adreno_dev, &index, 0x20, 2);
	adreno_set_protected_registers(adreno_dev, &index, 0x33, 0);
	adreno_set_protected_registers(adreno_dev, &index, 0x42, 0);
	adreno_set_protected_registers(adreno_dev, &index, 0x50, 4);
	adreno_set_protected_registers(adreno_dev, &index, 0x63, 0);
	adreno_set_protected_registers(adreno_dev, &index, 0x100, 4);

	/* CP registers */
	adreno_set_protected_registers(adreno_dev, &index, 0x1C0, 5);
	adreno_set_protected_registers(adreno_dev, &index, 0x1EC, 1);
	adreno_set_protected_registers(adreno_dev, &index, 0x1F6, 1);
	adreno_set_protected_registers(adreno_dev, &index, 0x1F8, 2);
	adreno_set_protected_registers(adreno_dev, &index, 0x45E, 2);
	adreno_set_protected_registers(adreno_dev, &index, 0x460, 4);

	/* RB registers */
	adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);

	/* VBIF registers */
	adreno_set_protected_registers(adreno_dev, &index, 0x3000, 6);

	/* SMMU registers */
	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
	if (iommu_regs)
		adreno_set_protected_registers(adreno_dev, &index,
				iommu_regs->base, ilog2(iommu_regs->range));
	for (i = 0; i < ARRAY_SIZE(a3xx_protected_blocks); i++) {
		u32 val = 0x60000000 |
			(a3xx_protected_blocks[i].count << 24) |
			(a3xx_protected_blocks[i].base << 2);

		kgsl_regwrite(device, a3xx_protected_blocks[i].reg, val);
	}
}

static void a3xx_start(struct adreno_device *adreno_dev)
@@ -1161,7 +1160,7 @@ static void a3xx_start(struct adreno_device *adreno_dev)
	kgsl_regwrite(device, A3XX_RBBM_CLOCK_CTL, A3XX_RBBM_CLOCK_CTL_DEFAULT);

	/* Turn on protection */
	a3xx_protect_init(adreno_dev);
	a3xx_protect_init(device);

	/* Turn on performance counters */
	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);
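
To cross-check the new table against the removed calls: each entry writes the
value the old helper computed for the same base and length, e.g.
{ A3XX_CP_PROTECT_REG_0 + 4, 0x0050, 4 } versus the old
adreno_set_protected_registers(adreno_dev, &index, 0x50, 4). A quick
equivalence check for small bases (a sketch; note the old helper additionally
masked the shifted base to 20 bits, which the new path does not):

#include <assert.h>

int main(void)
{
	unsigned int base = 0x0050, count = 4;
	/* New table path (see the a3xx loop above) */
	unsigned int new_val = 0x60000000 | (count << 24) | (base << 2);
	/* Old helper path (see the removed function in the header) */
	unsigned int old_val = 0x60000000 | ((count & 0x1F) << 24) |
			((base << 2) & 0xFFFFF);

	assert(new_val == old_val);
	return 0;
}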
+56 −44
@@ -293,57 +293,69 @@ static void a5xx_remove(struct adreno_device *adreno_dev)
		a5xx_critical_packet_destroy(adreno_dev);
}

/**
 * a5xx_protect_init() - Initializes register protection on a5xx
 * @device: Pointer to the device structure
 * Performs register writes to enable protected access to sensitive
 * registers
 */
const static struct {
	u32 reg;
	u32 base;
	u32 count;
} a5xx_protected_blocks[] = {
	/* RBBM */
	{  A5XX_CP_PROTECT_REG_0,     0x004, 2 },
	{  A5XX_CP_PROTECT_REG_0 + 1, 0x008, 3 },
	{  A5XX_CP_PROTECT_REG_0 + 2, 0x010, 4 },
	{  A5XX_CP_PROTECT_REG_0 + 3, 0x020, 5 },
	{  A5XX_CP_PROTECT_REG_0 + 4, 0x040, 6 },
	{  A5XX_CP_PROTECT_REG_0 + 5, 0x080, 6 },
	/* Content protection */
	{  A5XX_CP_PROTECT_REG_0 + 6, A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4 },
	{  A5XX_CP_PROTECT_REG_0 + 7, A5XX_RBBM_SECVID_TRUST_CNTL, 1 },
	/* CP */
	{  A5XX_CP_PROTECT_REG_0 + 8, 0x800, 6 },
	{  A5XX_CP_PROTECT_REG_0 + 9, 0x840, 3 },
	{  A5XX_CP_PROTECT_REG_0 + 10, 0x880, 5 },
	{  A5XX_CP_PROTECT_REG_0 + 11, 0xaa0, 0 },
	/* RB */
	{  A5XX_CP_PROTECT_REG_0 + 12, 0xcc0, 0 },
	{  A5XX_CP_PROTECT_REG_0 + 13, 0xcf0, 1 },
	/* VPC */
	{  A5XX_CP_PROTECT_REG_0 + 14, 0xe68, 3 },
	{  A5XX_CP_PROTECT_REG_0 + 15, 0xe70, 4 },
	/* UCHE */
	{  A5XX_CP_PROTECT_REG_0 + 16, 0xe80, 4 },
	/* A5XX_CP_PROTECT_REG_17 will be used for SMMU */
	/* A5XX_CP_PROTECT_REG_18 - A5XX_CP_PROTECT_REG_31 are available */
};

static void _setprotectreg(struct kgsl_device *device, u32 offset,
		u32 base, u32 count)
{
	kgsl_regwrite(device, offset, 0x60000000 | (count << 24) | (base << 2));
}

static void a5xx_protect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int index = 0;
	struct kgsl_protected_registers *iommu_regs;
	u32 reg;
	int i;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A5XX_CP_PROTECT_CNTL, 0x00000007);

	/* RBBM registers */
	adreno_set_protected_registers(adreno_dev, &index, 0x4, 2);
	adreno_set_protected_registers(adreno_dev, &index, 0x8, 3);
	adreno_set_protected_registers(adreno_dev, &index, 0x10, 4);
	adreno_set_protected_registers(adreno_dev, &index, 0x20, 5);
	adreno_set_protected_registers(adreno_dev, &index, 0x40, 6);
	adreno_set_protected_registers(adreno_dev, &index, 0x80, 6);

	/* Content protection registers */
	adreno_set_protected_registers(adreno_dev, &index,
		   A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4);
	adreno_set_protected_registers(adreno_dev, &index,
		   A5XX_RBBM_SECVID_TRUST_CNTL, 1);

	/* CP registers */
	adreno_set_protected_registers(adreno_dev, &index, 0x800, 6);
	adreno_set_protected_registers(adreno_dev, &index, 0x840, 3);
	adreno_set_protected_registers(adreno_dev, &index, 0x880, 5);
	adreno_set_protected_registers(adreno_dev, &index, 0x0AA0, 0);

	/* RB registers */
	adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);
	adreno_set_protected_registers(adreno_dev, &index, 0xCF0, 1);

	/* VPC registers */
	adreno_set_protected_registers(adreno_dev, &index, 0xE68, 3);
	adreno_set_protected_registers(adreno_dev, &index, 0xE70, 4);

	/* UCHE registers */
	adreno_set_protected_registers(adreno_dev, &index, 0xE80, ilog2(16));

	/* SMMU registers */
	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
	if (iommu_regs)
		adreno_set_protected_registers(adreno_dev, &index,
				iommu_regs->base, ilog2(iommu_regs->range));
	for (i = 0; i < ARRAY_SIZE(a5xx_protected_blocks); i++) {
		reg = a5xx_protected_blocks[i].reg;

		_setprotectreg(device, reg, a5xx_protected_blocks[i].base,
			a5xx_protected_blocks[i].count);
	}

	/*
	 * For a530 and a540 the SMMU region is 0x20000 bytes long and 0x10000
	 * bytes on all other targets. The base offset for both is 0x40000.
	 * Write it to the next available slot
	 */
	if (adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev))
		_setprotectreg(device, reg + 1, 0x40000, ilog2(0x20000));
	else
		_setprotectreg(device, reg + 1, 0x40000, ilog2(0x10000));
}

/*
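
Note on the a5xx SMMU slot: after the loop, reg still holds the last table
entry (A5XX_CP_PROTECT_REG_0 + 16), so the reg + 1 write lands in slot 17,
matching the reserved-slot comment in the table above. The size argument is
ilog2() of the region length; a quick check using a user-space stand-in for
the kernel's ilog2() (a sketch):

#include <assert.h>

/* User-space stand-in for the kernel's ilog2(): floor(log2(v)). */
static int ilog2_sketch(unsigned long v)
{
	int n = -1;

	while (v) {
		v >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	assert(ilog2_sketch(0x20000) == 17);	/* a530/a540 SMMU range */
	assert(ilog2_sketch(0x10000) == 16);	/* all other a5xx targets */
	return 0;
}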