Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 803271df, authored by qctecmdr; committed by Gerrit (code review server)
Browse files

Merge "msm: kgsl: Clear out unneeded register address protection code"

parents e67ce4ec 548f437e
Loading
Loading
Loading
Loading
+85 −18
Original line number Diff line number Diff line
@@ -194,7 +194,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v2 = {
		.gpudev = &adreno_a5xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_1M,
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -220,7 +219,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v3 = {
		.gpudev = &adreno_a5xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_1M,
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -286,7 +284,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a505 = {
		.gpudev = &adreno_a5xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = (SZ_128K + SZ_8K),
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 16,
	},
@@ -306,7 +303,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a506 = {
		.gpudev = &adreno_a5xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = (SZ_128K + SZ_8K),
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 16,
	},
@@ -384,7 +380,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a510 = {
		.gpudev = &adreno_a5xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_256K,
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 16,
	},
@@ -510,7 +505,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a540v2 = {
		.gpudev = &adreno_a5xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_1M,
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -593,7 +587,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a512 = {
		.gpudev = &adreno_a5xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = (SZ_256K + SZ_16K),
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -612,7 +605,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a508 = {
		.gpudev = &adreno_a5xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = (SZ_128K + SZ_8K),
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -740,6 +732,43 @@ static const struct adreno_reglist a630_vbif_regs[] = {
	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
};


/* For a615, a616, a618, a630, a640 and a680 */
/*
 * Protected register spans, one hardware CP_PROTECT register per entry:
 * { protect register to program, first dword offset of the span,
 *   last dword offset of the span (inclusive), noaccess flag }.
 * noaccess == 1 blocks both reads and writes from userspace; 0 makes the
 * span read-only. The list is terminated by an all-zero sentinel entry.
 */
static const struct a6xx_protected_regs a630_protected_regs[] = {
	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
	{ A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
	{ A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
	{ A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
	{ A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
	{ A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
	{ A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
	{ A6XX_CP_PROTECT_REG + 19, 0x09624, 0x097ff, 1 },
	{ A6XX_CP_PROTECT_REG + 20, 0x09e70, 0x09e71, 1 },
	{ A6XX_CP_PROTECT_REG + 21, 0x09e78, 0x09fff, 1 },
	{ A6XX_CP_PROTECT_REG + 22, 0x0a630, 0x0a7ff, 1 },
	{ A6XX_CP_PROTECT_REG + 23, 0x0ae02, 0x0ae02, 1 },
	{ A6XX_CP_PROTECT_REG + 24, 0x0ae50, 0x0b17f, 1 },
	{ A6XX_CP_PROTECT_REG + 25, 0x0b604, 0x0b604, 1 },
	{ A6XX_CP_PROTECT_REG + 26, 0x0be02, 0x0be03, 1 },
	{ A6XX_CP_PROTECT_REG + 27, 0x0be20, 0x0de1f, 1 },
	{ A6XX_CP_PROTECT_REG + 28, 0x0f000, 0x0fbff, 1 },
	{ A6XX_CP_PROTECT_REG + 29, 0x0fc00, 0x11bff, 0 },
	/*
	 * The last span is deliberately programmed into the final protect
	 * register (31 on these targets; register 30 is intentionally left
	 * unused). With end == 0x00000 and CP_PROTECT_CNTL configured for
	 * "last span to infinity", this protects everything from 0x11c00 to
	 * the end of the register address space.
	 */
	{ A6XX_CP_PROTECT_REG + 31, 0x11c00, 0x00000, 1 },
	{ 0 }, /* sentinel: terminates the list */
};

static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
	.base = {
		DEFINE_ADRENO_REV(ADRENO_REV_A630, 6, 3, 0, ANY_ID),
@@ -749,7 +778,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_1M,
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -765,6 +793,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
	.vbif = a630_vbif_regs,
	.vbif_count = ARRAY_SIZE(a630_vbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

/* For a615, a616 and a618 */
@@ -848,7 +877,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_512K,
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -864,6 +892,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
	.vbif = a615_gbif_regs,
	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
@@ -875,7 +904,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_512K,
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -891,6 +919,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
	.vbif = a615_gbif_regs,
	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_reglist a620_hwcg_regs[] = {
@@ -954,6 +983,44 @@ static const struct adreno_reglist a650_gbif_regs[] = {
	{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
};

/* These are for a620 and a650 */
/*
 * Protected register spans, one hardware CP_PROTECT register per entry:
 * { protect register to program, first dword offset of the span,
 *   last dword offset of the span (inclusive), noaccess flag }.
 * noaccess == 1 blocks both reads and writes from userspace; 0 makes the
 * span read-only. The list is terminated by an all-zero sentinel entry.
 */
static const struct a6xx_protected_regs a620_protected_regs[] = {
	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
	{ A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
	{ A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
	{ A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
	{ A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
	{ A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
	{ A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
	{ A6XX_CP_PROTECT_REG + 19, 0x08e80, 0x090ff, 1 },
	{ A6XX_CP_PROTECT_REG + 20, 0x09624, 0x097ff, 1 },
	{ A6XX_CP_PROTECT_REG + 21, 0x09e60, 0x09e71, 1 },
	{ A6XX_CP_PROTECT_REG + 22, 0x09e78, 0x09fff, 1 },
	{ A6XX_CP_PROTECT_REG + 23, 0x0a630, 0x0a7ff, 1 },
	{ A6XX_CP_PROTECT_REG + 24, 0x0ae02, 0x0ae02, 1 },
	{ A6XX_CP_PROTECT_REG + 25, 0x0ae50, 0x0b17f, 1 },
	{ A6XX_CP_PROTECT_REG + 26, 0x0b604, 0x0b604, 1 },
	{ A6XX_CP_PROTECT_REG + 27, 0x0b608, 0x0b60f, 1 },
	{ A6XX_CP_PROTECT_REG + 28, 0x0be02, 0x0be03, 1 },
	{ A6XX_CP_PROTECT_REG + 29, 0x0be20, 0x0de1f, 1 },
	{ A6XX_CP_PROTECT_REG + 30, 0x0f000, 0x0fbff, 1 },
	{ A6XX_CP_PROTECT_REG + 31, 0x0fc00, 0x11bff, 0 },
	/*
	 * The terminal span goes in the last protect register on a620/a650
	 * (register 47; these targets have 0x30 protect registers). With
	 * end == 0x00000 and CP_PROTECT_CNTL set for "last span to
	 * infinity", everything from 0x11c00 to the end of the register
	 * address space is protected.
	 */
	{ A6XX_CP_PROTECT_REG + 47, 0x11c00, 0x00000, 1 },
	{ 0 }, /* sentinel: terminates the list */
};

static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
	.base = {
		DEFINE_ADRENO_REV(ADRENO_REV_A620, 6, 2, 0, 0),
@@ -963,7 +1030,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0,
		.gmem_size = SZ_512K,
		.num_protected_regs = 0x30,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -980,6 +1046,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
	.veto_fal10 = true,
	.hang_detect_cycles = 0x3ffff,
	.protected_regs = a620_protected_regs,
};

static const struct adreno_reglist a640_hwcg_regs[] = {
@@ -1052,7 +1119,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_1M, //Verified 1MB
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -1068,6 +1134,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
	.vbif = a640_vbif_regs,
	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_reglist a650_hwcg_regs[] = {
@@ -1131,7 +1198,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0,
		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
		.num_protected_regs = 0x30,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -1147,6 +1213,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
	.veto_fal10 = true,
	.pdc_in_aop = true,
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a620_protected_regs,
};

static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
@@ -1158,7 +1225,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0,
		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
		.num_protected_regs = 0x30,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -1174,6 +1240,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
	.veto_fal10 = true,
	.pdc_in_aop = true,
	.hang_detect_cycles = 0x3ffff,
	.protected_regs = a620_protected_regs,
};

static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
@@ -1183,7 +1250,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_2M,
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -1199,6 +1265,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
	.vbif = a640_vbif_regs,
	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_reglist a612_hwcg_regs[] = {
@@ -1259,7 +1326,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = (SZ_128K + SZ_4K),
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -1273,6 +1339,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
	.vbif = a640_vbif_regs,
	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
@@ -1284,7 +1351,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_512K,
		.num_protected_regs = 0x20,
		.busy_mask = 0xfffffffe,
		.bus_width = 32,
	},
@@ -1300,6 +1366,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
	.vbif = a615_gbif_regs,
	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
	.hang_detect_cycles = 0x3fffff,
	.protected_regs = a630_protected_regs,
};

static const struct adreno_gpu_core *adreno_gpulist[] = {
+0 −2
Original line number Diff line number Diff line
@@ -350,7 +350,6 @@ struct adreno_reglist {
 * @gpudev: Pointer to the GPU family specific functions for this core
 * @gmem_base: Base address of binning memory (GMEM/OCMEM)
 * @gmem_size: Amount of binning memory (GMEM/OCMEM) to reserve for the core
 * @num_protected_regs: number of protected registers
 * @busy_mask: mask to check if GPU is busy in RBBM_STATUS
 * @bus_width: Bytes transferred in 1 cycle
 */
@@ -361,7 +360,6 @@ struct adreno_gpu_core {
	struct adreno_gpudev *gpudev;
	unsigned long gmem_base;
	size_t gmem_size;
	unsigned int num_protected_regs;
	unsigned int busy_mask;
	u32 bus_width;
};
+31 −83
Original line number Diff line number Diff line
@@ -15,41 +15,6 @@
#include "adreno_trace.h"
#include "kgsl_trace.h"

/*
 * Legacy table of protected register spans, programmed sequentially into
 * CP_PROTECT registers by a6xx_protect_init(). This commit replaces it
 * with per-target tables (a630_protected_regs / a620_protected_regs)
 * declared next to each GPU core definition.
 */
static struct a6xx_protected_regs {
	/* Dword offset of the first register in the span */
	unsigned int base;
	/*
	 * Value programmed into the span-length field of the protect
	 * register; appears to be (number of registers - 1), since
	 * single-register spans use 0 — TODO confirm against the CP_PROTECT
	 * register specification.
	 */
	unsigned int count;
	/* 1 to block reads as well as writes, 0 to block writes only */
	int read_protect;
} a6xx_protected_regs_group[] = {
	{ 0x600, 0x51, 0 },
	{ 0xAE50, 0x2, 1 },
	{ 0x9624, 0x13, 1 },
	{ 0x8630, 0x8, 1 },
	{ 0x9E70, 0x1, 1 },
	{ 0x9E78, 0x187, 1 },
	{ 0xF000, 0x810, 1 },
	{ 0xFC00, 0x3, 0 },
	{ 0x50E, 0x0, 1 },
	{ 0x50F, 0x0, 0 },
	{ 0x510, 0x0, 1 },
	{ 0x0, 0x4F9, 0 },
	{ 0x501, 0xA, 0 },
	{ 0x511, 0x44, 0 },
	{ 0xE00, 0x1, 1 },
	{ 0xE03, 0xB, 1 },
	{ 0x8E00, 0x0, 1 },
	{ 0x8E50, 0xF, 1 },
	{ 0xBE02, 0x0, 1 },
	{ 0xBE20, 0x11F3, 1 },
	{ 0x800, 0x82, 1 },
	{ 0x8A0, 0x8, 1 },
	{ 0x8AB, 0x19, 1 },
	{ 0x900, 0x4D, 1 },
	{ 0x98D, 0x76, 1 },
	{ 0x8D0, 0x23, 0 },
	{ 0x980, 0x4, 0 },
	{ 0xA630, 0x0, 1 },
};

/* IFPC & Preemption static powerup restore list */
static u32 a6xx_pwrup_reglist[] = {
	A6XX_VSC_ADDR_MODE_CNTL,
@@ -123,6 +88,11 @@ static u32 a6xx_ifpc_pwrup_reglist[] = {
	A6XX_CP_AHB_CNTL,
};

/* a620 and a650 need to program A6XX_CP_PROTECT_REG_47 for the infinite span */
/*
 * Extra entry appended to the powerup register restore list on a620/a650
 * (see a6xx_patch_pwrup_reglist()) so the terminal protect register is
 * restored after power collapse.
 */
static u32 a650_pwrup_reglist[] = {
	A6XX_CP_PROTECT_REG + 47,
};

/*
 * Extra powerup restore entry selected for the a615 family in
 * a6xx_patch_pwrup_reglist().
 */
static u32 a615_pwrup_reglist[] = {
	A6XX_UCHE_GBIF_GX_CONFIG,
};
@@ -175,58 +145,34 @@ static void a6xx_init(struct adreno_device *adreno_dev)
		"powerup_register_list");
}

/**
 * a6xx_protect_init() - Initializes register protection on a6xx
 * @device: Pointer to the device structure
 * Performs register writes to enable protected access to sensitive
 * registers
 */
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_protected_registers *mmu_prot =
		kgsl_mmu_get_prot_regs(&device->mmu);
	int i, num_sets;
	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
	int max_sets = adreno_dev->gpucore->num_protected_regs;
	unsigned int mmu_base = 0, mmu_range = 0, cur_range;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);

	if (mmu_prot) {
		mmu_base = mmu_prot->base;
		mmu_range = mmu_prot->range;
		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
	}

	WARN(req_sets > max_sets,
		"Size exceeds the num of protection regs available\n");
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
	const struct a6xx_protected_regs *regs = a6xx_core->protected_regs;
	int i;

	/* Protect GPU registers */
	num_sets = min_t(unsigned int,
		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
	for (i = 0; i < num_sets; i++) {
		struct a6xx_protected_regs *regs =
					&a6xx_protected_regs_group[i];
	/*
	 * Enable access protection to privileged registers, fault on an access
	 * protect violation and select the last span to protect from the start
	 * address all the way to the end of the register address space
	 */
	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL,
		(1 << 0) | (1 << 1) | (1 << 3));

		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
				regs->base | (regs->count << 18) |
				(regs->read_protect << 31));
	}
	/* Program each register defined by the core definition */
	for (i = 0; regs[i].reg; i++) {
		u32 count;

	/* Protect MMU registers */
	if (mmu_prot) {
		while ((i < max_sets) && (mmu_range > 0)) {
			cur_range = min_t(unsigned int, mmu_range,
						0x2000);
			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
				mmu_base | ((cur_range - 1) << 18) | (1 << 31));
		/*
		 * This is the offset of the end register as counted from the
		 * start, i.e. # of registers in the range - 1
		 */
		count = regs[i].end - regs[i].start;

			mmu_base += cur_range;
			mmu_range -= cur_range;
			i++;
		}
		kgsl_regwrite(device, regs[i].reg,
			regs[i].start | (count << 18) |
			(regs[i].noaccess << 31));
	}
}

@@ -385,6 +331,8 @@ static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
		reglist[items++] = REGLIST(a612_pwrup_reglist);
	else if (adreno_is_a615_family(adreno_dev))
		reglist[items++] = REGLIST(a615_pwrup_reglist);
	else if (adreno_is_a650(adreno_dev) || adreno_is_a620(adreno_dev))
		reglist[items++] = REGLIST(a650_pwrup_reglist);

	/*
	 * For each entry in each of the lists, write the offset and the current
+22 −0
Original line number Diff line number Diff line
@@ -10,6 +10,26 @@

#include "a6xx_reg.h"

/**
 * struct a6xx_protected_regs - container for a protect register span
 *
 * Describes one span of GPU registers to protect from userspace access.
 * Arrays of these are terminated by an all-zero sentinel entry (reg == 0).
 */
struct a6xx_protected_regs {
	/** @reg: Physical protected mode register to write to */
	u32 reg;
	/** @start: Dword offset of the starting register in the range */
	u32 start;
	/**
	 * @end: Dword offset of the ending register in the range
	 * (inclusive)
	 */
	u32 end;
	/**
	 * @noaccess: 1 if the register should not be accessible from
	 * userspace, 0 if it can be read (but not written)
	 */
	u32 noaccess;
};

/**
 * struct adreno_a6xx_core - a6xx specific GPU core definitions
 */
@@ -44,6 +64,8 @@ struct adreno_a6xx_core {
	bool pdc_in_aop;
	/** @hang_detect_cycles: Hang detect counter timeout value */
	u32 hang_detect_cycles;
	/** @protected_regs: Array of protected registers for the target */
	const struct a6xx_protected_regs *protected_regs;
};

#define CP_CLUSTER_FE		0x0
+0 −10
Original line number Diff line number Diff line
@@ -332,16 +332,6 @@ struct kgsl_event_group {
	void *priv;
};

/**
 * struct kgsl_protected_registers - Protected register range
 * @base: Dword offset of the first register in the range to be protected
 * @range: Log2 size of the range (number of registers = 2 ** range)
 */
struct kgsl_protected_registers {
	unsigned int base;
	int range;
};

/**
 * struct sparse_bind_object - Bind metadata
 * @node: Node for the rb tree
Loading