
Commit 28895a72 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "msm: kgsl: Add A650 GMU memory map"

parents 1701d5eb adc96c2a
+2 −0
@@ -124,6 +124,8 @@
#define ADRENO_ACD BIT(17)
/* ECP enabled GMU */
#define ADRENO_ECP BIT(18)
/* Cooperative reset enabled GMU */
#define ADRENO_COOP_RESET BIT(19)

/*
 * Adreno GPU quirks - control bits for various workarounds
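
Note: ADRENO_COOP_RESET is a per-target feature bit in the same family as ADRENO_ACD and ADRENO_ECP above; targets that advertise it get the relaxed GMU boot handshake in the next file. A minimal sketch of gating on the new bit, using the ADRENO_FEATURE() and ADRENO_DEVICE() helpers this diff itself calls (illustration only, not part of the commit):

/* Illustration: true when the target advertises cooperative reset. */
static bool a6xx_gmu_has_coop_reset(struct kgsl_device *device)
{
	return ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_COOP_RESET);
}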
+105 −74
@@ -314,6 +314,14 @@ static void a6xx_gmu_power_config(struct kgsl_device *device)
static int a6xx_gmu_start(struct kgsl_device *device)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	u32 val = 0x00000100;
	u32 mask = 0x000001FF;

	/* Check for 0xBABEFACE on legacy targets */
	if (!ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_COOP_RESET)) {
		val = 0xBABEFACE;
		mask = 0xFFFFFFFF;
	}

	kgsl_regwrite(device, A6XX_GMU_CX_GMU_WFI_CONFIG, 0x0);

@@ -321,9 +329,7 @@ static int a6xx_gmu_start(struct kgsl_device *device)
	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);
	if (timed_poll_check(device,
			A6XX_GMU_CM3_FW_INIT_RESULT,
-			0xBABEFACE,
-			GMU_START_TIMEOUT,
-			0xFFFFFFFF)) {
+			val, GMU_START_TIMEOUT, mask)) {
		dev_err(&gmu->pdev->dev, "GMU doesn't boot\n");
		return -ETIMEDOUT;
	}
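
The hunk above relaxes the boot handshake: legacy GMU firmware signals a finished boot by writing the full 0xBABEFACE cookie to A6XX_GMU_CM3_FW_INIT_RESULT, so the whole register is compared (mask 0xFFFFFFFF), while cooperative-reset firmware only guarantees bit 8, hence val = 0x00000100 under mask 0x000001FF. A sketch of the masked-poll pattern that timed_poll_check() implements, assuming nothing beyond the (device, reg, expected, timeout, mask) signature visible here (the real helper lives elsewhere in the driver and may differ):

/*
 * Illustration only: poll a register until the selected bits match
 * the expected value, or give up after timeout_ms.
 */
static int poll_reg_masked(struct kgsl_device *device, unsigned int offset,
		unsigned int expected, unsigned int timeout_ms, unsigned int mask)
{
	unsigned long expires = jiffies + msecs_to_jiffies(timeout_ms);
	unsigned int val;

	do {
		kgsl_regread(device, offset, &val);
		if ((val & mask) == (expected & mask))
			return 0;
		usleep_range(10, 100);
	} while (!time_after(jiffies, expires));

	return -ETIMEDOUT;
}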
@@ -953,7 +959,6 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	uint32_t gmu_log_info;
	int ret;
	unsigned int chipid = 0;
@@ -996,7 +1001,7 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
	gmu_core_regwrite(device, A6XX_GMU_CM3_BOOT_CONFIG, gmu->load_mode);

	gmu_core_regwrite(device, A6XX_GMU_HFI_QTBL_ADDR,
-			mem_addr->gmuaddr);
+			gmu->hfi_mem->gmuaddr);
	gmu_core_regwrite(device, A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_core_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
@@ -1506,6 +1511,7 @@ static size_t a6xx_snapshot_gmu_mem(struct kgsl_device *device,
		return 0;
	}

	memset(mem_hdr, 0, sizeof(*mem_hdr));
	mem_hdr->type = desc->type;
	mem_hdr->hostaddr = (uintptr_t)desc->memdesc->hostptr;
	mem_hdr->gmuaddr = desc->memdesc->gmuaddr;
@@ -1517,6 +1523,94 @@ static size_t a6xx_snapshot_gmu_mem(struct kgsl_device *device,
	return desc->memdesc->size + sizeof(*mem_hdr);
}

struct a6xx_tcm_data {
	enum gmu_mem_type type;
	u32 start;
	u32 last;
};

static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_gmu_mem *mem_hdr =
		(struct kgsl_snapshot_gmu_mem *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
	unsigned int i, bytes;
	struct a6xx_tcm_data *tcm = priv;

	bytes = (tcm->last - tcm->start + 1) << 2;

	if (remain < bytes + sizeof(*mem_hdr)) {
		SNAPSHOT_ERR_NOMEM(device, "GMU Memory");
		return 0;
	}

	mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
	mem_hdr->hostaddr = 0;
	mem_hdr->gmuaddr = gmu_get_memtype_base(KGSL_GMU_DEVICE(device),
			tcm->type);
	mem_hdr->gpuaddr = 0;

	for (i = tcm->start; i <= tcm->last; i++)
		kgsl_regread(device, i, data++);

	return bytes + sizeof(*mem_hdr);
}
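
/*
 * Worked example (illustration, assuming the register window spans a
 * full 16K TCM): last - start + 1 == 4096 dwords, and 4096 << 2 ==
 * 16384 bytes == SZ_16K, matching the ITCM size in the GMU VMA tables
 * added later in this change.
 */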

static void a6xx_gmu_snapshot_memories(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct gmu_mem_type_desc desc;
	struct gmu_memdesc *md;
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->kmem_entries); i++) {
		if (!test_bit(i, &gmu->kmem_bitmap))
			continue;

		md = &gmu->kmem_entries[i];
		if (!md->size)
			continue;

		desc.memdesc = md;
		if (md == gmu->hfi_mem)
			desc.type = SNAPSHOT_GMU_MEM_HFI;
		else if (md == gmu->gmu_log)
			desc.type = SNAPSHOT_GMU_MEM_LOG;
		else if (md == gmu->dump_mem)
			desc.type = SNAPSHOT_GMU_MEM_DEBUG;
		else
			desc.type = SNAPSHOT_GMU_MEM_BIN_BLOCK;

		if (md->mem_type == GMU_ITCM) {
			struct a6xx_tcm_data tcm = {
				.type = md->mem_type,
				.start = a6xx_gmu_tcm_registers[0],
				.last = a6xx_gmu_tcm_registers[1],
			};

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
				snapshot, a6xx_snapshot_gmu_tcm, &tcm);
		} else if (md->mem_type == GMU_DTCM) {
			struct a6xx_tcm_data tcm = {
				.type = md->mem_type,
				.start = a6xx_gmu_tcm_registers[2],
				.last = a6xx_gmu_tcm_registers[3],
			};

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
				snapshot, a6xx_snapshot_gmu_tcm, &tcm);
		} else {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
				snapshot, a6xx_snapshot_gmu_mem, &desc);
		}
	}
}

struct kgsl_snapshot_gmu_version {
	uint32_t type;
	uint32_t value;
@@ -1566,42 +1660,9 @@ static void a6xx_gmu_snapshot_versions(struct kgsl_device *device,
				&gmu_vers[i]);
}

-struct a6xx_tcm_data {
-	enum gmu_mem_type type;
-	u32 start;
-	u32 last;
-};
-
-static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device,
-		u8 *buf, size_t remain, void *priv)
-{
-	struct kgsl_snapshot_gmu_mem *mem_hdr =
-		(struct kgsl_snapshot_gmu_mem *)buf;
-	unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
-	unsigned int i, bytes;
-	struct a6xx_tcm_data *tcm = priv;
-
-	bytes = (tcm->last - tcm->start + 1) << 2;
-
-	if (remain < bytes + sizeof(*mem_hdr)) {
-		SNAPSHOT_ERR_NOMEM(device, "GMU Memory");
-		return 0;
-	}
-
-	mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
-	mem_hdr->hostaddr = 0;
-	mem_hdr->gmuaddr = gmu_get_memtype_base(tcm->type);
-	mem_hdr->gpuaddr = 0;
-
-	for (i = tcm->start; i <= tcm->last; i++)
-		kgsl_regread(device, i, data++);
-
-	return bytes + sizeof(*mem_hdr);
-}

/*
 * a6xx_gmu_snapshot() - A6XX GMU snapshot function
 * @adreno_dev: Device being snapshotted
 * @device: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX GMU specific bits and pieces are grabbed
@@ -1610,51 +1671,21 @@ static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device,
static void a6xx_gmu_snapshot(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
-	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-	struct gmu_mem_type_desc desc[] = {
-		{gmu->hfi_mem, SNAPSHOT_GMU_MEM_HFI},
-		{gmu->gmu_log, SNAPSHOT_GMU_MEM_LOG},
-		{gmu->dump_mem, SNAPSHOT_GMU_MEM_DEBUG},
-	};
-	unsigned int val, i;
+	unsigned int val;

	if (!gmu_core_isenabled(device))
		return;

	a6xx_gmu_snapshot_versions(device, snapshot);

-	for (i = 0; i < ARRAY_SIZE(desc); i++) {
-		if (desc[i].memdesc)
-			kgsl_snapshot_add_section(device,
-					KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
-					snapshot, a6xx_snapshot_gmu_mem,
-					&desc[i]);
-	}
-
-	if (adreno_is_a640(adreno_dev) || adreno_is_a650(adreno_dev) ||
-			adreno_is_a680(adreno_dev)) {
-		struct a6xx_tcm_data tcm = {
-			.type = GMU_ITCM,
-			.start = a6xx_gmu_tcm_registers[0],
-			.last = a6xx_gmu_tcm_registers[1],
-		};
-
-		kgsl_snapshot_add_section(device,
-				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
-				snapshot, a6xx_snapshot_gmu_tcm, &tcm);
-
-		tcm.type = GMU_DTCM;
-		tcm.start = a6xx_gmu_tcm_registers[2],
-		tcm.last = a6xx_gmu_tcm_registers[3],
+	a6xx_gmu_snapshot_memories(device, snapshot);

-		kgsl_snapshot_add_section(device,
-				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
-				snapshot, a6xx_snapshot_gmu_tcm, &tcm);
-	} else {
+	/* Snapshot tcms as registers for legacy targets */
+	if (adreno_is_a630(ADRENO_DEVICE(device)) ||
+			adreno_is_a615_family(ADRENO_DEVICE(device)))
		adreno_snapshot_registers(device, snapshot,
				a6xx_gmu_tcm_registers,
				ARRAY_SIZE(a6xx_gmu_tcm_registers) / 2);
-	}

	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
					ARRAY_SIZE(a6xx_gmu_registers) / 2);
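
With this rework, a6xx_gmu_snapshot() no longer hard-codes the three well-known allocations plus per-target TCM blocks. Instead, a6xx_gmu_snapshot_memories() walks every kernel-side GMU allocation tracked in gmu->kmem_entries under gmu->kmem_bitmap, labels the ones it recognizes (HFI, log, dump), and dumps ITCM/DTCM through register reads. The same walk could be written with the kernel's for_each_set_bit() helper instead of the index loop with test_bit() used by the commit; a sketch, for illustration only:

#include <linux/bitops.h>

/* Equivalent bitmap walk over the tracked GMU kernel allocations. */
static void walk_gmu_kmem_entries(struct gmu_device *gmu)
{
	unsigned long idx;

	for_each_set_bit(idx, &gmu->kmem_bitmap, GMU_KERNEL_ENTRIES) {
		struct gmu_memdesc *md = &gmu->kmem_entries[idx];

		if (!md->size)
			continue;
		/* classify md and add a snapshot section, as above */
	}
}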
+45 −21
@@ -38,16 +38,30 @@ struct gmu_iommu_context {
#define DUMMY_SIZE   SZ_4K

/* Define target specific GMU VMA configurations */
-static const struct gmu_vma_entry {
+struct gmu_vma_entry {
	unsigned int start;
	unsigned int size;
-} gmu_vma[] = {
+};
+
+static const struct gmu_vma_entry gmu_vma_legacy[] = {
	[GMU_ITCM] = { .start = 0x00000, .size = SZ_16K },
	[GMU_ICACHE] = { .start = 0x04000, .size = (SZ_256K - SZ_16K) },
	[GMU_DTCM] = { .start = 0x40000, .size = SZ_16K },
	[GMU_DCACHE] = { .start = 0x44000, .size = (SZ_256K - SZ_16K) },
	[GMU_NONCACHED_KERNEL] = { .start = 0x60000000, .size = SZ_512M },
	[GMU_NONCACHED_USER] = { .start = 0x80000000, .size = SZ_1G },
	[GMU_MEM_TYPE_MAX] = { .start = 0x0, .size = 0x0 },
};

static const struct gmu_vma_entry gmu_vma[] = {
	[GMU_ITCM] = { .start = 0x00000000, .size = SZ_16K },
	[GMU_CACHE] = { .start = SZ_16K, .size = (SZ_16M - SZ_16K) },
	[GMU_DTCM] = { .start = SZ_256M + SZ_16K, .size = SZ_16K },
	[GMU_DCACHE] = { .start = 0x0, .size = 0x0 },
	[GMU_NONCACHED_KERNEL] = { .start = 0x60000000, .size = SZ_512M },
	[GMU_NONCACHED_USER] = { .start = 0x80000000, .size = SZ_1G },
	[GMU_MEM_TYPE_MAX] = { .start = 0x0, .size = 0x0 },
};

struct gmu_iommu_context gmu_ctx[] = {
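
The two tables capture the layout change: the legacy map interleaves the 16K ITCM and DTCM with 240K instruction and data caches in the low 512K, while the A650 map has a single unified GMU_CACHE running from 16K up to 16M, moves DTCM up to SZ_256M + SZ_16K, and leaves GMU_DCACHE empty. A sketch of classifying an address against whichever table the probe selected, mirroring the gmu_get_blk_memtype() change below (illustration only):

/* Illustration: does addr fall inside one region of the active map? */
static bool gmu_addr_in_region(const struct gmu_vma_entry *vma,
		enum gmu_mem_type type, unsigned int addr)
{
	return addr >= vma[type].start &&
		addr < vma[type].start + vma[type].size;
}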
@@ -70,9 +84,10 @@ static unsigned int next_uncached_user_alloc;
static void gmu_snapshot(struct kgsl_device *device);
static void gmu_remove(struct kgsl_device *device);

-unsigned int gmu_get_memtype_base(enum gmu_mem_type type)
+unsigned int gmu_get_memtype_base(struct gmu_device *gmu,
+		enum gmu_mem_type type)
{
-	return gmu_vma[type].start;
+	return gmu->vma[type].start;
}

static int _gmu_iommu_fault_handler(struct device *dev,
@@ -218,7 +233,7 @@ static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
	case GMU_NONCACHED_KERNEL:
		/* Set start address for first uncached kernel alloc */
		if (next_uncached_kernel_alloc == 0)
-			next_uncached_kernel_alloc = gmu_vma[mem_type].start;
+			next_uncached_kernel_alloc = gmu->vma[mem_type].start;

		if (addr == 0)
			addr = next_uncached_kernel_alloc;
@@ -230,7 +245,7 @@ static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
	case GMU_NONCACHED_USER:
		/* Set start address for first uncached user alloc */
		if (next_uncached_kernel_alloc == 0)
-			next_uncached_user_alloc = gmu_vma[mem_type].start;
+			next_uncached_user_alloc = gmu->vma[mem_type].start;

		if (addr == 0)
			addr = next_uncached_user_alloc;
@@ -395,14 +410,15 @@ static void gmu_memory_close(struct gmu_device *gmu)

}

-static enum gmu_mem_type gmu_get_blk_memtype(struct gmu_block_header *blk)
+static enum gmu_mem_type gmu_get_blk_memtype(struct gmu_device *gmu,
+		struct gmu_block_header *blk)
{
	int i;

-	for (i = 0; i < ARRAY_SIZE(gmu_vma); i++) {
-		if (blk->addr >= gmu_vma[i].start &&
+	for (i = 0; i < GMU_MEM_TYPE_MAX; i++) {
+		if (blk->addr >= gmu->vma[i].start &&
				blk->addr + blk->value <=
-				gmu_vma[i].start + gmu_vma[i].size)
+				gmu->vma[i].start + gmu->vma[i].size)
			return (enum gmu_mem_type)i;
	}

@@ -420,7 +436,7 @@ int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk)
	if (md)
		return 0;

-	type = gmu_get_blk_memtype(blk);
+	type = gmu_get_blk_memtype(gmu, blk);
	if (type >= GMU_MEM_TYPE_MAX)
		return -EINVAL;

@@ -1273,14 +1289,14 @@ static int gmu_tcm_init(struct gmu_device *gmu)
	struct gmu_memdesc *md;

	/* Reserve a memdesc for ITCM. No actually memory allocated */
-	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu_vma[GMU_ITCM].start,
-			gmu_vma[GMU_ITCM].size, 0);
+	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu->vma[GMU_ITCM].start,
+			gmu->vma[GMU_ITCM].size, 0);
	if (IS_ERR(md))
		return PTR_ERR(md);

	/* Reserve a memdesc for DTCM. No actually memory allocated */
-	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu_vma[GMU_DTCM].start,
-			gmu_vma[GMU_DTCM].size, 0);
+	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu->vma[GMU_DTCM].start,
+			gmu->vma[GMU_DTCM].size, 0);

	return PTR_ERR_OR_ZERO(md);
}
@@ -1295,16 +1311,19 @@ int gmu_cache_finalize(struct kgsl_device *device)
		return 0;

	md = allocate_gmu_kmem(gmu, GMU_ICACHE,
-			gmu_vma[GMU_ICACHE].start, gmu_vma[GMU_ICACHE].size,
+			gmu->vma[GMU_ICACHE].start, gmu->vma[GMU_ICACHE].size,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md))
		return PTR_ERR(md);

-	md = allocate_gmu_kmem(gmu, GMU_DCACHE,
-			gmu_vma[GMU_DCACHE].start, gmu_vma[GMU_DCACHE].size,
-			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
-	if (IS_ERR(md))
-		return PTR_ERR(md);
+	if (!adreno_is_a650(ADRENO_DEVICE(device))) {
+		md = allocate_gmu_kmem(gmu, GMU_DCACHE,
+				gmu->vma[GMU_DCACHE].start,
+				gmu->vma[GMU_DCACHE].size,
+				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+		if (IS_ERR(md))
+			return PTR_ERR(md);
+	}

	md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
			0, DUMMY_SIZE,
@@ -1362,6 +1381,11 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
	if (ret)
		goto error;

	if (adreno_is_a650(adreno_dev))
		gmu->vma = gmu_vma;
	else
		gmu->vma = gmu_vma_legacy;

	ret = gmu_tcm_init(gmu);
	if (ret)
		goto error;
+4 −1
@@ -85,6 +85,7 @@ extern struct gmu_dev_ops adreno_a6xx_gmudev;
enum gmu_mem_type {
	GMU_ITCM = 0,
	GMU_ICACHE,
	GMU_CACHE = GMU_ICACHE,
	GMU_DTCM,
	GMU_DCACHE,
	GMU_NONCACHED_KERNEL,
@@ -212,11 +213,13 @@ struct gmu_device {
	bool preallocations;
	struct gmu_memdesc kmem_entries[GMU_KERNEL_ENTRIES];
	unsigned long kmem_bitmap;
	const struct gmu_vma_entry *vma;
};

struct gmu_memdesc *gmu_get_memdesc(struct gmu_device *gmu,
		unsigned int addr, unsigned int size);
-unsigned int gmu_get_memtype_base(enum gmu_mem_type type);
+unsigned int gmu_get_memtype_base(struct gmu_device *gmu,
+		enum gmu_mem_type type);

int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk);
int gmu_memory_probe(struct kgsl_device *device);
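
Because GMU_CACHE is an alias of GMU_ICACHE, the A650's unified cache occupies the slot legacy code already indexes for the instruction cache, so gmu_cache_finalize() can allocate GMU_ICACHE unconditionally and skip only the separate GMU_DCACHE on A650. A compile-time illustration of the alias (hypothetical check, not in the commit):

_Static_assert(GMU_CACHE == GMU_ICACHE,
	"the unified GMU cache must alias the ICACHE slot");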
+5 −15
@@ -50,11 +50,8 @@ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx,
	if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
		return -EINVAL;

-	if (hdr->read_index == hdr->write_index) {
-		hdr->rx_req = 1;
-		result = -ENODATA;
-		goto done;
-	}
+	if (hdr->read_index == hdr->write_index)
+		return -ENODATA;

	/* Clear the output data before populating */
	memset(output, 0, max_size);
@@ -133,7 +130,6 @@ static int hfi_queue_write(struct gmu_device *gmu, uint32_t queue_idx,
			"Insufficient bufsize %d for msg id=%d of size %d\n",
			empty_space, id, size);

-		hdr->drop_cnt++;
		mutex_unlock(&hfi->cmdq_mutex);
		return -ENOSPC;
	}
@@ -213,21 +209,15 @@ void hfi_init(struct gmu_device *gmu)
	tbl->qtbl_hdr.num_q = HFI_QUEUE_MAX;
	tbl->qtbl_hdr.num_active_q = HFI_QUEUE_MAX;

-	/* Fill I dividual Queue Headers */
+	memset(&tbl->qhdr[0], 0, sizeof(tbl->qhdr));
+
+	/* Fill Individual Queue Headers */
	for (i = 0; i < HFI_QUEUE_MAX; i++) {
		hdr = &tbl->qhdr[i];
		hdr->start_addr = GMU_QUEUE_START_ADDR(mem_addr, i);
		hdr->type = QUEUE_HDR_TYPE(queue[i].idx, queue[i].pri, 0,  0);
		hdr->status = queue[i].status;
		hdr->queue_size = HFI_QUEUE_SIZE >> 2; /* convert to dwords */
-		hdr->msg_size = 0;
-		hdr->drop_cnt = 0;
-		hdr->rx_wm = 0x1;
-		hdr->tx_wm = 0x1;
-		hdr->rx_req = 0x1;
-		hdr->tx_req = 0x0;
-		hdr->read_index = 0x0;
-		hdr->write_index = 0x0;
	}

	mutex_init(&hfi->cmdq_mutex);
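
Two simplifications land in hfi_init(): one memset() zeroes the whole queue-header block, so the loop only assigns the fields with meaningful non-zero values (start_addr, type, status, queue_size), and read_index == write_index now denotes an empty queue, which is exactly the state hfi_queue_read() above reports as -ENODATA. A minimal sketch of that emptiness test, assuming the hfi_queue_header type this file uses (illustration only):

/* Illustration: a freshly zeroed queue starts out empty. */
static bool hfi_queue_is_empty(const struct hfi_queue_header *hdr)
{
	return hdr->read_index == hdr->write_index;
}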