Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 78113d62, authored by qctecmdr Service and committed by Gerrit — the friendly Code Review server
Browse files

Merge "msm: kgsl: Move GMU kmem inside GMU struct"

parents 5a43e307 02f4f189
Loading
Loading
Loading
Loading
+14 −2
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/firmware.h>
@@ -19,6 +19,7 @@
#include "adreno_llc.h"
#include "kgsl_sharedmem.h"
#include "kgsl.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "kgsl_trace.h"

@@ -1435,6 +1436,7 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
	int ret;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);

	if (sqe_fw->memdesc.hostptr == NULL) {
@@ -1444,7 +1446,17 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
			return ret;
	}

	return gmu_core_dev_load_firmware(device);
	ret = gmu_core_dev_load_firmware(device);
	if (ret)
		return ret;

	ret = gmu_memory_probe(device);
	if (ret)
		return ret;

	hfi_init(gmu);

	return 0;
}

static int a6xx_soft_reset(struct adreno_device *adreno_dev)
+139 −15
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@
#include "kgsl_gmu_core.h"
#include "kgsl_gmu.h"
#include "kgsl_trace.h"
#include "kgsl_snapshot.h"

#include "adreno.h"
#include "a6xx_reg.h"
@@ -28,9 +29,14 @@ static const unsigned int a6xx_gmu_gx_registers[] = {
	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
};

static const unsigned int a6xx_gmu_tcm_registers[] = {
	/* ITCM */
	0x1B400, 0x1C3FF,
	/* DTCM */
	0x1C400, 0x1D3FF,
};

static const unsigned int a6xx_gmu_registers[] = {
	/* GMU TCM */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
	/* GMU CX */
	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
@@ -497,7 +503,7 @@ static int load_gmu_fw(struct kgsl_device *device)
		if (blk->size == 0)
			continue;

		md = gmu_get_memdesc(blk->addr, blk->size);
		md = gmu_get_memdesc(gmu, blk->addr, blk->size);
		if (md == NULL) {
			dev_err(&gmu->pdev->dev,
					"No backing memory for 0x%8.8X\n",
@@ -1126,16 +1132,20 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
			dev_dbg(&gmu->pdev->dev,
					"HFI VER: 0x%8.8x\n", blk->value);
			break;
		/* Skip preallocation requests for now */
		case GMU_BLK_TYPE_PREALLOC_REQ:
		case GMU_BLK_TYPE_PREALLOC_PERSIST_REQ:
			ret = gmu_prealloc_req(device, blk);
			if (ret)
				return ret;
			break;

		default:
			break;
		}
	}

	return 0;
	 /* Request any other cache ranges that might be required */
	return gmu_cache_finalize(device);
}

#define A6XX_STATE_OF_CHILD             (BIT(4) | BIT(5))
@@ -1480,27 +1490,113 @@ struct gmu_mem_type_desc {
static size_t a6xx_snapshot_gmu_mem(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_gmu *header = (struct kgsl_snapshot_gmu *)buf;
	struct kgsl_snapshot_gmu_mem *mem_hdr =
		(struct kgsl_snapshot_gmu_mem *)buf;
	unsigned int *data = (unsigned int *)
		(buf + sizeof(*mem_hdr));
	struct gmu_mem_type_desc *desc = priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));

	if (priv == NULL)
		return 0;

	if (remain < desc->memdesc->size + sizeof(*header)) {
	if (remain < desc->memdesc->size + sizeof(*mem_hdr)) {
		dev_err(device->dev,
			"snapshot: Not enough memory for the gmu section %d\n",
			desc->type);
		return 0;
	}

	header->type = desc->type;
	header->size = desc->memdesc->size;
	mem_hdr->type = desc->type;
	mem_hdr->hostaddr = (uintptr_t)desc->memdesc->hostptr;
	mem_hdr->gmuaddr = desc->memdesc->gmuaddr;
	mem_hdr->gpuaddr = 0;

	/* Just copy the ringbuffer, there are no active IBs */
	memcpy(data, desc->memdesc->hostptr, desc->memdesc->size);

	return desc->memdesc->size + sizeof(*header);
	return desc->memdesc->size + sizeof(*mem_hdr);
}

/* One GMU firmware version entry written into a snapshot debug section */
struct kgsl_snapshot_gmu_version {
	uint32_t type;	/* SNAPSHOT_DEBUG_GMU_* section identifier */
	uint32_t value;	/* Raw version word reported by the GMU */
};

/*
 * a6xx_snapshot_gmu_version() - Emit one GMU version word as a snapshot
 * debug section.
 * @device: KGSL device being snapshotted
 * @buf: Destination buffer for the section
 * @remain: Number of bytes still available in @buf
 * @priv: Pointer to the struct kgsl_snapshot_gmu_version to record
 *
 * Return: Bytes written into @buf, or 0 if @remain is too small.
 */
static size_t a6xx_snapshot_gmu_version(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_gmu_version *entry = priv;
	struct kgsl_snapshot_debug *hdr = (struct kgsl_snapshot_debug *)buf;
	uint32_t *payload = (uint32_t *)(buf + sizeof(*hdr));

	/* The section payload is exactly one dword */
	if (remain < DEBUG_SECTION_SZ(1)) {
		SNAPSHOT_ERR_NOMEM(device, "GMU Version");
		return 0;
	}

	hdr->type = entry->type;
	hdr->size = sizeof(uint32_t);
	*payload = entry->value;

	return DEBUG_SECTION_SZ(1);
}

/*
 * a6xx_gmu_snapshot_versions() - Add one debug section per GMU firmware
 * version word (core, core dev, power, power dev, HFI).
 * @device: KGSL device being snapshotted
 * @snapshot: Snapshot being assembled
 */
static void a6xx_gmu_snapshot_versions(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct kgsl_snapshot_gmu_version entries[] = {
		{ .type = SNAPSHOT_DEBUG_GMU_CORE_VERSION,
			.value = gmu->ver.core, },
		{ .type = SNAPSHOT_DEBUG_GMU_CORE_DEV_VERSION,
			.value = gmu->ver.core_dev, },
		{ .type = SNAPSHOT_DEBUG_GMU_PWR_VERSION,
			.value = gmu->ver.pwr, },
		{ .type = SNAPSHOT_DEBUG_GMU_PWR_DEV_VERSION,
			.value = gmu->ver.pwr_dev, },
		{ .type = SNAPSHOT_DEBUG_GMU_HFI_VERSION,
			.value = gmu->ver.hfi, },
	};
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(entries); idx++)
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
				snapshot, a6xx_snapshot_gmu_version,
				&entries[idx]);
}

/* Describes one GMU TCM region to dump via register reads */
struct a6xx_tcm_data {
	enum gmu_mem_type type;	/* GMU_ITCM or GMU_DTCM */
	u32 start;	/* First register (dword) offset of the region */
	u32 last;	/* Last register (dword) offset, inclusive */
};

/*
 * a6xx_snapshot_gmu_tcm() - Dump a GMU TCM region into the snapshot by
 * reading it out one register (dword) at a time.
 * @device: KGSL device being snapshotted
 * @buf: Destination buffer for the GMU memory section
 * @remain: Number of bytes still available in @buf
 * @priv: Pointer to the struct a6xx_tcm_data describing the region
 *
 * Return: Bytes written into @buf, or 0 if @remain is too small.
 */
static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_gmu_mem *mem_hdr =
		(struct kgsl_snapshot_gmu_mem *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
	unsigned int i, bytes;
	struct a6xx_tcm_data *tcm = priv;

	/* Inclusive dword range, converted to bytes (<< 2 == * 4) */
	bytes = (tcm->last - tcm->start + 1) << 2;

	if (remain < bytes + sizeof(*mem_hdr)) {
		SNAPSHOT_ERR_NOMEM(device, "GMU Memory");
		return 0;
	}

	/* TCM is not host mapped, so only the GMU-side address is known */
	mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
	mem_hdr->hostaddr = 0;
	mem_hdr->gmuaddr = gmu_get_memtype_base(tcm->type);
	mem_hdr->gpuaddr = 0;

	for (i = tcm->start; i <= tcm->last; i++)
		kgsl_regread(device, i, data++);

	return bytes + sizeof(*mem_hdr);
}

/*
@@ -1516,22 +1612,50 @@ static void a6xx_gmu_snapshot(struct kgsl_device *device,
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct gmu_mem_type_desc desc[] = {
		{gmu->hfi_mem, SNAPSHOT_GMU_HFIMEM},
		{gmu->gmu_log, SNAPSHOT_GMU_LOG},
		{gmu->dump_mem, SNAPSHOT_GMU_DUMPMEM} };
		{gmu->hfi_mem, SNAPSHOT_GMU_MEM_HFI},
		{gmu->gmu_log, SNAPSHOT_GMU_MEM_LOG},
		{gmu->dump_mem, SNAPSHOT_GMU_MEM_DEBUG},
	};
	unsigned int val, i;

	if (!gmu_core_isenabled(device))
		return;

	a6xx_gmu_snapshot_versions(device, snapshot);

	for (i = 0; i < ARRAY_SIZE(desc); i++) {
		if (desc[i].memdesc)
			kgsl_snapshot_add_section(device,
					KGSL_SNAPSHOT_SECTION_GMU,
					KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
					snapshot, a6xx_snapshot_gmu_mem,
					&desc[i]);
	}

	if (adreno_is_a640(adreno_dev) || adreno_is_a650(adreno_dev) ||
			adreno_is_a680(adreno_dev)) {
		struct a6xx_tcm_data tcm = {
			.type = GMU_ITCM,
			.start = a6xx_gmu_tcm_registers[0],
			.last = a6xx_gmu_tcm_registers[1],
		};

		kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
				snapshot, a6xx_snapshot_gmu_tcm, &tcm);

		tcm.type = GMU_DTCM;
		tcm.start = a6xx_gmu_tcm_registers[2],
		tcm.last = a6xx_gmu_tcm_registers[3],

		kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
				snapshot, a6xx_snapshot_gmu_tcm, &tcm);
	} else {
		adreno_snapshot_registers(device, snapshot,
				a6xx_gmu_tcm_registers,
				ARRAY_SIZE(a6xx_gmu_tcm_registers) / 2);
	}

	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
					ARRAY_SIZE(a6xx_gmu_registers) / 2);

+122 −82
Original line number Diff line number Diff line
@@ -24,7 +24,6 @@

#define GMU_CONTEXT_USER		0
#define GMU_CONTEXT_KERNEL		1
#define GMU_KERNEL_ENTRIES		16

#define GMU_CM3_CFG_NONMASKINTR_SHIFT    9

@@ -65,14 +64,17 @@ struct gmu_iommu_context gmu_ctx[] = {
 * active SMMU entries of GMU kernel mode context. Each entry is assigned
 * a unique address inside GMU kernel mode address range.
 */
static struct gmu_memdesc gmu_kmem_entries[GMU_KERNEL_ENTRIES];
static unsigned long gmu_kmem_bitmap;
static unsigned int next_uncached_kernel_alloc;
static unsigned int next_uncached_user_alloc;

static void gmu_snapshot(struct kgsl_device *device);
static void gmu_remove(struct kgsl_device *device);

/*
 * gmu_get_memtype_base() - Return the GMU virtual start address of the
 * given memory region type, as defined by the static gmu_vma layout table.
 */
unsigned int gmu_get_memtype_base(enum gmu_mem_type type)
{
	return gmu_vma[type].start;
}

static int _gmu_iommu_fault_handler(struct device *dev,
		unsigned long addr, int flags, const char *name)
{
@@ -143,16 +145,17 @@ static int alloc_and_map(struct gmu_device *gmu, struct gmu_memdesc *md,
	return ret;
}

struct gmu_memdesc *gmu_get_memdesc(unsigned int addr, unsigned int size)
struct gmu_memdesc *gmu_get_memdesc(struct gmu_device *gmu,
		unsigned int addr, unsigned int size)
{
	int i;
	struct gmu_memdesc *mem;

	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		if (!test_bit(i, &gmu_kmem_bitmap))
		if (!test_bit(i, &gmu->kmem_bitmap))
			continue;

		mem = &gmu_kmem_entries[i];
		mem = &gmu->kmem_entries[i];

		if (addr >= mem->gmuaddr &&
				(addr + size <= mem->gmuaddr + mem->size))
@@ -175,7 +178,7 @@ static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
	struct gmu_memdesc *md;
	int ret;
	int entry_idx = find_first_zero_bit(
			&gmu_kmem_bitmap, GMU_KERNEL_ENTRIES);
			&gmu->kmem_bitmap, GMU_KERNEL_ENTRIES);

	if (entry_idx >= GMU_KERNEL_ENTRIES) {
		dev_err(&gmu->pdev->dev,
@@ -192,8 +195,8 @@ static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
		return ERR_PTR(-EINVAL);
	}

	md = &gmu_kmem_entries[entry_idx];
	set_bit(entry_idx, &gmu_kmem_bitmap);
	md = &gmu->kmem_entries[entry_idx];
	set_bit(entry_idx, &gmu->kmem_bitmap);

	memset(md, 0, sizeof(*md));

@@ -240,7 +243,7 @@ static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
		dev_err(&gmu->pdev->dev,
				"Invalid memory type (%d) requested\n",
				mem_type);
		clear_bit(entry_idx, &gmu_kmem_bitmap);
		clear_bit(entry_idx, &gmu->kmem_bitmap);
		return ERR_PTR(-EINVAL);
	}

@@ -250,7 +253,7 @@ static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,

	ret = alloc_and_map(gmu, md, attrs);
	if (ret) {
		clear_bit(entry_idx, &gmu_kmem_bitmap);
		clear_bit(entry_idx, &gmu->kmem_bitmap);
		return ERR_PTR(ret);
	}

@@ -358,13 +361,14 @@ static void gmu_kmem_close(struct gmu_device *gmu)
	gmu->hfi_mem = NULL;
	gmu->dump_mem = NULL;
	gmu->gmu_log = NULL;
	gmu->preallocations = false;

	/* Unmap and free all memories in GMU kernel memory pool */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		if (!test_bit(i, &gmu_kmem_bitmap))
		if (!test_bit(i, &gmu->kmem_bitmap))
			continue;

		md = &gmu_kmem_entries[i];
		md = &gmu->kmem_entries[i];
		ctx = &gmu_ctx[md->ctx_idx];

		if (md->gmuaddr && md->mem_type != GMU_ITCM &&
@@ -373,7 +377,7 @@ static void gmu_kmem_close(struct gmu_device *gmu)

		free_gmu_mem(gmu, md);

		clear_bit(i, &gmu_kmem_bitmap);
		clear_bit(i, &gmu->kmem_bitmap);
	}

	/* Detach the device from SMMU context bank */
@@ -391,67 +395,56 @@ static void gmu_memory_close(struct gmu_device *gmu)

}

/*
 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
 * to share with GMU in kernel mode.
 * @device: Pointer to KGSL device
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
static int gmu_memory_probe(struct kgsl_device *device,
		struct gmu_device *gmu, struct device_node *node)
static enum gmu_mem_type gmu_get_blk_memtype(struct gmu_block_header *blk)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_memdesc *md;
	int ret;

	ret = gmu_iommu_init(gmu, node);
	if (ret)
		return ret;
	int i;

	/* Reserve a memdesc for ITCM. No actually memory allocated */
	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu_vma[GMU_ITCM].start,
			gmu_vma[GMU_ITCM].size, 0);
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	for (i = 0; i < ARRAY_SIZE(gmu_vma); i++) {
		if (blk->addr >= gmu_vma[i].start &&
				blk->addr + blk->value <=
				gmu_vma[i].start + gmu_vma[i].size)
			return (enum gmu_mem_type)i;
	}

	/* Reserve a memdesc for DTCM. No actually memory allocated */
	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu_vma[GMU_DTCM].start,
			gmu_vma[GMU_DTCM].size, 0);
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	return GMU_MEM_TYPE_MAX;
}

	/* Allocates & maps memory for DCACHE */
	md = allocate_gmu_kmem(gmu, GMU_DCACHE, gmu_vma[GMU_DCACHE].start,
			gmu_vma[GMU_DCACHE].size,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	}
int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	enum gmu_mem_type type;
	struct gmu_memdesc *md;

	/* Allocates & maps memory for ICACHE */
	md = allocate_gmu_kmem(gmu, GMU_ICACHE, gmu_vma[GMU_ICACHE].start,
			gmu_vma[GMU_ICACHE].size,
	/* Check to see if this memdesc is already around */
	md = gmu_get_memdesc(gmu, blk->addr, blk->value);
	if (md)
		return 0;

	type = gmu_get_blk_memtype(blk);
	if (type >= GMU_MEM_TYPE_MAX)
		return -EINVAL;

	md = allocate_gmu_kmem(gmu, type, blk->addr, blk->value,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	}
	if (IS_ERR(md))
		return PTR_ERR(md);

	/* Allocates & maps memory for WB DUMMY PAGE */
	/* Must be the first UNCACHED alloc */
	md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
			DUMMY_SIZE, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	gmu->preallocations = true;

	return 0;
}

/*
 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
 * to share with GMU in kernel mode.
 * @device: Pointer to KGSL device
 */
int gmu_memory_probe(struct kgsl_device *device)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;

	/* Allocates & maps memory for HFI */
	gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
			HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
@@ -478,17 +471,6 @@ static int gmu_memory_probe(struct kgsl_device *device,
		goto err_ret;
	}

	if (ADRENO_FEATURE(adreno_dev, ADRENO_ECP)) {
		/* Allocation to account for future MEM_ALLOC buffers */
		md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
				0, SZ_32K,
				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
		if (IS_ERR(md)) {
			ret = PTR_ERR(md);
			goto err_ret;
		}
	}

	return 0;
err_ret:
	gmu_memory_close(gmu);
@@ -1286,11 +1268,68 @@ static int gmu_acd_set(struct kgsl_device *device, unsigned int val)
	return 0;
}

static int gmu_tcm_init(struct gmu_device *gmu)
{
	struct gmu_memdesc *md;

	/* Reserve a memdesc for ITCM. No actually memory allocated */
	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu_vma[GMU_ITCM].start,
			gmu_vma[GMU_ITCM].size, 0);
	if (IS_ERR(md))
		return PTR_ERR(md);

	/* Reserve a memdesc for DTCM. No actually memory allocated */
	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu_vma[GMU_DTCM].start,
			gmu_vma[GMU_DTCM].size, 0);

	return PTR_ERR_OR_ZERO(md);
}

int gmu_cache_finalize(struct kgsl_device *device)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct gmu_memdesc *md;

	/* Preallocations were made so no need to request all this memory */
	if (gmu->preallocations)
		return 0;

	md = allocate_gmu_kmem(gmu, GMU_ICACHE,
			gmu_vma[GMU_ICACHE].start, gmu_vma[GMU_ICACHE].size,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md))
		return PTR_ERR(md);

	md = allocate_gmu_kmem(gmu, GMU_DCACHE,
			gmu_vma[GMU_DCACHE].start, gmu_vma[GMU_DCACHE].size,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md))
		return PTR_ERR(md);

	md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
			0, DUMMY_SIZE,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md))
		return PTR_ERR(md);

	if (ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_ECP)) {
		/* Allocation to account for future MEM_ALLOC buffers */
		md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
				0, SZ_32K,
				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
		if (IS_ERR(md))
			return PTR_ERR(md);
	}

	gmu->preallocations = true;

	return 0;
}

/* Do not access any GMU registers in GMU probe function */
static int gmu_probe(struct kgsl_device *device, struct device_node *node)
{
	struct gmu_device *gmu;
	struct gmu_memdesc *mem_addr = NULL;
	struct kgsl_hfi *hfi;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1319,10 +1358,13 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
		goto error;

	/* Set up GMU IOMMU and shared memory with GMU */
	ret = gmu_memory_probe(device, gmu, node);
	ret = gmu_iommu_init(gmu, node);
	if (ret)
		goto error;

	ret = gmu_tcm_init(gmu);
	if (ret)
		goto error;
	mem_addr = gmu->hfi_mem;

	/* Map and reserve GMU CSRs registers */
	ret = gmu_reg_probe(device);
@@ -1379,8 +1421,6 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
	if (ret)
		goto error;

	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);

	/* Set up GMU idle states */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
		gmu->idle_level = GPU_HW_MIN_VOLT;
+13 −1
Original line number Diff line number Diff line
@@ -76,6 +76,9 @@ struct gmu_block_header {
/* For GMU Logs*/
#define LOGMEM_SIZE  SZ_4K

/* GMU memdesc entries */
#define GMU_KERNEL_ENTRIES		16

extern struct gmu_dev_ops adreno_a6xx_gmudev;
#define KGSL_GMU_DEVICE(_a)  ((struct gmu_device *)((_a)->gmu_core.ptr))

@@ -206,8 +209,17 @@ struct gmu_device {
	unsigned int idle_level;
	unsigned int fault_count;
	struct kgsl_mailbox mailbox;
	bool preallocations;
	struct gmu_memdesc kmem_entries[GMU_KERNEL_ENTRIES];
	unsigned long kmem_bitmap;
};

struct gmu_memdesc *gmu_get_memdesc(unsigned int addr, unsigned int size);
struct gmu_memdesc *gmu_get_memdesc(struct gmu_device *gmu,
		unsigned int addr, unsigned int size);
unsigned int gmu_get_memtype_base(enum gmu_mem_type type);

int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk);
int gmu_memory_probe(struct kgsl_device *device);
int gmu_cache_finalize(struct kgsl_device *device);

#endif /* __KGSL_GMU_H */
+4 −3
Original line number Diff line number Diff line
@@ -174,10 +174,11 @@ static int hfi_queue_write(struct gmu_device *gmu, uint32_t queue_idx,


/* Sizes of the queue and message are in unit of dwords */
void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr,
		uint32_t queue_sz_bytes)
void hfi_init(struct gmu_device *gmu)
{
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(hfi->kgsldev);
	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	int i;
	struct hfi_queue_table *tbl;
	struct hfi_queue_header *hdr;
@@ -218,7 +219,7 @@ void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr,
		hdr->start_addr = GMU_QUEUE_START_ADDR(mem_addr, i);
		hdr->type = QUEUE_HDR_TYPE(queue[i].idx, queue[i].pri, 0,  0);
		hdr->status = queue[i].status;
		hdr->queue_size = queue_sz_bytes >> 2; /* convert to dwords */
		hdr->queue_size = HFI_QUEUE_SIZE >> 2; /* convert to dwords */
		hdr->msg_size = 0;
		hdr->drop_cnt = 0;
		hdr->rx_wm = 0x1;
Loading