Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a5fae981 authored by Carter Cooper
Browse files

msm: kgsl: Allow GMU preallocation requests



Allow the GMU firmware to make GMU address assignments before
KGSL is allowed to allocate from the GMU address space.

Change-Id: I0e2b4f3cd5a7f993f09338b7756eaeb4050a06ff
Signed-off-by: Carter Cooper <ccooper@codeaurora.org>
parent 957aee4a
Loading
Loading
Loading
Loading
+14 −2
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/firmware.h>
@@ -19,6 +19,7 @@
#include "adreno_llc.h"
#include "kgsl_sharedmem.h"
#include "kgsl.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "kgsl_trace.h"

@@ -1435,6 +1436,7 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
	int ret;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);

	if (sqe_fw->memdesc.hostptr == NULL) {
@@ -1444,7 +1446,17 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
			return ret;
	}

	return gmu_core_dev_load_firmware(device);
	ret = gmu_core_dev_load_firmware(device);
	if (ret)
		return ret;

	ret = gmu_memory_probe(device);
	if (ret)
		return ret;

	hfi_init(gmu);

	return 0;
}

static int a6xx_soft_reset(struct adreno_device *adreno_dev)
+6 −2
Original line number Diff line number Diff line
@@ -1132,16 +1132,20 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
			dev_dbg(&gmu->pdev->dev,
					"HFI VER: 0x%8.8x\n", blk->value);
			break;
		/* Skip preallocation requests for now */
		case GMU_BLK_TYPE_PREALLOC_REQ:
		case GMU_BLK_TYPE_PREALLOC_PERSIST_REQ:
			ret = gmu_prealloc_req(device, blk);
			if (ret)
				return ret;
			break;

		default:
			break;
		}
	}

	return 0;
	/* Request any other cache ranges that might be required */
	return gmu_cache_finalize(device);
}

#define A6XX_STATE_OF_CHILD             (BIT(4) | BIT(5))
+105 −68
Original line number Diff line number Diff line
@@ -363,6 +363,7 @@ static void gmu_kmem_close(struct gmu_device *gmu)
	gmu->hfi_mem = NULL;
	gmu->dump_mem = NULL;
	gmu->gmu_log = NULL;
	gmu->preallocations = false;

	/* Unmap and free all memories in GMU kernel memory pool */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
@@ -396,67 +397,56 @@ static void gmu_memory_close(struct gmu_device *gmu)

}

/*
 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
 * to share with GMU in kernel mode.
 * @device: Pointer to KGSL device
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
static int gmu_memory_probe(struct kgsl_device *device,
		struct gmu_device *gmu, struct device_node *node)
static enum gmu_mem_type gmu_get_blk_memtype(struct gmu_block_header *blk)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_memdesc *md;
	int ret;

	ret = gmu_iommu_init(gmu, node);
	if (ret)
		return ret;
	int i;

	/* Reserve a memdesc for ITCM. No actual memory allocated */
	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu_vma[GMU_ITCM].start,
			gmu_vma[GMU_ITCM].size, 0);
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	for (i = 0; i < ARRAY_SIZE(gmu_vma); i++) {
		if (blk->addr >= gmu_vma[i].start &&
				blk->addr + blk->value <=
				gmu_vma[i].start + gmu_vma[i].size)
			return (enum gmu_mem_type)i;
	}

	/* Reserve a memdesc for DTCM. No actual memory allocated */
	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu_vma[GMU_DTCM].start,
			gmu_vma[GMU_DTCM].size, 0);
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	return GMU_MEM_TYPE_MAX;
}

	/* Allocates & maps memory for DCACHE */
	md = allocate_gmu_kmem(gmu, GMU_DCACHE, gmu_vma[GMU_DCACHE].start,
			gmu_vma[GMU_DCACHE].size,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	}
int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	enum gmu_mem_type type;
	struct gmu_memdesc *md;

	/* Check to see if this memdesc is already around */
	md = gmu_get_memdesc(blk->addr, blk->value);
	if (md)
		return 0;

	/* Allocates & maps memory for ICACHE */
	md = allocate_gmu_kmem(gmu, GMU_ICACHE, gmu_vma[GMU_ICACHE].start,
			gmu_vma[GMU_ICACHE].size,
	type = gmu_get_blk_memtype(blk);
	if (type >= GMU_MEM_TYPE_MAX)
		return -EINVAL;

	md = allocate_gmu_kmem(gmu, type, blk->addr, blk->value,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	}
	if (IS_ERR(md))
		return PTR_ERR(md);

	/* Allocates & maps memory for WB DUMMY PAGE */
	/* Must be the first UNCACHED alloc */
	md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
			DUMMY_SIZE, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto err_ret;
	gmu->preallocations = true;

	return 0;
}

/*
 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
 * to share with GMU in kernel mode.
 * @device: Pointer to KGSL device
 */
int gmu_memory_probe(struct kgsl_device *device)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;

	/* Allocates & maps memory for HFI */
	gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
			HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
@@ -483,17 +473,6 @@ static int gmu_memory_probe(struct kgsl_device *device,
		goto err_ret;
	}

	if (ADRENO_FEATURE(adreno_dev, ADRENO_ECP)) {
		/* Allocation to account for future MEM_ALLOC buffers */
		md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
				0, SZ_32K,
				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
		if (IS_ERR(md)) {
			ret = PTR_ERR(md);
			goto err_ret;
		}
	}

	return 0;
err_ret:
	gmu_memory_close(gmu);
@@ -1291,11 +1270,68 @@ static int gmu_acd_set(struct kgsl_device *device, unsigned int val)
	return 0;
}

/*
 * gmu_tcm_init() - Reserve GMU virtual address ranges for ITCM and DTCM
 * @gmu: Pointer to GMU device
 *
 * Both regions are address-space reservations only; no backing memory
 * is allocated for either TCM (hence the 0 attribute flags).
 *
 * Return: 0 on success or a negative error code from allocate_gmu_kmem().
 */
static int gmu_tcm_init(struct gmu_device *gmu)
{
	struct gmu_memdesc *md;

	/* Reserve a memdesc for ITCM. No actual memory allocated */
	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu_vma[GMU_ITCM].start,
			gmu_vma[GMU_ITCM].size, 0);
	if (IS_ERR(md))
		return PTR_ERR(md);

	/* Reserve a memdesc for DTCM. No actual memory allocated */
	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu_vma[GMU_DTCM].start,
			gmu_vma[GMU_DTCM].size, 0);

	/* PTR_ERR_OR_ZERO maps an ERR_PTR to its errno, else returns 0 */
	return PTR_ERR_OR_ZERO(md);
}

/*
 * gmu_cache_finalize() - Allocate any cache/uncached GMU ranges that the
 * firmware did not claim through preallocation requests.
 * @device: Pointer to KGSL device
 *
 * If the GMU firmware already issued preallocation requests
 * (gmu->preallocations set by gmu_prealloc_req()), the firmware owns the
 * layout and nothing is allocated here. Otherwise the full ICACHE and
 * DCACHE ranges plus the uncached scratch buffers are mapped, and
 * preallocations is set so this runs at most once.
 *
 * Return: 0 on success or a negative error code from allocate_gmu_kmem().
 */
int gmu_cache_finalize(struct kgsl_device *device)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct gmu_memdesc *md;

	/* Preallocations were made so no need to request all this memory */
	if (gmu->preallocations)
		return 0;

	/* Map the entire ICACHE VA range in one allocation */
	md = allocate_gmu_kmem(gmu, GMU_ICACHE,
			gmu_vma[GMU_ICACHE].start, gmu_vma[GMU_ICACHE].size,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md))
		return PTR_ERR(md);

	/* Map the entire DCACHE VA range in one allocation */
	md = allocate_gmu_kmem(gmu, GMU_DCACHE,
			gmu_vma[GMU_DCACHE].start, gmu_vma[GMU_DCACHE].size,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md))
		return PTR_ERR(md);

	/*
	 * WB dummy page. NOTE(review): the code this replaced required it
	 * to be the first uncached alloc; confirm ordering vs. hfi_mem,
	 * which gmu_memory_probe() now allocates earlier.
	 */
	md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
			0, DUMMY_SIZE,
			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
	if (IS_ERR(md))
		return PTR_ERR(md);

	if (ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_ECP)) {
		/* Allocation to account for future MEM_ALLOC buffers */
		md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
				0, SZ_32K,
				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
		if (IS_ERR(md))
			return PTR_ERR(md);
	}

	/* Mark done so a second call (or a firmware request) is a no-op */
	gmu->preallocations = true;

	return 0;
}

/* Do not access any GMU registers in GMU probe function */
static int gmu_probe(struct kgsl_device *device, struct device_node *node)
{
	struct gmu_device *gmu;
	struct gmu_memdesc *mem_addr = NULL;
	struct kgsl_hfi *hfi;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1324,10 +1360,13 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
		goto error;

	/* Set up GMU IOMMU and shared memory with GMU */
	ret = gmu_memory_probe(device, gmu, node);
	ret = gmu_iommu_init(gmu, node);
	if (ret)
		goto error;

	ret = gmu_tcm_init(gmu);
	if (ret)
		goto error;
	mem_addr = gmu->hfi_mem;

	/* Map and reserve GMU CSRs registers */
	ret = gmu_reg_probe(device);
@@ -1384,8 +1423,6 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
	if (ret)
		goto error;

	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);

	/* Set up GMU idle states */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
		gmu->idle_level = GPU_HW_MIN_VOLT;
+5 −0
Original line number Diff line number Diff line
@@ -206,9 +206,14 @@ struct gmu_device {
	unsigned int idle_level;
	unsigned int fault_count;
	struct kgsl_mailbox mailbox;
	bool preallocations;
};

struct gmu_memdesc *gmu_get_memdesc(unsigned int addr, unsigned int size);
unsigned int gmu_get_memtype_base(enum gmu_mem_type type);

int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk);
int gmu_memory_probe(struct kgsl_device *device);
int gmu_cache_finalize(struct kgsl_device *device);

#endif /* __KGSL_GMU_H */
+4 −3
Original line number Diff line number Diff line
@@ -174,10 +174,11 @@ static int hfi_queue_write(struct gmu_device *gmu, uint32_t queue_idx,


/* Sizes of the queue and message are in unit of dwords */
void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr,
		uint32_t queue_sz_bytes)
void hfi_init(struct gmu_device *gmu)
{
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(hfi->kgsldev);
	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	int i;
	struct hfi_queue_table *tbl;
	struct hfi_queue_header *hdr;
@@ -218,7 +219,7 @@ void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr,
		hdr->start_addr = GMU_QUEUE_START_ADDR(mem_addr, i);
		hdr->type = QUEUE_HDR_TYPE(queue[i].idx, queue[i].pri, 0,  0);
		hdr->status = queue[i].status;
		hdr->queue_size = queue_sz_bytes >> 2; /* convert to dwords */
		hdr->queue_size = HFI_QUEUE_SIZE >> 2; /* convert to dwords */
		hdr->msg_size = 0;
		hdr->drop_cnt = 0;
		hdr->rx_wm = 0x1;
Loading