drivers/gpu/msm/a6xx_reg.h +2 −1

 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef _A6XX_REG_H

@@ -994,6 +994,7 @@
 #define A6XX_GMU_HOST2GMU_INTR_INFO_1		0x1F99C
 #define A6XX_GMU_HOST2GMU_INTR_INFO_2		0x1F99D
 #define A6XX_GMU_HOST2GMU_INTR_INFO_3		0x1F99E
+#define A6XX_GMU_GENERAL_0			0x1F9C5
 #define A6XX_GMU_GENERAL_1			0x1F9C6
 #define A6XX_GMU_GENERAL_6			0x1F9CB
 #define A6XX_GMU_GENERAL_7			0x1F9CC

drivers/gpu/msm/adreno_a6xx_gmu.c +65 −13

 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/firmware.h>

@@ -554,12 +554,6 @@ static int a6xx_gmu_oob_set(struct kgsl_device *device,
 	set = BIT(30 - req * 2);
 	check = BIT(31 - req);

-	if ((gmu->hfi.version & 0x1F) == 0) {
-		/* LEGACY for intermediate oobs */
-		set = BIT(req + 16);
-		check = BIT(req + 16);
-	}
-
 	if (req >= 6) {
 		dev_err(&gmu->pdev->dev,
 			"OOB_set(0x%x) invalid\n", set);

@@ -610,9 +604,6 @@ static inline void a6xx_gmu_oob_clear(struct kgsl_device *device,
 				"OOB_clear(0x%x) invalid\n", clear);
 			return;
 		}
-		/* LEGACY for intermediate oobs */
-		if ((gmu->hfi.version & 0x1F) == 0)
-			clear = BIT(req + 24);
 	} else
 		clear = BIT(req + 24);

@@ -1046,6 +1037,10 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
 		return ret;
 	}

+	/* Read the HFI and Power version from registers */
+	gmu_core_regread(device, A6XX_GMU_HFI_VERSION_INFO, &gmu->ver.hfi);
+	gmu_core_regread(device, A6XX_GMU_GENERAL_0, &gmu->ver.pwr);
+
 	ret = a6xx_gmu_hfi_start(device);
 	if (ret)
 		return ret;

@@ -1064,7 +1059,8 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
 	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
-	int ret = -EINVAL;
+	struct gmu_block_header *blk;
+	int ret, offset = 0;

 	/* there is no GMU */
 	if (!gmu_core_isenabled(device))

@@ -1079,13 +1075,69 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
 	ret = request_firmware(&gmu->fw_image, gpucore->gpmufw_name,
 			device->dev);
-	if (ret || gmu->fw_image == NULL)
+	if (ret) {
 		dev_err(device->dev, "request_firmware (%s) failed: %d\n",
 				gpucore->gpmufw_name, ret);
 		return ret;
 	}

+	/*
+	 * Zero payload fw blocks contain meta data and are
+	 * guaranteed to precede fw load data. Parse the
+	 * meta data blocks.
+	 */
+	while (offset < gmu->fw_image->size) {
+		blk = (struct gmu_block_header *)&gmu->fw_image->data[offset];
+
+		if (offset + sizeof(*blk) > gmu->fw_image->size) {
+			dev_err(&gmu->pdev->dev, "Invalid FW Block\n");
+			return -EINVAL;
+		}
+
+		/* Done with zero length blocks so return */
+		if (blk->size)
+			break;
+
+		offset += sizeof(*blk);
+
+		switch (blk->type) {
+		case GMU_BLK_TYPE_CORE_VER:
+			gmu->ver.core = blk->value;
+			dev_dbg(&gmu->pdev->dev, "CORE VER: 0x%8.8x\n",
+					blk->value);
+			break;
+		case GMU_BLK_TYPE_CORE_DEV_VER:
+			gmu->ver.core_dev = blk->value;
+			dev_dbg(&gmu->pdev->dev, "CORE DEV VER: 0x%8.8x\n",
+					blk->value);
+			break;
+		case GMU_BLK_TYPE_PWR_VER:
+			gmu->ver.pwr = blk->value;
+			dev_dbg(&gmu->pdev->dev, "PWR VER: 0x%8.8x\n",
+					blk->value);
+			break;
+		case GMU_BLK_TYPE_PWR_DEV_VER:
+			gmu->ver.pwr_dev = blk->value;
+			dev_dbg(&gmu->pdev->dev, "PWR DEV VER: 0x%8.8x\n",
+					blk->value);
+			break;
+		case GMU_BLK_TYPE_HFI_VER:
+			gmu->ver.hfi = blk->value;
+			dev_dbg(&gmu->pdev->dev, "HFI VER: 0x%8.8x\n",
+					blk->value);
+			break;
+		/* Skip preallocation requests for now */
+		case GMU_BLK_TYPE_PREALLOC_REQ:
+		case GMU_BLK_TYPE_PREALLOC_PERSIST_REQ:
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}

 #define A6XX_STATE_OF_CHILD (BIT(4) | BIT(5))
 #define A6XX_IDLE_FULL_LLM BIT(0)
 #define A6XX_WAKEUP_ACK BIT(1)
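The metadata walk added above only needs the fixed-size gmu_block_header and the GMU_BLK_TYPE_* values from kgsl_gmu.h. A stand-alone sketch of the same walk is below; the exact field order of gmu_block_header is not visible in this patch, so the addr/size/type/value layout used here is an assumption for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed layout -- only size/type/value are referenced by the patch */
struct gmu_block_header {
	uint32_t addr;
	uint32_t size;	/* 0 for metadata blocks, payload size otherwise */
	uint32_t type;	/* GMU_BLK_TYPE_* */
	uint32_t value;	/* e.g. a packed version word */
};

#define GMU_BLK_TYPE_CORE_VER	2
#define GMU_BLK_TYPE_HFI_VER	6

/* Walk the zero-size metadata blocks at the head of a firmware image */
static void parse_meta(const uint8_t *img, size_t len)
{
	size_t offset = 0;
	struct gmu_block_header blk;

	while (offset + sizeof(blk) <= len) {
		memcpy(&blk, img + offset, sizeof(blk));

		/* Metadata blocks precede load data and have size == 0 */
		if (blk.size)
			break;

		offset += sizeof(blk);

		if (blk.type == GMU_BLK_TYPE_CORE_VER)
			printf("CORE VER: 0x%08x\n", blk.value);
		else if (blk.type == GMU_BLK_TYPE_HFI_VER)
			printf("HFI VER: 0x%08x\n", blk.value);
	}
}

int main(void)
{
	/* Two fabricated metadata blocks standing in for a real image */
	struct gmu_block_header meta[2] = {
		{ 0, 0, GMU_BLK_TYPE_CORE_VER, 0x20030000 },
		{ 0, 0, GMU_BLK_TYPE_HFI_VER, 0x20000000 },
	};

	parse_meta((const uint8_t *)meta, sizeof(meta));
	return 0;
}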
drivers/gpu/msm/kgsl_gmu.c +84 −72

@@ -67,7 +67,8 @@ struct gmu_iommu_context gmu_ctx[] = {
  */
 static struct gmu_memdesc gmu_kmem_entries[GMU_KERNEL_ENTRIES];
 static unsigned long gmu_kmem_bitmap;
-static unsigned int uncached_alloc_offset;
+static unsigned int next_uncached_kernel_alloc;
+static unsigned int next_uncached_user_alloc;

 static void gmu_snapshot(struct kgsl_device *device);
 static void gmu_remove(struct kgsl_device *device);

@@ -116,16 +117,13 @@ static void free_gmu_mem(struct gmu_device *gmu,
 	memset(md, 0, sizeof(*md));
 }

-static int alloc_and_map(struct gmu_device *gmu, unsigned int ctx_id,
-		struct gmu_memdesc *md, unsigned int attrs)
+static int alloc_and_map(struct gmu_device *gmu, struct gmu_memdesc *md,
+		unsigned int attrs)
 {
 	int ret;
 	struct iommu_domain *domain;

-	if (md->mem_type == GMU_ITCM || md->mem_type == GMU_DTCM)
-		return 0;
-
-	domain = gmu_ctx[ctx_id].domain;
+	domain = gmu_ctx[md->ctx_idx].domain;

 	md->hostptr = dma_alloc_attrs(&gmu->pdev->dev, (size_t) md->size,
 			&md->physaddr, GFP_KERNEL, 0);

@@ -157,7 +155,7 @@ struct gmu_memdesc *gmu_get_memdesc(unsigned int addr, unsigned int size)
 		mem = &gmu_kmem_entries[i];

 		if (addr >= mem->gmuaddr &&
-			(addr + size < mem->gmuaddr + mem->size))
+			(addr + size <= mem->gmuaddr + mem->size))
 			return mem;
 	}

@@ -171,11 +169,11 @@ struct gmu_memdesc *gmu_get_memdesc(unsigned int addr, unsigned int size)
  * @attrs: IOMMU mapping attributes
  */
 static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
-		enum gmu_mem_type mem_type, unsigned int size,
-		unsigned int attrs)
+		enum gmu_mem_type mem_type, unsigned int addr,
+		unsigned int size, unsigned int attrs)
 {
 	struct gmu_memdesc *md;
-	int ret = 0;
+	int ret;
 	int entry_idx = find_first_zero_bit(
 			&gmu_kmem_bitmap, GMU_KERNEL_ENTRIES);

@@ -185,72 +183,81 @@ static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
 		return ERR_PTR(-EINVAL);
 	}

-	switch (mem_type) {
-	case GMU_NONCACHED_KERNEL:
-		size = PAGE_ALIGN(size);
-		if (size > SZ_1M || size == 0) {
-			dev_err(&gmu->pdev->dev,
-				"Invalid uncached GMU memory req %d\n",
-				size);
-			return ERR_PTR(-EINVAL);
-		}
-
-		md = &gmu_kmem_entries[entry_idx];
-		md->gmuaddr = gmu_vma[mem_type].start + uncached_alloc_offset;
-		set_bit(entry_idx, &gmu_kmem_bitmap);
-		md->size = size;
-		md->mem_type = mem_type;
-		break;
-
-	case GMU_DCACHE:
-		md = &gmu_kmem_entries[entry_idx];
-		md->gmuaddr = gmu_vma[mem_type].start;
-		set_bit(entry_idx, &gmu_kmem_bitmap);
-		md->size = size;
-		md->mem_type = mem_type;
-		break;
-
-	case GMU_ICACHE:
-		md = &gmu_kmem_entries[entry_idx];
-		md->gmuaddr = gmu_vma[mem_type].start;
-		set_bit(entry_idx, &gmu_kmem_bitmap);
-		md->size = size;
-		md->mem_type = mem_type;
-		break;
-
-	case GMU_ITCM:
-		md = &gmu_kmem_entries[entry_idx];
-		md->gmuaddr = gmu_vma[mem_type].start;
-		set_bit(entry_idx, &gmu_kmem_bitmap);
-		md->size = size;
-		md->mem_type = mem_type;
-		break;
-
-	case GMU_DTCM:
-		md = &gmu_kmem_entries[entry_idx];
-		md->gmuaddr = gmu_vma[mem_type].start;
-		set_bit(entry_idx, &gmu_kmem_bitmap);
-		md->size = size;
-		md->mem_type = mem_type;
-		break;
+	/* Non-TCM requests have page alignment requirement */
+	if ((mem_type != GMU_ITCM) && (mem_type != GMU_DTCM) &&
+			addr & (PAGE_SIZE - 1)) {
+		dev_err(&gmu->pdev->dev,
+				"Invalid alignment request 0x%X\n",
+				addr);
+		return ERR_PTR(-EINVAL);
+	}
+
+	md = &gmu_kmem_entries[entry_idx];
+	set_bit(entry_idx, &gmu_kmem_bitmap);
+
+	memset(md, 0, sizeof(*md));
+
+	switch (mem_type) {
+	case GMU_ITCM:
+	case GMU_DTCM:
+		/* Assign values and return without mapping */
+		md->size = size;
+		md->mem_type = mem_type;
+		md->gmuaddr = addr;
+		return md;
+
+	case GMU_DCACHE:
+	case GMU_ICACHE:
+		size = PAGE_ALIGN(size);
+		md->ctx_idx = GMU_CONTEXT_KERNEL;
+		break;
+
+	case GMU_NONCACHED_KERNEL:
+		/* Set start address for first uncached kernel alloc */
+		if (next_uncached_kernel_alloc == 0)
+			next_uncached_kernel_alloc = gmu_vma[mem_type].start;
+
+		if (addr == 0)
+			addr = next_uncached_kernel_alloc;
+
+		size = PAGE_ALIGN(size);
+		md->ctx_idx = GMU_CONTEXT_KERNEL;
+		break;
+
+	case GMU_NONCACHED_USER:
+		/* Set start address for first uncached user alloc */
+		if (next_uncached_kernel_alloc == 0)
+			next_uncached_user_alloc = gmu_vma[mem_type].start;
+
+		if (addr == 0)
+			addr = next_uncached_user_alloc;
+
+		size = PAGE_ALIGN(size);
+		md->ctx_idx = GMU_CONTEXT_USER;
+		break;

 	default:
 		dev_err(&gmu->pdev->dev,
 				"Invalid memory type (%d) requested\n",
 				mem_type);
+		clear_bit(entry_idx, &gmu_kmem_bitmap);
 		return ERR_PTR(-EINVAL);
 	}

-	ret = alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, attrs);
+	md->size = size;
+	md->mem_type = mem_type;
+	md->gmuaddr = addr;
+
+	ret = alloc_and_map(gmu, md, attrs);
 	if (ret) {
 		clear_bit(entry_idx, &gmu_kmem_bitmap);
+		md->gmuaddr = 0;
 		return ERR_PTR(ret);
 	}

 	if (mem_type == GMU_NONCACHED_KERNEL)
-		uncached_alloc_offset += md->size;
+		next_uncached_kernel_alloc = PAGE_ALIGN(md->gmuaddr + md->size);
+	else if (mem_type == GMU_NONCACHED_USER)
+		next_uncached_user_alloc = PAGE_ALIGN(md->gmuaddr + md->size);

 	return md;
 }

@@ -346,7 +353,7 @@ static void gmu_kmem_close(struct gmu_device *gmu)
 {
 	int i;
 	struct gmu_memdesc *md;
-	struct gmu_iommu_context *ctx = &gmu_ctx[GMU_CONTEXT_KERNEL];
+	struct gmu_iommu_context *ctx;

 	gmu->hfi_mem = NULL;
 	gmu->dump_mem = NULL;

@@ -358,6 +365,7 @@ static void gmu_kmem_close(struct gmu_device *gmu)
 			continue;

 		md = &gmu_kmem_entries[i];
+		ctx = &gmu_ctx[md->ctx_idx];

 		if (md->gmuaddr && md->mem_type != GMU_ITCM &&
 				md->mem_type != GMU_DTCM)

@@ -402,47 +410,51 @@ static int gmu_memory_probe(struct kgsl_device *device,
 		return ret;

 	/* Reserve a memdesc for ITCM. No actually memory allocated */
-	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu_vma[GMU_ITCM].size, 0);
+	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu_vma[GMU_ITCM].start,
+			gmu_vma[GMU_ITCM].size, 0);
 	if (IS_ERR(md)) {
 		ret = PTR_ERR(md);
 		goto err_ret;
 	}

 	/* Reserve a memdesc for DTCM. No actually memory allocated */
-	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu_vma[GMU_DTCM].size, 0);
+	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu_vma[GMU_DTCM].start,
+			gmu_vma[GMU_DTCM].size, 0);
 	if (IS_ERR(md)) {
 		ret = PTR_ERR(md);
 		goto err_ret;
 	}

-	/* Allocates & maps memory for WB DUMMY PAGE */
-	/* Must be the first alloc */
-	md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
-			DUMMY_SIZE, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+	/* Allocates & maps memory for DCACHE */
+	md = allocate_gmu_kmem(gmu, GMU_DCACHE, gmu_vma[GMU_DCACHE].start,
+			gmu_vma[GMU_DCACHE].size,
+			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
 	if (IS_ERR(md)) {
 		ret = PTR_ERR(md);
 		goto err_ret;
 	}

-	/* Allocates & maps memory for DCACHE */
-	md = allocate_gmu_kmem(gmu, GMU_DCACHE, gmu_vma[GMU_DCACHE].size,
-			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+	/* Allocates & maps memory for ICACHE */
+	md = allocate_gmu_kmem(gmu, GMU_ICACHE, gmu_vma[GMU_ICACHE].start,
+			gmu_vma[GMU_ICACHE].size,
+			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
 	if (IS_ERR(md)) {
 		ret = PTR_ERR(md);
 		goto err_ret;
 	}

-	/* Allocates & maps memory for ICACHE */
-	md = allocate_gmu_kmem(gmu, GMU_ICACHE, gmu_vma[GMU_ICACHE].size,
-			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+	/* Allocates & maps memory for WB DUMMY PAGE */
+	/* Must be the first UNCACHED alloc */
+	md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
+			DUMMY_SIZE, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
 	if (IS_ERR(md)) {
 		ret = PTR_ERR(md);
 		goto err_ret;
 	}

 	/* Allocates & maps memory for HFI */
-	gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, HFIMEM_SIZE,
-			(IOMMU_READ | IOMMU_WRITE));
+	gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
+			HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
 	if (IS_ERR(gmu->hfi_mem)) {
 		ret = PTR_ERR(gmu->hfi_mem);
 		goto err_ret;

@@ -450,7 +462,7 @@ static int gmu_memory_probe(struct kgsl_device *device,
 	/* Allocates & maps GMU crash dump memory */
 	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
-		gmu->dump_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
+		gmu->dump_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
 				DUMPMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
 		if (IS_ERR(gmu->dump_mem)) {
 			ret = PTR_ERR(gmu->dump_mem);

@@ -459,8 +471,8 @@ static int gmu_memory_probe(struct kgsl_device *device,
 	/* GMU master log */
-	gmu->gmu_log = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, LOGMEM_SIZE,
-			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+	gmu->gmu_log = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
+			LOGMEM_SIZE, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
 	if (IS_ERR(gmu->gmu_log)) {
 		ret = PTR_ERR(gmu->gmu_log);
 		goto err_ret;

@@ -468,7 +480,8 @@ static int gmu_memory_probe(struct kgsl_device *device,
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_ECP)) {
 		/* Allocation to account for future MEM_ALLOC buffers */
-		md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, SZ_32K,
-				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+		md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0, SZ_32K,
+				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
 		if (IS_ERR(md)) {
 			ret = PTR_ERR(md);

@@ -1291,7 +1304,6 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
 	device->gmu_core.ptr = (void *)gmu;
 	hfi = &gmu->hfi;
 	gmu->load_mode = TCM_BOOT;
-	gmu->ver = ~0U;
 	gmu->pdev = of_find_device_by_node(node);
 	of_dma_configure(&gmu->pdev->dev, node, true);
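After the allocate_gmu_kmem() rework above, the two uncached VMA regions behave like simple linear (bump) allocators: the first allocation initializes the cursor to the region base, explicit addresses must be page aligned, and every allocation advances the cursor to the page-aligned end of the new object. A minimal user-space model of that policy follows; the region bounds and names here are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct region {
	uint32_t start;	/* base GMU virtual address of the region */
	uint32_t end;	/* first address past the region (sanity check only) */
	uint32_t next;	/* bump cursor, 0 until the first allocation */
};

/* Returns the chosen GMU address, or 0 on failure */
static uint32_t bump_alloc(struct region *r, uint32_t addr, uint32_t size)
{
	if (r->next == 0)
		r->next = r->start;

	if (addr == 0)
		addr = r->next;		/* caller did not pick an address */
	else if (addr & (PAGE_SIZE - 1))
		return 0;		/* explicit addresses must be page aligned */

	size = PAGE_ALIGN(size);
	if (addr < r->start || addr + size > r->end)
		return 0;

	/* Advance the cursor past this allocation */
	r->next = PAGE_ALIGN(addr + size);
	return addr;
}

int main(void)
{
	/* Illustrative region; the real bounds come from gmu_vma[] */
	struct region uncached = { 0x60000000, 0x60100000, 0 };

	printf("dummy page at 0x%x\n", bump_alloc(&uncached, 0, PAGE_SIZE));
	printf("hfi mem at 0x%x\n", bump_alloc(&uncached, 0, 0x7000));
	return 0;
}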
drivers/gpu/msm/kgsl_gmu.h +27 −3

 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_GMU_H
 #define __KGSL_GMU_H

@@ -14,6 +14,12 @@
 #define BWMEM_SIZE	(12 + (4 * NUM_BW_LEVELS)) /*in bytes*/

+#define GMU_VER_MAJOR(ver) (((ver) >> 28) & 0xF)
+#define GMU_VER_MINOR(ver) (((ver) >> 16) & 0xFFF)
+#define GMU_VER_STEP(ver) ((ver) & 0xFFFF)
+#define GMU_VERSION(major, minor) \
+	((((major) & 0xF) << 28) | (((minor) & 0xFFF) << 16))
+
 #define GMU_INT_WDOG_BITE		BIT(0)
 #define GMU_INT_RSCC_COMP		BIT(1)
 #define GMU_INT_FENCE_ERR		BIT(3)

@@ -57,6 +63,16 @@ struct gmu_block_header {
 	uint32_t value;
 };

+/* GMU Block types */
+#define GMU_BLK_TYPE_DATA 0
+#define GMU_BLK_TYPE_PREALLOC_REQ 1
+#define GMU_BLK_TYPE_CORE_VER 2
+#define GMU_BLK_TYPE_CORE_DEV_VER 3
+#define GMU_BLK_TYPE_PWR_VER 4
+#define GMU_BLK_TYPE_PWR_DEV_VER 5
+#define GMU_BLK_TYPE_HFI_VER 6
+#define GMU_BLK_TYPE_PREALLOC_PERSIST_REQ 7
+
 /* For GMU Logs*/
 #define LOGMEM_SIZE SZ_4K

@@ -80,6 +96,7 @@ enum gmu_mem_type {
  * @physaddr: Physical address of the memory object
  * @size: Size of the memory object
  * @mem_type: memory type for this memory
+ * @ctx_idx: GMU IOMMU context idx
  */
 struct gmu_memdesc {
 	void *hostptr;

@@ -87,6 +104,7 @@ struct gmu_memdesc {
 	phys_addr_t physaddr;
 	uint64_t size;
 	enum gmu_mem_type mem_type;
+	uint32_t ctx_idx;
 };

 struct gmu_bw_votes {

@@ -118,7 +136,7 @@ struct kgsl_mailbox {
 /**
  * struct gmu_device - GMU device structure
- * @ver: GMU FW version, read from GMU
+ * @ver: GMU Version information
  * @reg_phys: GMU CSR physical address
  * @reg_len: GMU CSR range
  * @gmu_interrupt_num: GMU interrupt number

@@ -152,7 +170,13 @@ struct kgsl_mailbox {
  * @mailbox: Messages to AOP for ACD enable/disable go through this
  */
 struct gmu_device {
-	unsigned int ver;
+	struct {
+		u32 core;
+		u32 core_dev;
+		u32 pwr;
+		u32 pwr_dev;
+		u32 hfi;
+	} ver;
 	struct platform_device *pdev;
 	unsigned long reg_phys;
 	unsigned int reg_len;
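The GMU_VER_* helpers added above pack the major number into bits 31:28, the minor into bits 27:16, and the step into bits 15:0 of a single version word. A quick round-trip check, using made-up numbers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GMU_VER_MAJOR(ver) (((ver) >> 28) & 0xF)
#define GMU_VER_MINOR(ver) (((ver) >> 16) & 0xFFF)
#define GMU_VER_STEP(ver)  ((ver) & 0xFFFF)
#define GMU_VERSION(major, minor) \
	((((major) & 0xF) << 28) | (((minor) & 0xFFF) << 16))

int main(void)
{
	/* e.g. a firmware advertising major 2, minor 3, step 0x1f */
	uint32_t ver = GMU_VERSION(2, 3) | 0x001f;

	assert(GMU_VER_MAJOR(ver) == 2);
	assert(GMU_VER_MINOR(ver) == 3);
	assert(GMU_VER_STEP(ver) == 0x1f);

	/* Prints: ver = 0x2003001f -> 2.3.31 */
	printf("ver = 0x%08x -> %u.%u.%u\n", ver,
	       GMU_VER_MAJOR(ver), GMU_VER_MINOR(ver), GMU_VER_STEP(ver));
	return 0;
}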
drivers/gpu/msm/kgsl_hfi.c +7 −24

@@ -31,14 +31,6 @@
 #define CMD_MSG_HDR(id, size) CREATE_MSG_HDR(id, size, HFI_MSG_CMD)
 #define ACK_MSG_HDR(id, size) CREATE_MSG_HDR(id, size, HFI_MSG_ACK)

-#define HFI_VER_MAJOR(hfi) (((hfi)->version >> 28) & 0xF)
-#define HFI_VER_MINOR(hfi) (((hfi)->version >> 5) & 0x7FFFFF)
-#define HFI_VER_BRANCH(hfi) ((hfi)->version & 0x1F)
-#define HFI_VERSION(major, minor, branch) \
-	((((major) & 0xF) << 28) | \
-	(((minor) & 0x7FFFFF) << 5) | \
-	((branch) & 0x1F))
-
 static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx,
 	struct pending_cmd *ret_cmd);

@@ -95,7 +87,7 @@ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx,
 		result = -ENODATA;
 	}

-	if (HFI_VER_MAJOR(&gmu->hfi) >= 2)
+	if (GMU_VER_MAJOR(gmu->ver.hfi) >= 2)
 		read = ALIGN(read, SZ_4) % hdr->queue_size;

 	hdr->read_index = read;

@@ -154,7 +146,7 @@ static int hfi_queue_write(struct gmu_device *gmu, uint32_t queue_idx,
 	}

 	/* Cookify any non used data at the end of the write buffer */
-	if (HFI_VER_MAJOR(&gmu->hfi) >= 2) {
+	if (GMU_VER_MAJOR(gmu->ver.hfi) >= 2) {
 		for (; write % 4; write = (write + 1) % hdr->queue_size)
 			queue[write] = 0xFAFAFAFA;
 	}
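For HFI major version 2 and newer, both sides of the queue keep their indices on 4-dword boundaries: the writer pads the unused tail of a message with the 0xFAFAFAFA cookie, and the reader rounds its index up the same way. A small model of that step, with an illustrative queue size and indices:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t queue[16];
	uint32_t queue_size = 16;	/* in dwords, illustrative */
	uint32_t write = 5;		/* next free dword after a 5-dword message */

	/* Writer side: pad up to the next 4-dword boundary with a cookie */
	for (; write % 4; write = (write + 1) % queue_size)
		queue[write] = 0xFAFAFAFA;

	/* Reader side: the consumer rounds its index the same way */
	uint32_t read = 5;
	read = ALIGN_UP(read, 4) % queue_size;

	/* Prints: write index -> 8, read index -> 8 */
	printf("write index -> %u, read index -> %u\n", write, read);
	return 0;
}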
@@ -586,7 +578,7 @@ static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx,
 	while (hfi_queue_read(gmu, queue_idx, rcvd, sizeof(rcvd)) > 0) {
 		/* Special case if we're v1 */
-		if (HFI_VER_MAJOR(&gmu->hfi) < 2) {
+		if (GMU_VER_MAJOR(gmu->ver.hfi) < 2) {
 			hfi_v1_receiver(gmu, rcvd, ret_cmd);
 			continue;
 		}

@@ -620,11 +612,6 @@ void hfi_receiver(unsigned long data)
 	hfi_process_queue((struct gmu_device *) data, HFI_DBG_ID, NULL);
 }

-#define GMU_VER_MAJOR(ver) (((ver) >> 28) & 0xF)
-#define GMU_VER_MINOR(ver) (((ver) >> 16) & 0xFFF)
-#define GMU_VERSION(major, minor) \
-	((((major) & 0xF) << 28) | (((minor) & 0xFFF) << 16))
-
 static int hfi_verify_fw_version(struct kgsl_device *device,
 		struct gmu_device *gmu)
 {

@@ -633,13 +620,9 @@ static int hfi_verify_fw_version(struct kgsl_device *device,
 	unsigned int ver, major, minor;

 	/* GMU version is already known, so don't waste time finding again */
-	if (gmu->ver != ~0U)
+	if (gmu->ver.core != 0)
 		return 0;

-	/* Read the HFI version from the register */
-	adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_HFI_VERSION_INFO,
-			&gmu->hfi.version);
-
 	major = adreno_dev->gpucore->gpmu_major;
 	minor = adreno_dev->gpucore->gpmu_minor;

@@ -662,7 +645,7 @@ static int hfi_verify_fw_version(struct kgsl_device *device,
 			GMU_VER_MINOR(ver), minor);

 	/* Save the gmu version information */
-	gmu->ver = ver;
+	gmu->ver.core = ver;

 	return 0;
 }
@@ -717,7 +700,7 @@ int hfi_start(struct kgsl_device *device,
 	if (result)
 		return result;

-	if (HFI_VER_MAJOR(&gmu->hfi) < 2)
+	if (GMU_VER_MAJOR(gmu->ver.hfi) < 2)
 		result = hfi_send_dcvstbl_v1(gmu);
 	else
 		result = hfi_send_dcvstbl(gmu);

@@ -733,7 +716,7 @@ int hfi_start(struct kgsl_device *device,
 	 * we are sending no more HFIs until the next boot otherwise
 	 * send H2F_MSG_CORE_FW_START and features for A640 devices
 	 */
-	if (HFI_VER_MAJOR(&gmu->hfi) >= 2) {
+	if (GMU_VER_MAJOR(gmu->ver.hfi) >= 2) {
 		if (ADRENO_FEATURE(adreno_dev, ADRENO_ECP)) {
 			result = hfi_send_feature_ctrl(gmu,
 					HFI_FEATURE_ECP, 1, 0);