drivers/gpu/drm/msm/adreno/a5xx_gpu.c +10 −29

@@ -46,7 +46,6 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 	struct msm_gem_address_space *aspace)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct msm_mmu *mmu = aspace->mmu;
 	struct msm_iommu *iommu = to_msm_iommu(mmu);

@@ -75,17 +74,15 @@ static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 	 * reload the pagetable if the current ring gets preempted out.
 	 */
 	OUT_PKT7(ring, CP_MEM_WRITE, 4);
-	OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
-	OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, ttbr0)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, ttbr0)));
 	OUT_RING(ring, lower_32_bits(iommu->ttbr0));
 	OUT_RING(ring, upper_32_bits(iommu->ttbr0));

 	/* Also write the current contextidr (ASID) */
 	OUT_PKT7(ring, CP_MEM_WRITE, 3);
-	OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, contextidr)));
-	OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, contextidr)));
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, contextidr)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, contextidr)));
 	OUT_RING(ring, iommu->contextidr);

 	/* Invalidate the draw state so we start off fresh */

@@ -217,8 +214,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
 	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
-	OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
-	OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
 	OUT_RING(ring, submit->fence);

 	if (submit->secure) {

@@ -477,30 +474,14 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
 static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 		const struct firmware *fw, u64 *iova)
 {
-	struct drm_device *drm = gpu->dev;
 	struct drm_gem_object *bo;
 	void *ptr;

-	bo = msm_gem_new(drm, fw->size - 4,
-		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
-	if (IS_ERR(bo))
-		return bo;
-
-	ptr = msm_gem_vaddr(bo);
-	if (!ptr) {
-		drm_gem_object_unreference_unlocked(bo);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (iova) {
-		int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
-
-		if (ret) {
-			drm_gem_object_unreference_unlocked(bo);
-			return ERR_PTR(ret);
-		}
-	}
+	ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+	if (IS_ERR(ptr))
+		return ERR_CAST(ptr);

 	memcpy(ptr, &fw->data[4], fw->size - 4);

 	return bo;

drivers/gpu/drm/msm/adreno/a5xx_power.c +4 −12

@@ -458,18 +458,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

-	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
-		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
-	if (IS_ERR(a5xx_gpu->gpmu_bo))
-		goto err;
-
-	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
-		&a5xx_gpu->gpmu_iova))
-		goto err;
-
-	ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo);
-	if (!ptr)
+	ptr = msm_gem_kernel_new(drm, bosize,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+	if (IS_ERR(ptr))
 		goto err;

 	while (cmds_size > 0) {
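Every call site above swaps a three-step open-coded sequence (msm_gem_new(), msm_gem_get_iova(), msm_gem_vaddr(), each with its own unwind path) for a single msm_gem_kernel_new() call. The helper itself is introduced elsewhere in this series and is not visible in this diff; the sketch below is a reconstruction inferred from these call sites and from the alloc_kernel_bo() helper that the a5xx_preempt.c hunks below delete, which it generalizes by taking an explicit address space. A _locked variant (used by a5xx_snapshot.c below) presumably does the same on top of msm_gem_new_locked() for callers already holding struct_mutex.

/*
 * Sketch only: mirrors the deleted alloc_kernel_bo() from a5xx_preempt.c.
 * Allocate a GEM object, optionally map it into @aspace, vmap it, and
 * return the kernel vaddr (or an ERR_PTR), releasing the object on any
 * failure so callers have a single point of cleanup.
 */
void *msm_gem_kernel_new(struct drm_device *dev, size_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, u64 *iova)
{
	struct drm_gem_object *obj;
	void *vaddr;
	int ret;

	obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/* Callers that don't need a GPU address pass iova == NULL */
	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_vaddr(obj);
	if (!vaddr) {
		ret = -ENOMEM;
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}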
drivers/gpu/drm/msm/adreno/a5xx_preempt.c +8 −46

@@ -15,41 +15,6 @@
 #include "msm_iommu.h"
 #include "a5xx_gpu.h"

-static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
-		size_t size, uint32_t flags, struct drm_gem_object **bo,
-		u64 *iova)
-{
-	struct drm_gem_object *_bo;
-	u64 _iova;
-	void *ptr;
-	int ret;
-
-	_bo = msm_gem_new(drm, size, flags);
-	if (IS_ERR(_bo))
-		return _bo;
-
-	ret = msm_gem_get_iova(_bo, gpu->aspace, &_iova);
-	if (ret)
-		goto out;
-
-	ptr = msm_gem_vaddr(_bo);
-	if (!ptr) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	if (bo)
-		*bo = _bo;
-	if (iova)
-		*iova = _iova;
-
-	return ptr;
-
-out:
-	drm_gem_object_unreference_unlocked(_bo);
-	return ERR_PTR(ret);
-}
-
 /*
  * Try to transition the preemption state from old to new. Return
  * true on success or false if the original state wasn't 'old'

@@ -100,7 +65,6 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 /* Return the highest priority ringbuffer with something in it */
 static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	unsigned long flags;
 	int i;

@@ -109,7 +73,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
 		struct msm_ringbuffer *ring = gpu->rb[i];

 		spin_lock_irqsave(&ring->lock, flags);
-		empty = (get_wptr(ring) == adreno_gpu->memptrs->rptr[ring->id]);
+		empty = (get_wptr(ring) == ring->memptrs->rptr);
 		spin_unlock_irqrestore(&ring->lock, flags);

 		if (!empty)

@@ -176,10 +140,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
 	/* Set the SMMU info for the preemption */
 	if (a5xx_gpu->smmu_info) {
-		a5xx_gpu->smmu_info->ttbr0 =
-			adreno_gpu->memptrs->ttbr0[ring->id];
-		a5xx_gpu->smmu_info->contextidr =
-			adreno_gpu->memptrs->contextidr[ring->id];
+		a5xx_gpu->smmu_info->ttbr0 = ring->memptrs->ttbr0;
+		a5xx_gpu->smmu_info->contextidr = ring->memptrs->contextidr;
 	}

 	/* Set the address of the incoming preemption record */

@@ -278,10 +240,10 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
 	struct drm_gem_object *bo;
 	u64 iova;

-	ptr = alloc_kernel_bo(gpu->dev, gpu,
+	ptr = msm_gem_kernel_new(gpu->dev,
 		A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
 		MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
-		&bo, &iova);
+		gpu->aspace, &bo, &iova);

 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);

@@ -296,7 +258,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
 	ptr->info = 0;
 	ptr->data = 0;
 	ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
-	ptr->rptr_addr = rbmemptr(adreno_gpu, ring->id, rptr);
+	ptr->rptr_addr = rbmemptr(ring, rptr);
 	ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;

 	return 0;

@@ -352,10 +314,10 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
 	}

 	if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) {
-		ptr = alloc_kernel_bo(gpu->dev, gpu,
+		ptr = msm_gem_kernel_new(gpu->dev,
 			sizeof(struct a5xx_smmu_info),
 			MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
-			&bo, &iova);
+			gpu->aspace, &bo, &iova);

 		if (IS_ERR(ptr))
 			goto fail;

drivers/gpu/drm/msm/adreno/a5xx_snapshot.c +8 −22

@@ -214,28 +214,14 @@ struct crashdump {
 static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
 {
-	struct drm_device *drm = gpu->dev;
-	int ret = -ENOMEM;
-
-	crashdump->bo = msm_gem_new_locked(drm, CRASHDUMP_BO_SIZE,
-		MSM_BO_UNCACHED);
-	if (IS_ERR(crashdump->bo)) {
-		ret = PTR_ERR(crashdump->bo);
-		crashdump->bo = NULL;
-		return ret;
-	}
-
-	crashdump->ptr = msm_gem_vaddr(crashdump->bo);
-	if (!crashdump->ptr)
-		goto out;
-
-	ret = msm_gem_get_iova(crashdump->bo, gpu->aspace, &crashdump->iova);
-
-out:
-	if (ret) {
-		drm_gem_object_unreference(crashdump->bo);
-		crashdump->bo = NULL;
+	int ret = 0;
+
+	crashdump->ptr = msm_gem_kernel_new_locked(gpu->dev,
+		CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED,
+		gpu->aspace, &crashdump->bo, &crashdump->iova);
+	if (IS_ERR(crashdump->ptr)) {
+		ret = PTR_ERR(crashdump->ptr);
+		crashdump->ptr = NULL;
 	}

 	return ret;
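The other recurring change is rbmemptr(): it now takes a ring instead of (gpu, ring->id), because the shared memptrs arrays indexed by ring id have become one small block per ringbuffer. The real definitions live in msm_ringbuffer.h, which is not part of this diff; a plausible shape, inferred from the fields these hunks touch (rptr, fence, ttbr0, contextidr), is sketched below. The memptrs_iova field name is an assumption.

/*
 * Hypothetical per-ring layout after this change. Each msm_ringbuffer
 * carries its own GPU-visible block instead of indexing a GPU-wide
 * array by ring->id; fields are volatile because the CP writes them.
 */
struct msm_rbmemptrs {
	volatile uint32_t rptr;        /* CP read pointer, updated by hw */
	volatile uint32_t fence;       /* last completed fence seqno */
	volatile uint64_t ttbr0;       /* pagetable base for preemption */
	volatile uint32_t contextidr;  /* ASID matching ttbr0 */
};

/* GPU address of one field: iova of the per-ring block + field offset */
#define rbmemptr(ring, member) \
	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))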
drivers/gpu/drm/msm/adreno/adreno_gpu.c +12 −77

@@ -90,7 +90,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 			REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);

 	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-		REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr));
+		REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu->rb[0], rptr));

 	return 0;
 }

@@ -106,10 +106,11 @@ static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
 		 * ensure that it won't be. If not then this is why your
 		 * a430 stopped working.
 		 */
-		return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read(
-			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
-	} else
-		return adreno_gpu->memptrs->rptr[ring->id];
+		return ring->memptrs->rptr = adreno_gpu_read(adreno_gpu,
+			REG_ADRENO_CP_RB_RPTR);
+	}
+
+	return ring->memptrs->rptr;
 }

 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)

@@ -128,17 +129,11 @@ uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
 uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-
-	if (!ring)
-		return 0;
-
-	return adreno_gpu->memptrs->fence[ring->id];
+	return ring ? ring->memptrs->fence : 0;
 }

 void adreno_recover(struct msm_gpu *gpu)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct drm_device *dev = gpu->dev;
 	struct msm_ringbuffer *ring;
 	int ret, i;

@@ -156,9 +151,8 @@ void adreno_recover(struct msm_gpu *gpu)
 		ring->next = ring->start;

 		/* reset completed fence seqno, discard anything pending: */
-		adreno_gpu->memptrs->fence[ring->id] =
-			adreno_submitted_fence(gpu, ring);
-		adreno_gpu->memptrs->rptr[ring->id] = 0;
+		ring->memptrs->fence = adreno_submitted_fence(gpu, ring);
+		ring->memptrs->rptr = 0;
 	}

 	gpu->funcs->pm_resume(gpu);

@@ -213,7 +207,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
 	OUT_RING(ring, CACHE_FLUSH_TS);
-	OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence));
+	OUT_RING(ring, rbmemptr(ring, fence));
 	OUT_RING(ring, submit->fence);

 	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */

@@ -516,7 +510,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 {
 	struct adreno_platform_config *config = pdev->dev.platform_data;
 	struct msm_gpu *gpu = &adreno_gpu->base;
-	struct msm_mmu *mmu;
 	int ret;

 	adreno_gpu->funcs = funcs;

@@ -541,77 +534,19 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}

 	ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
-	if (ret) {
+	if (ret)
 		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
 			adreno_gpu->info->pfpfw, ret);
-		return ret;
-	}
-
-	mmu = gpu->aspace->mmu;
-	if (mmu) {
-		ret = mmu->funcs->attach(mmu, NULL, 0);
-		if (ret)
-			return ret;
-	}
-
-	if (gpu->secure_aspace) {
-		mmu = gpu->secure_aspace->mmu;
-		if (mmu) {
-			ret = mmu->funcs->attach(mmu, NULL, 0);
-			if (ret)
-				return ret;
-		}
-	}
-
-	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
-		MSM_BO_UNCACHED);
-	if (IS_ERR(adreno_gpu->memptrs_bo)) {
-		ret = PTR_ERR(adreno_gpu->memptrs_bo);
-		adreno_gpu->memptrs_bo = NULL;
-		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
-		return ret;
-	}
-
-	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
-	if (!adreno_gpu->memptrs) {
-		dev_err(drm->dev, "could not vmap memptrs\n");
-		return -ENOMEM;
-	}
-
-	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
-			&adreno_gpu->memptrs_iova);
-	if (ret) {
-		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
+
+	return ret;
 }

 void adreno_gpu_cleanup(struct adreno_gpu *gpu)
 {
-	struct msm_gem_address_space *aspace = gpu->base.aspace;
-
-	if (gpu->memptrs_bo) {
-		if (gpu->memptrs_iova)
-			msm_gem_put_iova(gpu->memptrs_bo, aspace);
-		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
-	}
-
 	release_firmware(gpu->pm4);
 	release_firmware(gpu->pfp);

 	msm_gpu_cleanup(&gpu->base);
-
-	if (aspace) {
-		aspace->mmu->funcs->detach(aspace->mmu);
-		msm_gem_address_space_put(aspace);
-	}
-
-	if (gpu->base.secure_aspace) {
-		aspace = gpu->base.secure_aspace;
-		aspace->mmu->funcs->detach(aspace->mmu);
-		msm_gem_address_space_put(aspace);
-	}
 }

 static void adreno_snapshot_os(struct msm_gpu *gpu,
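With adreno_gpu_init() no longer allocating a GPU-wide memptrs BO and adreno_gpu_cleanup() no longer freeing one, ownership of those pointers has moved to the ringbuffer code, which is not part of this diff. A hypothetical sketch of what ring creation presumably now does, using the msm_gem_kernel_new() helper from above; the memptrs_bo and memptrs_iova field names are assumptions:

/*
 * Hypothetical: allocate this ring's memptrs block at ring creation,
 * e.g. in msm_ringbuffer_new(), so it is freed with the ring rather
 * than with the adreno_gpu.
 */
ring->memptrs = msm_gem_kernel_new(gpu->dev, sizeof(*ring->memptrs),
	MSM_BO_UNCACHED, gpu->aspace, &ring->memptrs_bo,
	&ring->memptrs_iova);
if (IS_ERR(ring->memptrs))
	return ERR_CAST(ring->memptrs);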