drivers/gpu/msm/adreno_a5xx.c +12 −3

@@ -193,6 +193,8 @@ static void a5xx_critical_packet_destroy(struct adreno_device *adreno_dev)
 	kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf2);
 	kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf3);
 
+	kgsl_iommu_unmap_global_secure_pt_entry(KGSL_DEVICE(adreno_dev),
+			&crit_pkts_refbuf0);
 	kgsl_sharedmem_free(&crit_pkts_refbuf0);
 }

@@ -231,8 +233,10 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
 	if (ret)
 		return ret;
 
-	kgsl_add_global_secure_entry(&adreno_dev->dev,
+	ret = kgsl_iommu_map_global_secure_pt_entry(&adreno_dev->dev,
 					&crit_pkts_refbuf0);
+	if (ret)
+		return ret;
 
 	ret = kgsl_allocate_global(&adreno_dev->dev, &crit_pkts_refbuf1,

@@ -293,8 +297,13 @@ static void a5xx_init(struct adreno_device *adreno_dev)
 	INIT_WORK(&adreno_dev->irq_storm_work, a5xx_irq_storm_worker);
 
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS))
-		a5xx_critical_packet_construct(adreno_dev);
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS)) {
+		int ret;
+
+		ret = a5xx_critical_packet_construct(adreno_dev);
+		if (ret)
+			a5xx_critical_packet_destroy(adreno_dev);
+	}
 
 	a5xx_crashdump_init(adreno_dev);
 }
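Note on the a5xx_init() hunk: a failed a5xx_critical_packet_construct() is now unwound by calling a5xx_critical_packet_destroy(), so the destroy path must tolerate partially constructed state. A minimal standalone sketch of that unwind pattern, in plain userspace C with hypothetical buffers standing in for the crit_pkts memdescs (not the actual kgsl allocation calls):

#include <stdlib.h>

/* Hypothetical stand-ins for the four crit_pkts buffers in the diff. */
static void *buf0, *buf1, *buf2, *buf3;

/* Destroy must be safe on partial state; free(NULL) is a no-op. */
static void critical_packet_destroy(void)
{
	free(buf3); buf3 = NULL;
	free(buf2); buf2 = NULL;
	free(buf1); buf1 = NULL;
	free(buf0); buf0 = NULL;
}

static int critical_packet_construct(void)
{
	if (!(buf0 = malloc(16)))
		return -1;
	if (!(buf1 = malloc(16)))
		return -1;	/* caller unwinds whatever succeeded */
	if (!(buf2 = malloc(16)))
		return -1;
	if (!(buf3 = malloc(16)))
		return -1;
	return 0;
}

int main(void)
{
	/* Mirrors a5xx_init(): tear everything down if construction fails. */
	if (critical_packet_construct())
		critical_packet_destroy();
	return 0;
}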
drivers/gpu/msm/kgsl_iommu.c +44 −42

@@ -110,7 +110,7 @@ struct global_pt_entry {
 };
 
 static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
-static struct kgsl_memdesc *kgsl_global_secure_pt_entry;
+static int secure_global_size;
 static int global_pt_count;
 uint64_t global_pt_alloc;
 static struct kgsl_memdesc gpu_qdss_desc;

@@ -162,24 +162,33 @@ static int kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
 	return 0;
 }
 
-static void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_pagetable
-								*pagetable)
+void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
+				struct kgsl_memdesc *entry)
 {
-	struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;
+	if (!kgsl_mmu_is_secured(&device->mmu))
+		return;
 
-	if (entry != NULL)
-		kgsl_mmu_unmap(pagetable, entry);
+	if (entry != NULL && entry->pagetable->name == KGSL_MMU_SECURE_PT)
+		kgsl_mmu_unmap(entry->pagetable, entry);
 }
 
-static int kgsl_map_global_secure_pt_entry(struct kgsl_pagetable *pagetable)
+int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
+				struct kgsl_memdesc *entry)
 {
 	int ret = 0;
-	struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;
+
+	if (!kgsl_mmu_is_secured(&device->mmu))
+		return -ENOTSUPP;
 
 	if (entry != NULL) {
+		struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
+
 		entry->pagetable = pagetable;
+		entry->gpuaddr = KGSL_IOMMU_SECURE_BASE + secure_global_size;
+
 		ret = kgsl_mmu_map(pagetable, entry);
+		if (ret == 0)
+			secure_global_size += entry->size;
 	}
 	return ret;
 }

@@ -224,13 +233,6 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
 	global_pt_count++;
 }
 
-void kgsl_add_global_secure_entry(struct kgsl_device *device,
-					struct kgsl_memdesc *memdesc)
-{
-	memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE;
-	kgsl_global_secure_pt_entry = memdesc;
-}
-
 struct kgsl_memdesc *kgsl_iommu_get_qdss_global_entry(void)
 {
 	return &gpu_qdss_desc;

@@ -1068,7 +1070,6 @@ static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
 	if (pt->name == KGSL_MMU_SECURE_PT) {
 		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
-		kgsl_iommu_unmap_global_secure_pt_entry(pt);
 	} else {
 		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
 		kgsl_iommu_unmap_globals(pt);

@@ -1089,13 +1090,10 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
 		struct kgsl_pagetable *pagetable,
 		struct kgsl_iommu_pt *pt)
 {
-	unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
-				kgsl_global_secure_pt_entry->size : 0;
 	if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
-		pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
-						secure_global_size;
+		pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
 		pt->compat_va_end = KGSL_IOMMU_SECURE_END;
-		pt->va_start = KGSL_IOMMU_SECURE_BASE + secure_global_size;
+		pt->va_start = KGSL_IOMMU_SECURE_BASE;
 		pt->va_end = KGSL_IOMMU_SECURE_END;
 	} else {
 		pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;

@@ -1120,20 +1118,15 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
 		struct kgsl_pagetable *pagetable,
 		struct kgsl_iommu_pt *pt)
 {
-	unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
-				kgsl_global_secure_pt_entry->size : 0;
 	if (mmu->secured) {
 		if (pagetable->name == KGSL_MMU_SECURE_PT) {
-			pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
-						secure_global_size;
+			pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
 			pt->compat_va_end = KGSL_IOMMU_SECURE_END;
-			pt->va_start = KGSL_IOMMU_SECURE_BASE +
-						secure_global_size;
+			pt->va_start = KGSL_IOMMU_SECURE_BASE;
 			pt->va_end = KGSL_IOMMU_SECURE_END;
 		} else {
 			pt->va_start = KGSL_IOMMU_SVM_BASE32;
-			pt->va_end = KGSL_IOMMU_SECURE_BASE + secure_global_size;
+			pt->va_end = KGSL_IOMMU_SECURE_BASE;
 			pt->compat_va_start = pt->va_start;
 			pt->compat_va_end = pt->va_end;
 		}

@@ -1363,8 +1356,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 	ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
 			+ (cb_num << KGSL_IOMMU_CB_SHIFT);
 
-	ret = kgsl_map_global_secure_pt_entry(pt);
-
 done:
 	if (ret)
 		_free_pt(ctx, pt);

@@ -1608,6 +1599,18 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
 	kgsl_setup_qdss_desc(device);
 	kgsl_setup_qtimer_desc(device);
 
+	if (!mmu->secured)
+		goto done;
+
+	mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
+				KGSL_MMU_SECURE_PT);
+	if (IS_ERR(mmu->securepagetable)) {
+		status = PTR_ERR(mmu->securepagetable);
+		mmu->securepagetable = NULL;
+	} else if (mmu->securepagetable == NULL) {
+		status = -ENOMEM;
+	}
+
 done:
 	if (status)
 		kgsl_iommu_close(mmu);

@@ -1689,17 +1692,9 @@ static int _setup_secure_context(struct kgsl_mmu *mmu)
 	if (ctx->dev == NULL || !mmu->secured)
 		return 0;
 
-	if (mmu->securepagetable == NULL) {
-		mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
-						KGSL_MMU_SECURE_PT);
-		if (IS_ERR(mmu->securepagetable)) {
-			ret = PTR_ERR(mmu->securepagetable);
-			mmu->securepagetable = NULL;
-			return ret;
-		} else if (mmu->securepagetable == NULL) {
-			return -ENOMEM;
-		}
-	}
+	if (mmu->securepagetable == NULL)
+		return -ENOMEM;
 
 	iommu_pt = mmu->securepagetable->priv;
 	ret = _attach_pt(iommu_pt, ctx);

@@ -2502,6 +2497,13 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
 		end = pt->va_end;
 	}
 
+	/*
+	 * When mapping secure buffers, adjust the start of the va range
+	 * to the end of secure global buffers.
+	 */
+	if (kgsl_memdesc_is_secured(memdesc))
+		start += secure_global_size;
+
 	spin_lock(&pagetable->lock);
 
 	addr = _get_unmapped_area(pagetable, start, end, size, align);
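Net effect of the kgsl_iommu.c changes: secure global buffers are carved out of the front of a fixed [KGSL_IOMMU_SECURE_BASE, KGSL_IOMMU_SECURE_END) range through the running secure_global_size counter, and kgsl_iommu_get_gpuaddr() shifts per-process secure allocations past that carve-out, instead of resizing the pagetable's va_start around a single global entry. A runnable userspace model of that accounting (illustrative constants, not the real KGSL_IOMMU_SECURE_BASE/END values):

#include <stdint.h>
#include <stdio.h>

#define SECURE_BASE 0x100000000ULL	/* made-up stand-in values */
#define SECURE_END  0x140000000ULL

/* Running total of mapped secure globals, mirroring secure_global_size. */
static uint64_t secure_global_size;

/* Each global lands at the current end of the carve-out, then grows it. */
static uint64_t map_global_secure(uint64_t size)
{
	uint64_t gpuaddr = SECURE_BASE + secure_global_size;

	secure_global_size += size;
	return gpuaddr;
}

/*
 * Per-process secure ranges keep a fixed base; the search start is
 * bumped past the globals at allocation time, as in the
 * kgsl_iommu_get_gpuaddr() hunk above.
 */
static uint64_t secure_va_start(void)
{
	return SECURE_BASE + secure_global_size;
}

int main(void)
{
	uint64_t g = map_global_secure(0x1000);

	printf("global at 0x%llx, process range [0x%llx, 0x%llx)\n",
	       (unsigned long long)g,
	       (unsigned long long)secure_va_start(),
	       (unsigned long long)SECURE_END);
	return 0;
}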
drivers/gpu/msm/kgsl_mmu.h +3 −1

@@ -174,7 +174,9 @@ int kgsl_mmu_start(struct kgsl_device *device);
 struct kgsl_pagetable *kgsl_mmu_getpagetable_ptbase(struct kgsl_mmu *mmu,
 						u64 ptbase);
 
-void kgsl_add_global_secure_entry(struct kgsl_device *device,
+int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
 					struct kgsl_memdesc *memdesc);
+void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
+					struct kgsl_memdesc *memdesc);
 void kgsl_print_global_pt_entries(struct seq_file *s);
 void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
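These prototypes export the map/unmap pair used by adreno_a5xx.c; both depend on mmu->securepagetable, which the kgsl_iommu_init() hunk above now creates eagerly using the kernel's error-pointer convention. A simplified runnable model of that convention, with IS_ERR()/PTR_ERR()/ERR_PTR() defined locally as stand-ins for the kernel's <linux/err.h> helpers:

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/err.h> helpers. */
#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define ERR_PTR(e)	((void *)(long)(e))
#define PTR_ERR(p)	((long)(p))

struct pagetable { int dummy; };

/* Pretend pagetable lookup that can fail with an error pointer. */
static struct pagetable *getpagetable(int fail)
{
	static struct pagetable pt;

	return fail ? ERR_PTR(-ENODEV) : &pt;
}

int main(void)
{
	struct pagetable *securept = getpagetable(1);
	int status = 0;

	/* Mirrors the kgsl_iommu_init() hunk: normalize both failure shapes. */
	if (IS_ERR(securept)) {
		status = PTR_ERR(securept);
		securept = NULL;
	} else if (securept == NULL) {
		status = -ENOMEM;
	}

	printf("status=%d\n", status);
	return 0;
}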