Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6dd22195 authored by Shrenuj Bansal's avatar Shrenuj Bansal
Browse files

msm: kgsl: Use sg table instead of physaddr to check secure guard page



Earlier, we would use the memdesc physaddr to determine whether
a guard page for secure buffers had already been allocated.
Change that check to use the sg table since we do not store the
physaddr for memdescs which are not allocated via
dma_alloc_coherent.

Change-Id: I24a7ec2f60fbe7f43df1342d316b7bae4d86e787
Signed-off-by: default avatarShrenuj Bansal <shrenujb@codeaurora.org>
parent 8e38a300
Loading
Loading
Loading
Loading
+7 −5
Original line number Diff line number Diff line
@@ -1121,7 +1121,7 @@ kgsl_iommu_unmap(struct kgsl_pagetable *pt,
		return 0;

	if (kgsl_memdesc_has_guard_page(memdesc))
		range += kgsl_memdesc_guard_page_size(memdesc);
		range += kgsl_memdesc_guard_page_size(pt->mmu, memdesc);

	if (kgsl_memdesc_is_secured(memdesc)) {

@@ -1188,8 +1188,9 @@ int _iommu_add_guard_page(struct kgsl_pagetable *pt,
		 * mapped to save 1MB of memory if CPZ is not used.
		 */
		if (kgsl_memdesc_is_secured(memdesc)) {
			struct scatterlist *sg;
			unsigned int sgp_size = pt->mmu->secure_align_mask + 1;
			if (!kgsl_secure_guard_page_memdesc.physaddr) {
			if (!kgsl_secure_guard_page_memdesc.sgt) {
				if (kgsl_allocate_user(pt->mmu->device,
					&kgsl_secure_guard_page_memdesc, pt,
					sgp_size, sgp_size,
@@ -1200,12 +1201,13 @@ int _iommu_add_guard_page(struct kgsl_pagetable *pt,
				}
			}

			physaddr = kgsl_secure_guard_page_memdesc.physaddr;
			sg = kgsl_secure_guard_page_memdesc.sgt->sgl;
			physaddr = page_to_phys(sg_page(sg));
		}

		mutex_lock(&pt->mmu->device->mutex_pc_smmu);
		ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr,
				kgsl_memdesc_guard_page_size(memdesc),
				kgsl_memdesc_guard_page_size(pt->mmu, memdesc),
				protflags & ~IOMMU_WRITE);
		mutex_unlock(&pt->mmu->device->mutex_pc_smmu);
		if (ret) {
@@ -1856,7 +1858,7 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		return -EINVAL;

	if (kgsl_memdesc_has_guard_page(memdesc))
		size += kgsl_memdesc_guard_page_size(memdesc);
		size += kgsl_memdesc_guard_page_size(pagetable->mmu, memdesc);

	align = 1 << kgsl_memdesc_get_align(memdesc);

+2 −2
Original line number Diff line number Diff line
@@ -746,7 +746,7 @@ kgsl_mmu_map(struct kgsl_pagetable *pagetable,
	/* Add space for the guard page when allocating the mmu VA. */
	size = memdesc->size;
	if (kgsl_memdesc_has_guard_page(memdesc))
		size += kgsl_memdesc_guard_page_size(memdesc);
		size += kgsl_memdesc_guard_page_size(pagetable->mmu, memdesc);

	ret = pagetable->pt_ops->mmu_map(pagetable, memdesc);
	spin_lock(&pagetable->lock);
@@ -824,7 +824,7 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
	/* Add space for the guard page when freeing the mmu VA. */
	size = memdesc->size;
	if (kgsl_memdesc_has_guard_page(memdesc))
		size += kgsl_memdesc_guard_page_size(memdesc);
		size += kgsl_memdesc_guard_page_size(pagetable->mmu, memdesc);

	start_addr = memdesc->gpuaddr;
	end_addr = (memdesc->gpuaddr + size);
+4 −2
Original line number Diff line number Diff line
@@ -221,9 +221,11 @@ kgsl_memdesc_has_guard_page(const struct kgsl_memdesc *memdesc)
 * Returns guard page size
 */
static inline int
kgsl_memdesc_guard_page_size(const struct kgsl_memdesc *memdesc)
kgsl_memdesc_guard_page_size(const struct kgsl_mmu *mmu,
				const struct kgsl_memdesc *memdesc)
{
	return kgsl_memdesc_is_secured(memdesc) ? SZ_1M : PAGE_SIZE;
	return kgsl_memdesc_is_secured(memdesc) ? mmu->secure_align_mask + 1 :
								PAGE_SIZE;
}

/*