Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit db290bcf authored by Jordan Crouse, committed by Gerrit - the friendly Code Review server
Browse files

msm: kgsl: Protect the memdesc->gpuaddr in SVM use cases



When SVM is in use, only one GPU address can be assigned to a given
memory descriptor. Once a process has negotiated the GPU address for
the first time, do not allow it to be changed.

Change-Id: Ic0dedbad2a1b3ccdc2c1598a6c501b2be288d64e
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Harshitha Sai Neelati <hsaine@codeaurora.org>
parent bfb2e66b
Loading
Loading
Loading
Loading
+26 −3
Original line number Original line Diff line number Diff line
@@ -4647,19 +4647,34 @@ static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private,
{
{
	int ret;
	int ret;


	/*
	 * Protect access to the gpuaddr here to prevent multiple vmas from
	 * trying to map a SVM region at the same time
	 */
	spin_lock(&entry->memdesc.gpuaddr_lock);

	if (entry->memdesc.gpuaddr) {
		spin_unlock(&entry->memdesc.gpuaddr_lock);
		return (unsigned long) -EBUSY;
	}

	ret = kgsl_mmu_set_svm_region(private->pagetable, (uint64_t) addr,
	ret = kgsl_mmu_set_svm_region(private->pagetable, (uint64_t) addr,
		(uint64_t) size);
		(uint64_t) size);


	if (ret != 0)
	if (ret != 0) {
		return ret;
		spin_unlock(&entry->memdesc.gpuaddr_lock);
		return (unsigned long) ret;
	}


	entry->memdesc.gpuaddr = (uint64_t) addr;
	entry->memdesc.gpuaddr = (uint64_t) addr;
	spin_unlock(&entry->memdesc.gpuaddr_lock);

	entry->memdesc.pagetable = private->pagetable;
	entry->memdesc.pagetable = private->pagetable;


	ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
	ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
	if (ret) {
	if (ret) {
		kgsl_mmu_put_gpuaddr(&entry->memdesc);
		kgsl_mmu_put_gpuaddr(&entry->memdesc);
		return ret;
		return (unsigned long) ret;
	}
	}


	kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
	kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
@@ -4723,6 +4738,14 @@ static unsigned long _search_range(struct kgsl_process_private *private,
		result = _gpu_set_svm_region(private, entry, cpu, len);
		result = _gpu_set_svm_region(private, entry, cpu, len);
		if (!IS_ERR_VALUE(result))
		if (!IS_ERR_VALUE(result))
			break;
			break;
		/*
		 * _gpu_set_svm_region will return -EBUSY if we tried to set up
		 * SVM on an object that already has a GPU address. If
		 * that happens don't bother walking the rest of the
		 * region
		 */
		if ((long) result == -EBUSY)
			return -EBUSY;


		trace_kgsl_mem_unmapped_area_collision(entry, cpu, len);
		trace_kgsl_mem_unmapped_area_collision(entry, cpu, len);


+5 −0
Original line number Original line Diff line number Diff line
@@ -239,6 +239,11 @@ struct kgsl_memdesc {
	 * @reclaimed_page_count: Total number of pages reclaimed
	 * @reclaimed_page_count: Total number of pages reclaimed
	 */
	 */
	int reclaimed_page_count;
	int reclaimed_page_count;
	/*
	 * @gpuaddr_lock: Spinlock to protect the gpuaddr from being accessed by
	 * multiple entities trying to map the same SVM region at once
	 */
	spinlock_t gpuaddr_lock;
};
};


/*
/*
+5 −0
Original line number Original line Diff line number Diff line
@@ -2489,6 +2489,11 @@ static int get_gpuaddr(struct kgsl_pagetable *pagetable,
		return -ENOMEM;
		return -ENOMEM;
	}
	}


	/*
	 * This path is only called in a non-SVM path with locks so we can be
	 * sure we aren't racing with anybody so we don't need to worry about
	 * taking the lock
	 */
	ret = _insert_gpuaddr(pagetable, addr, size);
	ret = _insert_gpuaddr(pagetable, addr, size);
	spin_unlock(&pagetable->lock);
	spin_unlock(&pagetable->lock);


+8 −1
Original line number Original line Diff line number Diff line
@@ -436,10 +436,17 @@ void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc)
	if (PT_OP_VALID(pagetable, put_gpuaddr) && (unmap_fail == 0))
	if (PT_OP_VALID(pagetable, put_gpuaddr) && (unmap_fail == 0))
		pagetable->pt_ops->put_gpuaddr(memdesc);
		pagetable->pt_ops->put_gpuaddr(memdesc);


	memdesc->pagetable = NULL;

	/*
	 * If SVM tries to take a GPU address it will lose the race until the
	 * gpuaddr returns to zero so we shouldn't need to worry about taking a
	 * lock here
	 */

	if (!kgsl_memdesc_is_global(memdesc))
	if (!kgsl_memdesc_is_global(memdesc))
		memdesc->gpuaddr = 0;
		memdesc->gpuaddr = 0;


	memdesc->pagetable = NULL;
}
}
EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);
EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);


+1 −0
Original line number Original line Diff line number Diff line
@@ -924,6 +924,7 @@ void kgsl_memdesc_init(struct kgsl_device *device,
		ilog2(PAGE_SIZE));
		ilog2(PAGE_SIZE));
	kgsl_memdesc_set_align(memdesc, align);
	kgsl_memdesc_set_align(memdesc, align);
	spin_lock_init(&memdesc->lock);
	spin_lock_init(&memdesc->lock);
	spin_lock_init(&memdesc->gpuaddr_lock);
}
}


static int kgsl_shmem_alloc_page(struct page **pages,
static int kgsl_shmem_alloc_page(struct page **pages,