Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b566801d authored by Jordan Crouse's avatar Jordan Crouse Committed by Harshitha Sai Neelati
Browse files

msm: kgsl: Protect the memdesc->gpuaddr in SVM use cases



When SVM is in use, only one GPU address can be assigned to the
memory descriptor. Don't allow the GPU address to be changed once
a process has negotiated it for the first time.

Change-Id: Ic0dedbad2a1b3ccdc2c1598a6c501b2be288d64e
Signed-off-by: default avatarJordan Crouse <jcrouse@codeaurora.org>
parent fcb8f9f2
Loading
Loading
Loading
Loading
+28 −3
Original line number Diff line number Diff line
@@ -3810,19 +3810,34 @@ static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private,
{
	int ret;

	/*
	 * Protect access to the gpuaddr here to prevent multiple vmas from
	 * trying to map a SVM region at the same time
	 */
	spin_lock(&entry->memdesc.lock);

	if (entry->memdesc.gpuaddr) {
		spin_unlock(&entry->memdesc.lock);
		return (unsigned long) -EBUSY;
	}

	ret = kgsl_mmu_set_svm_region(private->pagetable, (uint64_t) addr,
		(uint64_t) size);

	if (ret != 0)
		return ret;
	if (ret != 0) {
		spin_unlock(&entry->memdesc.lock);
		return (unsigned long) ret;
	}

	entry->memdesc.gpuaddr = (uint64_t) addr;
	spin_unlock(&entry->memdesc.lock);

	entry->memdesc.pagetable = private->pagetable;

	ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
	if (ret) {
		kgsl_mmu_put_gpuaddr(&entry->memdesc);
		return ret;
		return (unsigned long) ret;
	}

	kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
@@ -3914,6 +3929,16 @@ static unsigned long get_svm_unmapped_area(struct file *file,
			ret = set_svm_area(file, entry, iova, len, flags);
			if (!IS_ERR_VALUE(ret))
				return ret;

			/*
			 * set_svm_area will return -EBUSY if we tried to set up
			 * SVM on an object that already has a GPU address. If
			 * that happens don't bother walking the rest of the
			 * region
			 */
			if ((long) ret == -EBUSY)
				return -EBUSY;

		}

		iova = kgsl_mmu_find_svm_region(private->pagetable,
+5 −0
Original line number Diff line number Diff line
@@ -211,6 +211,11 @@ struct kgsl_memdesc {
	unsigned long attrs;
	struct page **pages;
	unsigned int page_count;
	/*
	 * @lock: Spinlock to protect the gpuaddr from being accessed by
	 * multiple entities trying to map the same SVM region at once
	 */
	spinlock_t lock;
};

/**
+5 −0
Original line number Diff line number Diff line
@@ -2227,6 +2227,11 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		goto out;
	}

	/*
	 * This path is only called in a non-SVM path with locks so we can be
	 * sure we aren't racing with anybody so we don't need to worry about
	 * taking the lock
	 */
	ret = _insert_gpuaddr(pagetable, addr, size);
	if (ret == 0) {
		memdesc->gpuaddr = addr;
+8 −1
Original line number Diff line number Diff line
@@ -409,10 +409,17 @@ void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc)
	if (PT_OP_VALID(pagetable, put_gpuaddr) && (unmap_fail == 0))
		pagetable->pt_ops->put_gpuaddr(memdesc);

	memdesc->pagetable = NULL;


	/*
	 * If SVM tries to take a GPU address it will lose the race until the
	 * gpuaddr returns to zero so we shouldn't need to worry about taking a
	 * lock here
	 */
	if (!kgsl_memdesc_is_global(memdesc))
		memdesc->gpuaddr = 0;

	memdesc->pagetable = NULL;
}

/**
+2 −0
Original line number Diff line number Diff line
@@ -609,6 +609,8 @@ void kgsl_memdesc_init(struct kgsl_device *device,
		(memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT,
		ilog2(PAGE_SIZE));
	kgsl_memdesc_set_align(memdesc, align);

	spin_lock_init(&memdesc->lock);
}

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)