Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fe2aee57 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Protect the memdesc->gpuaddr in SVM use cases"

parents 05632b2a b566801d
Loading
Loading
Loading
Loading
+62 −22
Original line number Diff line number Diff line
@@ -254,6 +254,7 @@ static struct kgsl_mem_entry *kgsl_mem_entry_create(void)
		kref_get(&entry->refcount);
	}

	atomic_set(&entry->map_count, 0);
	return entry;
}

@@ -2388,7 +2389,7 @@ static int check_vma(unsigned long hostptr, u64 size)
	return true;
}

static int memdesc_sg_virt(struct kgsl_memdesc *memdesc)
static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, unsigned long useraddr)
{
	int ret = 0;
	long npages = 0, i;
@@ -2412,13 +2413,13 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc)
	}

	down_read(&current->mm->mmap_sem);
	if (!check_vma(memdesc->useraddr, memdesc->size)) {
	if (!check_vma(useraddr, memdesc->size)) {
		up_read(&current->mm->mmap_sem);
		ret = -EFAULT;
		goto out;
	}

	npages = get_user_pages(memdesc->useraddr, sglen, write, pages, NULL);
	npages = get_user_pages(useraddr, sglen, write, pages, NULL);
	up_read(&current->mm->mmap_sem);

	ret = (npages < 0) ? (int)npages : 0;
@@ -2456,7 +2457,6 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,

	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = (uint64_t) size;
	entry->memdesc.useraddr = hostptr;
	entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ADDR;

	if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
@@ -2464,15 +2464,15 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,

		/* Register the address in the database */
		ret = kgsl_mmu_set_svm_region(pagetable,
			(uint64_t) entry->memdesc.useraddr, (uint64_t) size);
			(uint64_t) hostptr, (uint64_t) size);

		if (ret)
			return ret;

		entry->memdesc.gpuaddr = (uint64_t)  entry->memdesc.useraddr;
		entry->memdesc.gpuaddr = (uint64_t) hostptr;
	}

	return memdesc_sg_virt(&entry->memdesc);
	return memdesc_sg_virt(&entry->memdesc, hostptr);
}

#ifdef CONFIG_DMA_SHARED_BUFFER
@@ -2561,8 +2561,7 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
		return ret;
	}

	/* Setup the user addr/cache mode for cache operations */
	entry->memdesc.useraddr = hostptr;
	/* Setup the cache mode for cache operations */
	_setup_cache_mode(entry, vma);

	if (kgsl_mmu_has_feature(device, KGSL_MMU_IO_COHERENT) &&
@@ -3620,7 +3619,12 @@ long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
	param->flags = (unsigned int) entry->memdesc.flags;
	param->size = (size_t) entry->memdesc.size;
	param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
	param->useraddr = entry->memdesc.useraddr;
	/*
	 * Entries can have multiple user mappings so there isn't any one address
	 * we can report. Plus, the user should already know their mappings, so
	 * there isn't any value in reporting it back to them.
	 */
	param->useraddr = 0;

	kgsl_mem_entry_put(entry);
	return result;
@@ -3645,7 +3649,12 @@ long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
	param->flags = entry->memdesc.flags;
	param->size = entry->memdesc.size;
	param->va_len = kgsl_memdesc_footprint(&entry->memdesc);
	param->va_addr = (uint64_t) entry->memdesc.useraddr;
	/*
	 * Entries can have multiple user mappings so there isn't any one address
	 * we can report. Plus, the user should already know their mappings, so
	 * there isn't any value in reporting it back to them.
	 */
	param->va_addr = 0;

	kgsl_mem_entry_put(entry);
	return 0;
@@ -3760,6 +3769,8 @@ static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)

	if (kgsl_mem_entry_get(entry) == 0)
		vma->vm_private_data = NULL;

	atomic_inc(&entry->map_count);
}

static vm_fault_t
@@ -3783,9 +3794,13 @@ kgsl_gpumem_vm_close(struct vm_area_struct *vma)
	if (!entry)
		return;

	entry->memdesc.useraddr = 0;
	atomic64_sub(entry->memdesc.mapsize, &entry->priv->gpumem_mapped);
	entry->memdesc.mapsize = 0;
	/*
	 * Remove the memdesc from the mapped stat once all the mappings have
	 * gone away
	 */
	if (!atomic_dec_return(&entry->map_count))
		atomic64_sub(entry->memdesc.size, &entry->priv->gpumem_mapped);

	kgsl_mem_entry_put(entry);
}

@@ -3817,7 +3832,8 @@ get_mmap_entry(struct kgsl_process_private *private,
		goto err_put;
	}

	if (entry->memdesc.useraddr != 0) {
	/* Don't allow ourselves to remap user memory */
	if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_ADDR) {
		ret = -EBUSY;
		goto err_put;
	}
@@ -3850,19 +3866,34 @@ static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private,
{
	int ret;

	/*
	 * Protect access to the gpuaddr here to prevent multiple vmas from
	 * trying to map a SVM region at the same time
	 */
	spin_lock(&entry->memdesc.lock);

	if (entry->memdesc.gpuaddr) {
		spin_unlock(&entry->memdesc.lock);
		return (unsigned long) -EBUSY;
	}

	ret = kgsl_mmu_set_svm_region(private->pagetable, (uint64_t) addr,
		(uint64_t) size);

	if (ret != 0)
		return ret;
	if (ret != 0) {
		spin_unlock(&entry->memdesc.lock);
		return (unsigned long) ret;
	}

	entry->memdesc.gpuaddr = (uint64_t) addr;
	spin_unlock(&entry->memdesc.lock);

	entry->memdesc.pagetable = private->pagetable;

	ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
	if (ret) {
		kgsl_mmu_put_gpuaddr(&entry->memdesc);
		return ret;
		return (unsigned long) ret;
	}

	kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
@@ -3954,6 +3985,16 @@ static unsigned long get_svm_unmapped_area(struct file *file,
			ret = set_svm_area(file, entry, iova, len, flags);
			if (!IS_ERR_VALUE(ret))
				return ret;

			/*
			 * set_svm_area will return -EBUSY if we tried to set up
			 * SVM on an object that already has a GPU address. If
			 * that happens don't bother walking the rest of the
			 * region
			 */
			if ((long) ret == -EBUSY)
				return -EBUSY;

		}

		iova = kgsl_mmu_find_svm_region(private->pagetable,
@@ -4074,12 +4115,11 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)

	vma->vm_file = file;

	entry->memdesc.useraddr = vma->vm_start;
	atomic64_add(entry->memdesc.size, &entry->priv->gpumem_mapped);

	entry->memdesc.mapsize += entry->memdesc.size;
	atomic64_add(entry->memdesc.mapsize, &entry->priv->gpumem_mapped);
	atomic_inc(&entry->map_count);

	trace_kgsl_mem_mmap(entry);
	trace_kgsl_mem_mmap(entry, vma->vm_start);
	return 0;
}

+10 −4
Original line number Diff line number Diff line
@@ -184,11 +184,9 @@ struct kgsl_memdesc_ops {
 * @pagetable: Pointer to the pagetable that the object is mapped in
 * @hostptr: Kernel virtual address
 * @hostptr_count: Number of threads using hostptr
 * @useraddr: User virtual address (if applicable)
 * @gpuaddr: GPU virtual address
 * @physaddr: Physical address of the memory object
 * @size: Size of the memory object
 * @mapsize: Size of memory mapped in userspace
 * @priv: Internal flags and settings
 * @sgt: Scatter gather table for allocated pages
 * @ops: Function hooks for the memdesc memory type
@@ -202,11 +200,9 @@ struct kgsl_memdesc {
	struct kgsl_pagetable *pagetable;
	void *hostptr;
	unsigned int hostptr_count;
	unsigned long useraddr;
	uint64_t gpuaddr;
	phys_addr_t physaddr;
	uint64_t size;
	uint64_t mapsize;
	unsigned int priv;
	struct sg_table *sgt;
	const struct kgsl_memdesc_ops *ops;
@@ -215,6 +211,11 @@ struct kgsl_memdesc {
	unsigned long attrs;
	struct page **pages;
	unsigned int page_count;
	/*
	 * @lock: Spinlock to protect the gpuaddr from being accessed by
	 * multiple entities trying to map the same SVM region at once
	 */
	spinlock_t lock;
};

/**
@@ -271,6 +272,11 @@ struct kgsl_mem_entry {
	 * userspace
	 */
	u64 mapped;
	/**
	 * @map_count: Count how many vmas this object is mapped in - used for
	 * debugfs accounting
	 */
	atomic_t map_count;
};

struct kgsl_device_private;
+8 −4
Original line number Diff line number Diff line
@@ -184,7 +184,8 @@ static int print_mem_entry(void *data, void *ptr)
	flags[3] = get_alignflag(m);
	flags[4] = get_cacheflag(m);
	flags[5] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-';
	flags[6] = (m->useraddr) ? 'Y' : 'N';
	/* Show Y if at least one vma has this entry mapped (could be multiple) */
	flags[6] = atomic_read(&entry->map_count) ? 'Y' : 'N';
	flags[7] = kgsl_memdesc_is_secured(m) ?  's' : '-';
	flags[8] = '-';
	flags[9] = '\0';
@@ -197,10 +198,13 @@ static int print_mem_entry(void *data, void *ptr)

	seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16llu %6d %6d",
			(uint64_t *)(uintptr_t) m->gpuaddr,
			(unsigned long *) m->useraddr,
			m->size, entry->id, flags,
			/*
			 * Show zero for the useraddr - we can't reliably track
			 * that value for multiple vmas anyway
			 */
			0, m->size, entry->id, flags,
			memtype_str(usermem_type),
			usage, (m->sgt ? m->sgt->nents : 0), m->mapsize,
			usage, (m->sgt ? m->sgt->nents : 0), m->size,
			egl_surface_count, egl_image_count);

	if (entry->metadata[0] != 0)
+5 −0
Original line number Diff line number Diff line
@@ -2227,6 +2227,11 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		goto out;
	}

	/*
	 * This path is only called in a non-SVM path with locks so we can be
	 * sure we aren't racing with anybody so we don't need to worry about
	 * taking the lock
	 */
	ret = _insert_gpuaddr(pagetable, addr, size);
	if (ret == 0) {
		memdesc->gpuaddr = addr;
+8 −1
Original line number Diff line number Diff line
@@ -409,10 +409,17 @@ void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc)
	if (PT_OP_VALID(pagetable, put_gpuaddr) && (unmap_fail == 0))
		pagetable->pt_ops->put_gpuaddr(memdesc);

	memdesc->pagetable = NULL;


	/*
	 * If SVM tries to take a GPU address it will lose the race until the
	 * gpuaddr returns to zero so we shouldn't need to worry about taking a
	 * lock here
	 */
	if (!kgsl_memdesc_is_global(memdesc))
		memdesc->gpuaddr = 0;

	memdesc->pagetable = NULL;
}

/**
Loading