Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cec2f747 authored by Hareesh Gundu, committed by Gerrit - the friendly Code Review server
Browse files

msm: kgsl: Perform cache operation with kernel address



Kernel should never access untrusted pointers directly.
If the address is not mapped to kernel, map to kernel
address space and perform cache related operation.

Change-Id: I433befcde620e51b8ec17954ddb710f6084e0592
Signed-off-by: Hareesh Gundu <hareeshg@codeaurora.org>
Signed-off-by: Haranath Pasupuleti <hpasup@codeaurora.org>
parent 7d7aefae
Loading
Loading
Loading
Loading
+104 −29
Original line number Diff line number Diff line
/* Copyright (c) 2002,2007-2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -560,16 +560,70 @@ static inline unsigned int _fixup_cache_range_op(unsigned int op)
}
#endif

/*
 * kgsl_do_cache_op() - perform a CPU cache maintenance operation on a buffer.
 * @page:   first page of the buffer, or NULL if @addr is a kernel mapping
 * @addr:   kernel virtual base address of the buffer (used when @page is NULL)
 * @offset: byte offset into the buffer at which to start
 * @size:   number of bytes to operate on
 * @op:     one of the KGSL_CACHE_OP_* operations
 *
 * Exactly one of @page / @addr identifies the buffer. For highmem pages the
 * operation is performed page-by-page through temporary kmap_atomic()
 * mappings, since such pages may have no permanent kernel virtual address.
 *
 * Return: 0 on success, -EINVAL if @op is not a recognized cache operation.
 */
static int kgsl_do_cache_op(struct page *page, void *addr,
		uint64_t offset, uint64_t size, unsigned int op)
{
	void (*cache_op)(const void *, const void *);

	/*
	 * The dmac_xxx_range functions handle addresses and sizes that
	 * are not aligned to the cacheline size correctly.
	 */
	switch (_fixup_cache_range_op(op)) {
	case KGSL_CACHE_OP_FLUSH:
		cache_op = dmac_flush_range;
		break;
	case KGSL_CACHE_OP_CLEAN:
		cache_op = dmac_clean_range;
		break;
	case KGSL_CACHE_OP_INV:
		cache_op = dmac_inv_range;
		break;
	default:
		return -EINVAL;
	}

	if (page != NULL) {
		unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
		/*
		 *  page_address() returns the kernel virtual address of page.
		 *  For high memory kernel virtual address exists only if page
		 *  has been mapped. So use a version of kmap rather than
		 *  page_address() for high memory.
		 */
		if (PageHighMem(page)) {
			offset &= ~PAGE_MASK;

			do {
				unsigned int len = size;

				/* Clamp each chunk to the end of its page */
				if (len + offset > PAGE_SIZE)
					len = PAGE_SIZE - offset;

				page = pfn_to_page(pfn++);
				addr = kmap_atomic(page);
				cache_op(addr + offset, addr + offset + len);
				kunmap_atomic(addr);

				size -= len;
				offset = 0;
			} while (size);

			return 0;
		}

		/* Lowmem pages always have a kernel virtual address */
		addr = page_address(page);
	}

	cache_op(addr + offset, addr + offset + (size_t) size);
	return 0;
}

/*
 * kgsl_cache_range_op() - perform a cache operation on a range of a memdesc.
 * @memdesc: memory descriptor for the buffer
 * @offset:  byte offset into the buffer
 * @size:    number of bytes to operate on
 * @op:      one of the KGSL_CACHE_OP_* operations
 *
 * The kernel must never operate directly on an untrusted user pointer.
 * If the buffer already has a kernel mapping (hostptr) it is used as-is;
 * otherwise the operation is performed via the backing pages, either
 * page-by-page through the scatter-gather table or through a temporary
 * vmap() of the page array.
 *
 * Return: 0 on success or a negative errno (-EINVAL, -ERANGE, -ENOMEM).
 */
int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
		uint64_t size, unsigned int op)
{
	void *addr = NULL;
	int ret = 0;

	if (size == 0 || size > UINT_MAX)
		return -EINVAL;

	/* Make sure that the offset + size does not overflow */
	if ((offset + size < offset) || (offset + size < size))
		return -ERANGE;

	/* Check that offset+length does not exceed memdesc->size */
	if (offset + size > memdesc->size)
		return -ERANGE;

	if (memdesc->hostptr) {
		addr = memdesc->hostptr;
		/* Make sure the offset + size do not overflow the address */
		if (addr + ((size_t) offset + (size_t) size) < addr)
			return -ERANGE;

		return kgsl_do_cache_op(NULL, addr, offset, size, op);
	}

	/*
	 * If the buffer is not mapped in the kernel, map it (or walk its
	 * pages) and perform the cache operation on kernel addresses.
	 */
	if (memdesc->sgt != NULL) {
		struct scatterlist *sg;
		unsigned int i, pos = 0;

		for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
			uint64_t sg_offset, sg_left;

			/* Skip entries entirely before the target offset */
			if (offset >= (pos + sg->length)) {
				pos += sg->length;
				continue;
			}
			sg_offset = offset > pos ? offset - pos : 0;
			sg_left = (sg->length - sg_offset > size) ? size :
						sg->length - sg_offset;
			ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
								sg_left, op);
			/*
			 * Stop on the first failure so the error is not
			 * silently overwritten by later iterations.
			 */
			if (ret)
				break;
			size -= sg_left;
			if (size == 0)
				break;
			pos += sg->length;
		}
	} else if (memdesc->pages != NULL) {
		addr = vmap(memdesc->pages, memdesc->page_count,
				VM_IOREMAP, pgprot_writecombine(PAGE_KERNEL));
		if (addr == NULL)
			return -ENOMEM;

		/* Make sure the offset + size do not overflow the address */
		if (addr + ((size_t) offset + (size_t) size) < addr) {
			/* Don't leak the temporary mapping on error */
			vunmap(addr);
			return -ERANGE;
		}

		ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
		vunmap(addr);
	}
	return ret;
}
EXPORT_SYMBOL(kgsl_cache_range_op);