Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e3b9dc99 authored by Lynus Vaz
Browse files

msm: kgsl: Force VA alignment and padding if required



Some targets require all GPU virtual mappings to be aligned to and
padded up to a particular size. Allocate a padding page and map it
into the virtual space as many times as necessary to make up the size
required by the target.

Change-Id: I8f41c83bf88c20cf619ce2a7a63c31a05b732c4d
Signed-off-by: Lynus Vaz <lvaz@codeaurora.org>
parent b3025dc4
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -1281,6 +1281,14 @@ static int adreno_probe(struct platform_device *pdev)
	if (adreno_support_64bit(adreno_dev))
		device->mmu.features |= KGSL_MMU_64BIT;

	/* Default to 4K alignment (in other words, no additional padding) */
	device->mmu.va_padding = PAGE_SIZE;

	if (adreno_dev->gpucore->va_padding) {
		device->mmu.features |= KGSL_MMU_PAD_VA;
		device->mmu.va_padding = adreno_dev->gpucore->va_padding;
	}

	status = kgsl_device_platform_probe(device);
	if (status) {
		device->pdev = NULL;
+2 −0
Original line number Diff line number Diff line
@@ -355,6 +355,7 @@ struct adreno_firmware {
 * @regfw_name: Filename for the register sequence firmware
 * @gpmu_tsens: ID for the temperature sensor used by the GPMU
 * @max_power: Max possible power draw of a core, units elephant tail hairs
 * @va_padding: Size to pad allocations to, zero if not required
 */
struct adreno_gpu_core {
	enum adreno_gpurev gpurev;
@@ -385,6 +386,7 @@ struct adreno_gpu_core {
	const char *regfw_name;
	unsigned int gpmu_tsens;
	unsigned int max_power;
	uint64_t va_padding;
};


+4 −0
Original line number Diff line number Diff line
@@ -4361,6 +4361,10 @@ static unsigned long _get_svm_area(struct kgsl_process_private *private,
		return -ERANGE;

	if (flags & MAP_FIXED) {
		/* We must honor alignment requirements */
		if (!IS_ALIGNED(hint, align))
			return -EINVAL;

		/* we must use addr 'hint' or fail */
		return _gpu_set_svm_region(private, entry, hint, len);
	} else if (hint != 0) {
+2 −0
Original line number Diff line number Diff line
@@ -210,6 +210,7 @@ struct kgsl_memdesc_ops {
 * @physaddr: Physical address of the memory object
 * @size: Size of the memory object
 * @mapsize: Size of memory mapped in userspace
 * @pad_to: Size that we pad the memdesc to
 * @priv: Internal flags and settings
 * @sgt: Scatter gather table for allocated pages
 * @ops: Function hooks for the memdesc memory type
@@ -229,6 +230,7 @@ struct kgsl_memdesc {
	phys_addr_t physaddr;
	uint64_t size;
	uint64_t mapsize;
	uint64_t pad_to;
	unsigned int priv;
	struct sg_table *sgt;
	struct kgsl_memdesc_ops *ops;
+34 −23
Original line number Diff line number Diff line
@@ -232,7 +232,7 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
	memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;

	memdesc->priv |= KGSL_MEMDESC_GLOBAL;
	global_pt_alloc += memdesc->size;
	global_pt_alloc += kgsl_memdesc_footprint(memdesc);

	global_pt_entries[global_pt_count].memdesc = memdesc;
	strlcpy(global_pt_entries[global_pt_count].name, name,
@@ -368,22 +368,33 @@ static int _attach_pt(struct kgsl_iommu_pt *iommu_pt,
	return ret;
}

static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
		uint64_t gpuaddr, phys_addr_t physaddr,
		uint64_t size, unsigned int flags)
static int _iommu_map_single_page_sync_pc(struct kgsl_pagetable *pt,
		uint64_t gpuaddr, phys_addr_t physaddr, int times,
		unsigned int flags)
{
	struct kgsl_iommu_pt *iommu_pt = pt->priv;
	int ret;
	size_t mapped = 0;
	int i;
	int ret = 0;

	_iommu_sync_mmu_pc(true);

	ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr, size, flags);
	for (i = 0; i < times; i++) {
		ret = iommu_map(iommu_pt->domain, gpuaddr + mapped,
				physaddr, PAGE_SIZE, flags);
		if (ret)
			break;
		mapped += PAGE_SIZE;
	}

	if (ret)
		iommu_unmap(iommu_pt->domain, gpuaddr, mapped);

	_iommu_sync_mmu_pc(false);

	if (ret) {
		KGSL_CORE_ERR("map err: 0x%016llX, 0x%llx, 0x%x, %d\n",
			gpuaddr, size, flags, ret);
		KGSL_CORE_ERR("map err: 0x%016llX, 0x%lx, 0x%x, %d\n",
			gpuaddr, PAGE_SIZE * times, flags, ret);
		return -ENODEV;
	}

@@ -503,7 +514,7 @@ static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
 */

static struct page *kgsl_guard_page;
static struct kgsl_memdesc kgsl_secure_guard_page_memdesc;
static struct page *kgsl_secure_guard_page;

/*
 * The dummy page is a placeholder/extra page to be used for sparse mappings.
@@ -1458,7 +1469,8 @@ static void kgsl_iommu_close(struct kgsl_mmu *mmu)
	if (iommu->regbase != NULL)
		iounmap(iommu->regbase);

	kgsl_sharedmem_free(&kgsl_secure_guard_page_memdesc);
	kgsl_free_secure_page(kgsl_secure_guard_page);
	kgsl_secure_guard_page = NULL;

	if (kgsl_guard_page != NULL) {
		__free_page(kgsl_guard_page);
@@ -1730,9 +1742,11 @@ static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
				   uint64_t gpuaddr,
				   unsigned int protflags)
{
	uint64_t pad_size;
	phys_addr_t physaddr;

	if (!kgsl_memdesc_has_guard_page(memdesc))
	pad_size = kgsl_memdesc_footprint(memdesc) - memdesc->size;
	if (!pad_size)
		return 0;

	/*
@@ -1742,21 +1756,16 @@ static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
	 * mapped to save 1MB of memory if CPZ is not used.
	 */
	if (kgsl_memdesc_is_secured(memdesc)) {
		struct scatterlist *sg;
		unsigned int sgp_size = pt->mmu->secure_align_mask + 1;

		if (!kgsl_secure_guard_page_memdesc.sgt) {
			if (kgsl_allocate_user(KGSL_MMU_DEVICE(pt->mmu),
					&kgsl_secure_guard_page_memdesc,
					sgp_size, KGSL_MEMFLAGS_SECURE)) {
		if (!kgsl_secure_guard_page) {
			kgsl_secure_guard_page = kgsl_alloc_secure_page();
			if (!kgsl_secure_guard_page) {
				KGSL_CORE_ERR(
					"Secure guard page alloc failed\n");
				return -ENOMEM;
			}
		}

		sg = kgsl_secure_guard_page_memdesc.sgt->sgl;
		physaddr = page_to_phys(sg_page(sg));
		physaddr = page_to_phys(kgsl_secure_guard_page);
	} else {
		if (kgsl_guard_page == NULL) {
			kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
@@ -1768,9 +1777,11 @@ static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
		physaddr = page_to_phys(kgsl_guard_page);
	}

	return _iommu_map_sync_pc(pt, gpuaddr, physaddr,
			kgsl_memdesc_guard_page_size(memdesc),
			protflags & ~IOMMU_WRITE);
	if (!MMU_FEATURE(pt->mmu, KGSL_MMU_PAD_VA))
		protflags &= ~IOMMU_WRITE;

	return _iommu_map_single_page_sync_pc(pt, gpuaddr, physaddr,
			pad_size >> PAGE_SHIFT, protflags);
}

static unsigned int _get_protection_flags(struct kgsl_memdesc *memdesc)
Loading