Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b0ee18e3 authored by Jordan Crouse
Browse files

msm: kgsl: Stop using KGSL_MEMDESC_CONTIG



KGSL_MEMDESC_CONTIG is only used for global GPU allocations but there are
no longer any global GPU objects that _need_ contiguous memory. Remove
all usage of it and transition the one remaining multi-page user
(memstore) to the paged allocator instead.

Change-Id: Ic0dedbadda417a1d4103e165c13d1debb3316b72
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent cfa7f9cf
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -1313,7 +1313,7 @@ static int adreno_probe(struct platform_device *pdev)
	struct adreno_device *adreno_dev;
	struct kgsl_device *device;
	int status;
	unsigned int priv;
	unsigned int priv = 0;
	u32 size;

	of_id = of_match_device(adreno_match_table, &pdev->dev);
@@ -1400,7 +1400,6 @@ static int adreno_probe(struct platform_device *pdev)
		device->mmu.features |= KGSL_MMU_IO_COHERENT;

	/* Allocate the memstore for storing timestamps and other useful info */
	priv = KGSL_MEMDESC_CONTIG;

	if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
		priv |= KGSL_MEMDESC_PRIVILEGED;
+1 −2
Original line number Diff line number Diff line
@@ -115,8 +115,7 @@ static void a6xx_init(struct adreno_device *adreno_dev)
	a6xx_crashdump_init(adreno_dev);

	kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
		PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
		"powerup_register_list");
		PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED, "powerup_register_list");
}

static void a6xx_protect_init(struct adreno_device *adreno_dev)
+23 −16
Original line number Diff line number Diff line
@@ -4381,11 +4381,23 @@ long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
	return ret;
}

static vm_fault_t
kgsl_memstore_vm_fault(struct vm_fault *vmf)
{
	struct kgsl_memdesc *memdesc = vmf->vma->vm_private_data;

	return memdesc->ops->vmfault(memdesc, vmf->vma, vmf);
}

/* VM callbacks for userspace mappings of the memstore (demand faulting) */
static const struct vm_operations_struct kgsl_memstore_vm_ops = {
	.fault = kgsl_memstore_vm_fault,
};

static int
kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
kgsl_mmap_memstore(struct file *file, struct kgsl_device *device,
		struct vm_area_struct *vma)
{
	struct kgsl_memdesc *memdesc = &device->memstore;
	int result;
	unsigned int vma_size = vma->vm_end - vma->vm_start;

	/* The memstore can only be mapped as read only */
@@ -4394,22 +4406,17 @@ kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
		return -EPERM;

	if (memdesc->size  != vma_size) {
		dev_err(device->dev,
			     "memstore bad size: %d should be %llu\n",
			     vma_size, memdesc->size);
		dev_err(device->dev, "Cannot partially map the memstore\n");
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_private_data = memdesc;
	vma->vm_flags |= memdesc->ops->vmflags;
	vma->vm_ops = &kgsl_memstore_vm_ops;
	vma->vm_file = file;

	result = remap_pfn_range(vma, vma->vm_start,
				device->memstore.physaddr >> PAGE_SHIFT,
				 vma_size, vma->vm_page_prot);
	if (result != 0)
		dev_err(device->dev, "remap_pfn_range failed: %d\n",
			     result);

	return result;
	return 0;
}

/*
@@ -4425,7 +4432,7 @@ static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
		vma->vm_private_data = NULL;
}

static int
static vm_fault_t
kgsl_gpumem_vm_fault(struct vm_fault *vmf)
{
	struct kgsl_mem_entry *entry = vmf->vma->vm_private_data;
@@ -4768,7 +4775,7 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
	/* Handle legacy behavior for memstore */

	if (vma_offset == (unsigned long) device->memstore.gpuaddr)
		return kgsl_mmap_memstore(device, vma);
		return kgsl_mmap_memstore(file, device, vma);

	/*
	 * The reference count on the entry that we get from