Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5768dfb5 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Use a common sharedmem init function"

parents 870bc256 b3025dc4
Loading
Loading
Loading
Loading
+17 −53
Original line number | Diff line number | Diff line
@@ -2405,7 +2405,6 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
	struct kgsl_gpuobj_import *param = data;
	struct kgsl_mem_entry *entry;
	int ret, fd = -1;
	struct kgsl_mmu *mmu = &dev_priv->device->mmu;

	entry = kgsl_mem_entry_create();
	if (entry == NULL)
@@ -2419,18 +2418,10 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
			| KGSL_MEMFLAGS_FORCE_32BIT
			| KGSL_MEMFLAGS_IOCOHERENT;

	/* Disable IO coherence if it is not supported on the chip */
	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
		param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);

	if (kgsl_is_compat_task())
		param->flags |= KGSL_MEMFLAGS_FORCE_32BIT;

	entry->memdesc.flags = param->flags;

	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;

	kgsl_memdesc_init(dev_priv->device, &entry->memdesc, param->flags);
	if (param->type == KGSL_USER_MEM_TYPE_ADDR)
		ret = _gpuobj_map_useraddr(dev_priv->device, private->pagetable,
			entry, param);
@@ -2669,6 +2660,7 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mmu *mmu = &dev_priv->device->mmu;
	unsigned int memtype;
	uint64_t flags;

	/*
	 * If content protection is not enabled and secure buffer
@@ -2705,30 +2697,17 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
	 * Note: CACHEMODE is ignored for this call. Caching should be
	 * determined by type of allocation being mapped.
	 */
	param->flags &= KGSL_MEMFLAGS_GPUREADONLY
	flags = param->flags & (KGSL_MEMFLAGS_GPUREADONLY
				| KGSL_MEMTYPE_MASK
				| KGSL_MEMALIGN_MASK
				| KGSL_MEMFLAGS_USE_CPU_MAP
				| KGSL_MEMFLAGS_SECURE
			| KGSL_MEMFLAGS_IOCOHERENT;

	/* Disable IO coherence if it is not supported on the chip */
	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
		param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);

	entry->memdesc.flags = (uint64_t) param->flags;
				| KGSL_MEMFLAGS_IOCOHERENT);

	if (kgsl_is_compat_task())
		entry->memdesc.flags |= KGSL_MEMFLAGS_FORCE_32BIT;

	if (!kgsl_mmu_use_cpu_map(mmu))
		entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
		flags |= KGSL_MEMFLAGS_FORCE_32BIT;

	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;

	if (param->flags & KGSL_MEMFLAGS_SECURE)
		entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
	kgsl_memdesc_init(dev_priv->device, &entry->memdesc, flags);

	switch (memtype) {
	case KGSL_MEM_ENTRY_USER:
@@ -3124,10 +3103,6 @@ struct kgsl_mem_entry *gpumem_alloc_entry(
		| KGSL_MEMFLAGS_FORCE_32BIT
		| KGSL_MEMFLAGS_IOCOHERENT;

	/* Turn off SVM if the system doesn't support it */
	if (!kgsl_mmu_use_cpu_map(mmu))
		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);

	/* Return not supported error if secure memory isn't enabled */
	if (!kgsl_mmu_is_secured(mmu) &&
			(flags & KGSL_MEMFLAGS_SECURE)) {
@@ -3136,10 +3111,6 @@ struct kgsl_mem_entry *gpumem_alloc_entry(
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Secure memory disables advanced addressing modes */
	if (flags & KGSL_MEMFLAGS_SECURE)
		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);

	/* Cap the alignment bits to the highest number we can handle */
	align = MEMFLAGS(flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT);
	if (align >= ilog2(KGSL_MAX_ALIGN)) {
@@ -3158,20 +3129,10 @@ struct kgsl_mem_entry *gpumem_alloc_entry(

	flags = kgsl_filter_cachemode(flags);

	/* Disable IO coherence if it is not supported on the chip */
	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
		flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);

	entry = kgsl_mem_entry_create();
	if (entry == NULL)
		return ERR_PTR(-ENOMEM);

	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;

	if (flags & KGSL_MEMFLAGS_SECURE)
		entry->memdesc.priv |= KGSL_MEMDESC_SECURE;

	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
		size, flags);
	if (ret != 0)
@@ -3355,6 +3316,7 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
	struct kgsl_process_private *process = dev_priv->process_priv;
	struct kgsl_sparse_phys_alloc *param = data;
	struct kgsl_mem_entry *entry;
	uint64_t flags;
	int ret;
	int id;

@@ -3387,11 +3349,12 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
	entry->id = id;
	entry->priv = process;

	entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_PHYS;
	kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
	flags = KGSL_MEMFLAGS_SPARSE_PHYS |
		((ilog2(param->pagesize) << KGSL_MEMALIGN_SHIFT) &
			KGSL_MEMALIGN_MASK);

	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
			param->size, entry->memdesc.flags);
			param->size, flags);
	if (ret)
		goto err_remove_idr;

@@ -3480,7 +3443,8 @@ long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
	if (entry == NULL)
		return -ENOMEM;

	entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_VIRT;
	kgsl_memdesc_init(dev_priv->device, &entry->memdesc,
			KGSL_MEMFLAGS_SPARSE_VIRT);
	entry->memdesc.size = param->size;
	entry->memdesc.cur_bindings = 0;
	kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
+3 −4
Original line number | Diff line number | Diff line
@@ -260,13 +260,12 @@ static void kgsl_setup_qdss_desc(struct kgsl_device *device)
		return;
	}

	gpu_qdss_desc.flags = 0;
	kgsl_memdesc_init(device, &gpu_qdss_desc, 0);
	gpu_qdss_desc.priv = 0;
	gpu_qdss_desc.physaddr = gpu_qdss_entry[0];
	gpu_qdss_desc.size = gpu_qdss_entry[1];
	gpu_qdss_desc.pagetable = NULL;
	gpu_qdss_desc.ops = NULL;
	gpu_qdss_desc.dev = device->dev->parent;
	gpu_qdss_desc.hostptr = NULL;

	result = memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
@@ -305,13 +304,12 @@ static void kgsl_setup_qtimer_desc(struct kgsl_device *device)
		return;
	}

	gpu_qtimer_desc.flags = 0;
	kgsl_memdesc_init(device, &gpu_qtimer_desc, 0);
	gpu_qtimer_desc.priv = 0;
	gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
	gpu_qtimer_desc.size = gpu_qtimer_entry[1];
	gpu_qtimer_desc.pagetable = NULL;
	gpu_qtimer_desc.ops = NULL;
	gpu_qtimer_desc.dev = device->dev->parent;
	gpu_qtimer_desc.hostptr = NULL;

	result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
@@ -1483,6 +1481,7 @@ static int _setstate_alloc(struct kgsl_device *device,
{
	int ret;

	kgsl_memdesc_init(device, &iommu->setstate, 0);
	ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);

	if (!ret) {
+35 −3
Original line number | Diff line number | Diff line
@@ -341,7 +341,7 @@ int kgsl_allocate_user(struct kgsl_device *device,
{
	int ret;

	memdesc->flags = flags;
	kgsl_memdesc_init(device, memdesc, flags);

	if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
		ret = kgsl_sharedmem_alloc_contig(device, memdesc, size);
@@ -696,6 +696,40 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
}
EXPORT_SYMBOL(kgsl_cache_range_op);

/*
 * kgsl_memdesc_init - zero and initialize a shared memory descriptor
 * @device: KGSL device that owns the descriptor
 * @memdesc: memory descriptor to initialize (contents are fully overwritten)
 * @flags: requested KGSL_MEMFLAGS_* / KGSL_MEMTYPE / KGSL_MEMALIGN bits
 *
 * Common initialization for all memdesc users: clears the descriptor,
 * sanitizes @flags against the MMU's actual capabilities (SVM, secure
 * allocations, IO coherence), sets the derived priv bits (guard page,
 * secure), stores the filtered flags, and enforces a minimum alignment
 * of PAGE_SIZE. Centralizes logic previously duplicated at each call
 * site (see the removed copies in the ioctl paths of this commit).
 */
void kgsl_memdesc_init(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc, uint64_t flags)
{
	struct kgsl_mmu *mmu = &device->mmu;
	unsigned int align;

	memset(memdesc, 0, sizeof(*memdesc));
	/* Turn off SVM if the system doesn't support it */
	if (!kgsl_mmu_use_cpu_map(mmu))
		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);

	/* Secure memory disables advanced addressing modes */
	if (flags & KGSL_MEMFLAGS_SECURE)
		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);

	/* Disable IO coherence if it is not supported on the chip */
	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
		flags &= ~((uint64_t) KGSL_MEMFLAGS_IOCOHERENT);

	/* Reserve room for a guard page when the MMU requires one */
	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
		memdesc->priv |= KGSL_MEMDESC_GUARD_PAGE;

	/* Mirror the (already validated) secure flag into the priv bits */
	if (flags & KGSL_MEMFLAGS_SECURE)
		memdesc->priv |= KGSL_MEMDESC_SECURE;

	/* Store only the filtered flags so callers see effective settings */
	memdesc->flags = flags;
	memdesc->dev = device->dev->parent;

	/*
	 * Honor the caller's requested alignment (encoded in the
	 * KGSL_MEMALIGN field) but never go below PAGE_SIZE.
	 */
	align = max_t(unsigned int,
		(memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT,
		ilog2(PAGE_SIZE));
	kgsl_memdesc_set_align(memdesc, align);
}

int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			uint64_t size)
@@ -896,8 +930,6 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)

	if (memdesc->pages)
		kgsl_free(memdesc->pages);

	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

+6 −3
Original line number | Diff line number | Diff line
/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,9 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc,
			uint64_t offset, uint64_t size,
			unsigned int op);

void kgsl_memdesc_init(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc, uint64_t flags);

void kgsl_process_init_sysfs(struct kgsl_device *device,
		struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
@@ -282,8 +285,8 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
{
	int ret;

	memdesc->flags = flags;
	memdesc->priv = priv;
	kgsl_memdesc_init(device, memdesc, flags);
	memdesc->priv |= priv;

	if (((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0) ||
		(kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))