Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4e3ff7f6 authored by Jordan Crouse
Browse files

msm: kgsl: Don't map DDR as strongly ordered



Don't map DDR memory as strongly ordered because it makes the memory
controller sad. All we really care about is the cache characteristics
and write-combine is the same as far as those are concerned.

Change-Id: Ic0dedbad30785c8d7c24ad3249413139593029f0
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 9222a358
Loading
Loading
Loading
Loading
+7 −18
Original line number Diff line number Diff line
@@ -2345,9 +2345,8 @@ static void _setup_cache_mode(struct kgsl_mem_entry *entry,
	uint64_t mode;
	pgprot_t pgprot = vma->vm_page_prot;

	if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(pgprot)))
		mode = KGSL_CACHEMODE_UNCACHED;
	else if (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(pgprot)))
	if ((pgprot_val(pgprot) == pgprot_val(pgprot_noncached(pgprot))) ||
	    (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(pgprot))))
		mode = KGSL_CACHEMODE_WRITECOMBINE;
	else
		mode = KGSL_CACHEMODE_WRITEBACK;
@@ -2355,14 +2354,6 @@ static void _setup_cache_mode(struct kgsl_mem_entry *entry,
	entry->memdesc.flags |= (mode << KGSL_CACHEMODE_SHIFT);
}

/* Return true when the cache mode encoded in @flags is a cached mode */
static bool is_cached(u64 flags)
{
	switch ((flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT) {
	case KGSL_CACHEMODE_UNCACHED:
	case KGSL_CACHEMODE_WRITECOMBINE:
		return false;
	default:
		return true;
	}
}

static int kgsl_setup_dma_buf(struct kgsl_device *device,
				struct kgsl_pagetable *pagetable,
				struct kgsl_mem_entry *entry,
@@ -2429,7 +2420,7 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
	_setup_cache_mode(entry, vma);

	if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT) &&
		is_cached(entry->memdesc.flags))
		kgsl_cachemode_is_cached(entry->memdesc.flags))
		entry->memdesc.flags |= KGSL_MEMFLAGS_IOCOHERENT;

	up_read(&current->mm->mmap_sem);
@@ -2997,7 +2988,7 @@ static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry,
		length = entry->memdesc.size;
	}

	if (is_cached(entry->memdesc.flags)) {
	if (kgsl_cachemode_is_cached(entry->memdesc.flags)) {
		trace_kgsl_mem_sync_cache(entry, offset, length, op);
		ret = kgsl_cache_range_op(&entry->memdesc, offset,
					length, cacheop);
@@ -3306,7 +3297,7 @@ struct kgsl_mem_entry *gpumem_alloc_entry(
		return ERR_PTR(-ENOMEM);

	if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT) &&
		is_cached(flags))
		kgsl_cachemode_is_cached(flags))
		flags |= KGSL_MEMFLAGS_IOCOHERENT;

	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
@@ -3532,7 +3523,7 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,


	if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT) &&
		is_cached(flags))
		kgsl_cachemode_is_cached(flags))
		flags |= KGSL_MEMFLAGS_IOCOHERENT;

	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
@@ -4625,9 +4616,6 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
	cache = kgsl_memdesc_get_cachemode(&entry->memdesc);

	switch (cache) {
	case KGSL_CACHEMODE_UNCACHED:
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		break;
	case KGSL_CACHEMODE_WRITETHROUGH:
		vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
		if (pgprot_val(vma->vm_page_prot) ==
@@ -4637,6 +4625,7 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
	case KGSL_CACHEMODE_WRITEBACK:
		vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
		break;
	case KGSL_CACHEMODE_UNCACHED:
	case KGSL_CACHEMODE_WRITECOMBINE:
	default:
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+1 −5
Original line number Diff line number Diff line
@@ -8,7 +8,6 @@
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/bitfield.h>

#include "kgsl_device.h"
#include "kgsl_pool.h"
@@ -564,7 +563,6 @@ void kgsl_memdesc_init(struct kgsl_device *device,
{
	struct kgsl_mmu *mmu = &device->mmu;
	unsigned int align;
	u32 cachemode;

	memset(memdesc, 0, sizeof(*memdesc));
	/* Turn off SVM if the system doesn't support it */
@@ -590,9 +588,7 @@ void kgsl_memdesc_init(struct kgsl_device *device,
	 * have stale data. This happens primarily due to the limitations
	 * of dma caching APIs available on arm64
	 */
	cachemode = FIELD_GET(KGSL_CACHEMODE_MASK, flags);
	if ((cachemode == KGSL_CACHEMODE_WRITECOMBINE ||
		cachemode == KGSL_CACHEMODE_UNCACHED))
	if (!kgsl_cachemode_is_cached(flags))
		flags &= ~((u64) KGSL_MEMFLAGS_IOCOHERENT);

	if (kgsl_mmu_has_feature(device, KGSL_MMU_NEED_GUARD_PAGE) ||
+15 −0
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -359,4 +360,18 @@ static inline void kgsl_free_sgt(struct sg_table *sgt)
	}
}

/**
 * kgsl_cachemode_is_cached - Check whether flags describe a cached buffer
 * @flags: A bitmask of KGSL_MEMDESC_ flags
 *
 * Return: true if the cache mode in @flags is neither uncached nor
 * write-combined
 */
static inline bool kgsl_cachemode_is_cached(u64 flags)
{
	u64 cachemode = FIELD_GET(KGSL_CACHEMODE_MASK, flags);

	return !(cachemode == KGSL_CACHEMODE_UNCACHED ||
		 cachemode == KGSL_CACHEMODE_WRITECOMBINE);
}
#endif /* __KGSL_SHAREDMEM_H */