
Commit 66641c7b authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "msm: kgsl: Don't map DDR as strongly ordered"

parents 894fef24 4e3ff7f6
+16 −28
@@ -646,27 +646,6 @@ void a6xx_start(struct adreno_device *adreno_dev)
	}
}

-/*
- * a6xx_microcode_load() - Load microcode
- * @adreno_dev: Pointer to adreno device
- */
-static int a6xx_microcode_load(struct adreno_device *adreno_dev)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
-	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
-	uint64_t gpuaddr;
-
-	gpuaddr = fw->memdesc->gpuaddr;
-	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
-				lower_32_bits(gpuaddr));
-	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
-				upper_32_bits(gpuaddr));
-
-	return adreno_zap_shader_load(adreno_dev, a6xx_core->zap_name);
-}
-
-
/*
 * CP_INIT_MAX_CONTEXT bit tells if the multiple hardware contexts can
 * be used at once of if they should be serialized
@@ -862,6 +841,8 @@ static int a6xx_post_start(struct adreno_device *adreno_dev)

int a6xx_rb_start(struct adreno_device *adreno_dev)
{
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
+	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	uint64_t addr;
@@ -900,9 +881,12 @@ int a6xx_rb_start(struct adreno_device *adreno_dev)
	kgsl_regwrite(device, A6XX_CP_RB_BASE_HI,
		upper_32_bits(rb->buffer_desc->gpuaddr));

-	ret = a6xx_microcode_load(adreno_dev);
-	if (ret)
-		return ret;
+	/* Program the ucode base for CP */
+	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
+			lower_32_bits(fw->memdesc->gpuaddr));
+
+	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
+			upper_32_bits(fw->memdesc->gpuaddr));

	/* Clear the SQE_HALT to start the CP engine */
	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);
@@ -911,17 +895,21 @@ int a6xx_rb_start(struct adreno_device *adreno_dev)
	if (ret)
		return ret;

+	ret = adreno_zap_shader_load(adreno_dev, a6xx_core->zap_name);
+	if (ret)
+		return ret;

	/*
	 * Take the GPU out of secure mode. Try the zap shader if it is loaded,
	 * otherwise just try to write directly to the secure control register
	 */
	if (!adreno_dev->zap_loaded)
		kgsl_regwrite(device, A6XX_RBBM_SECVID_TRUST_CNTL, 0);
-	else
+	else {
 		ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
-
-	if (ret)
-		return ret;
+
+		if (ret)
+			return ret;
+	}

	return a6xx_post_start(adreno_dev);
}
+1 −1
@@ -517,7 +517,7 @@ static void receive_err_req(struct a6xx_gmu_device *gmu, void *rcvd)
{
	struct hfi_err_cmd *cmd = rcvd;

-	dev_err(&gmu->pdev->dev, "HFI Error Received: %d %d %s\n",
+	dev_err(&gmu->pdev->dev, "HFI Error Received: %d %d %.16s\n",
			((cmd->error_code >> 16) & 0xFFFF),
			(cmd->error_code & 0xFFFF),
			(char *) cmd->data);
+3 −2
@@ -1134,8 +1134,9 @@ static inline bool _verify_ib(struct kgsl_device_private *dev_priv,
		return false;
	}

-	/* Make sure that the address is mapped */
-	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
+	/* Make sure that the address is in range and dword aligned */
+	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr) ||
+	    !IS_ALIGNED(ib->gpuaddr, 4)) {
		pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
			context->id, ib->gpuaddr);
		return false;
+19 −0
@@ -6,6 +6,8 @@
#include <linux/slab.h>

#include "a3xx_reg.h"
#include "a5xx_reg.h"
#include "a6xx_reg.h"
#include "adreno.h"
#include "adreno_iommu.h"
#include "adreno_pm4types.h"
@@ -185,6 +187,10 @@ static unsigned int _adreno_iommu_set_pt_v2_a5xx(struct kgsl_device *device,
	*cmds++ = upper_32_bits(ttbr0);
	*cmds++ = contextidr;

+	*cmds++ = cp_type4_packet(A5XX_CP_CNTL, 1);
+	*cmds++ = 1;
+
	*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 4, 1);
	cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc->gpuaddr +
		PT_INFO_OFFSET(ttbr0)));
@@ -192,6 +198,9 @@ static unsigned int _adreno_iommu_set_pt_v2_a5xx(struct kgsl_device *device,
	*cmds++ = upper_32_bits(ttbr0);
	*cmds++ = contextidr;

+	*cmds++ = cp_type4_packet(A5XX_CP_CNTL, 1);
+	*cmds++ = 0;
+
	return cmds - cmds_orig;
}

@@ -211,6 +220,11 @@ static unsigned int _adreno_iommu_set_pt_v2_a6xx(struct kgsl_device *device,
	*cmds++ = contextidr;
	*cmds++ = cb_num;

+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV)) {
+		*cmds++ = cp_type4_packet(A6XX_CP_MISC_CNTL, 1);
+		*cmds++ = 1;
+	}
+
	*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 4, 1);
	cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc->gpuaddr +
		PT_INFO_OFFSET(ttbr0)));
@@ -218,6 +232,11 @@ static unsigned int _adreno_iommu_set_pt_v2_a6xx(struct kgsl_device *device,
	*cmds++ = upper_32_bits(ttbr0);
	*cmds++ = contextidr;

+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV)) {
+		*cmds++ = cp_type4_packet(A6XX_CP_MISC_CNTL, 1);
+		*cmds++ = 0;
+	}
+
	return cmds - cmds_orig;
}

+7 −18
@@ -2345,9 +2345,8 @@ static void _setup_cache_mode(struct kgsl_mem_entry *entry,
	uint64_t mode;
	pgprot_t pgprot = vma->vm_page_prot;

-	if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(pgprot)))
-		mode = KGSL_CACHEMODE_UNCACHED;
-	else if (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(pgprot)))
+	if ((pgprot_val(pgprot) == pgprot_val(pgprot_noncached(pgprot))) ||
+	    (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(pgprot))))
		mode = KGSL_CACHEMODE_WRITECOMBINE;
	else
		mode = KGSL_CACHEMODE_WRITEBACK;
@@ -2355,14 +2354,6 @@ static void _setup_cache_mode(struct kgsl_mem_entry *entry,
	entry->memdesc.flags |= (mode << KGSL_CACHEMODE_SHIFT);
}

-static bool is_cached(u64 flags)
-{
-	u32 mode = (flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT;
-
-	return (mode != KGSL_CACHEMODE_UNCACHED &&
-		mode != KGSL_CACHEMODE_WRITECOMBINE);
-}

static int kgsl_setup_dma_buf(struct kgsl_device *device,
				struct kgsl_pagetable *pagetable,
				struct kgsl_mem_entry *entry,
@@ -2429,7 +2420,7 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
	_setup_cache_mode(entry, vma);

	if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT) &&
-		is_cached(entry->memdesc.flags))
+		kgsl_cachemode_is_cached(entry->memdesc.flags))
		entry->memdesc.flags |= KGSL_MEMFLAGS_IOCOHERENT;

	up_read(&current->mm->mmap_sem);
@@ -2997,7 +2988,7 @@ static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry,
		length = entry->memdesc.size;
	}

-	if (is_cached(entry->memdesc.flags)) {
+	if (kgsl_cachemode_is_cached(entry->memdesc.flags)) {
		trace_kgsl_mem_sync_cache(entry, offset, length, op);
		ret = kgsl_cache_range_op(&entry->memdesc, offset,
					length, cacheop);
@@ -3306,7 +3297,7 @@ struct kgsl_mem_entry *gpumem_alloc_entry(
		return ERR_PTR(-ENOMEM);

	if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT) &&
-		is_cached(flags))
+		kgsl_cachemode_is_cached(flags))
		flags |= KGSL_MEMFLAGS_IOCOHERENT;

	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
@@ -3532,7 +3523,7 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,


	if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT) &&
-		is_cached(flags))
+		kgsl_cachemode_is_cached(flags))
		flags |= KGSL_MEMFLAGS_IOCOHERENT;

	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
@@ -4625,9 +4616,6 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
	cache = kgsl_memdesc_get_cachemode(&entry->memdesc);

	switch (cache) {
-	case KGSL_CACHEMODE_UNCACHED:
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		break;
	case KGSL_CACHEMODE_WRITETHROUGH:
		vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
		if (pgprot_val(vma->vm_page_prot) ==
@@ -4637,6 +4625,7 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
	case KGSL_CACHEMODE_WRITEBACK:
		vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
		break;
+	case KGSL_CACHEMODE_UNCACHED:
	case KGSL_CACHEMODE_WRITECOMBINE:
	default:
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);