Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5dce71ba authored by Jeremy Gebben, committed by Tarun Karra
Browse files

msm: kgsl: redo iommu self programming



Use attributes from the iommu domain to retrieve proper TTBR0 and
CONTEXTIDR values, rather than reading registers and using our
own field masks.

Use the DYNAMIC attribute when creating per-process domains.
This allows the iommu driver to configure the domain's pagetable
before we create mappings. It also allows the iommu driver to issue
TLBI commands during iommu_unmap calls for a per-process domain.
This removes the need for dodgy TLBIALL commands from kgsl after
iommu_unmap() returns.

Change-Id: Ie3a24a1ae9fb768d78c6ec18ab739e689c4d8f32
Signed-off-by: Jeremy Gebben <jgebben@codeaurora.org>
parent c3d6b932
Loading
Loading
Loading
Loading
+20 −3
Original line number Diff line number Diff line
@@ -110,6 +110,7 @@ static void a5xx_preemption_start(struct adreno_device *adreno_dev,
	struct kgsl_device *device = &(adreno_dev->dev);
	struct kgsl_iommu *iommu = device->mmu.priv;
	uint64_t ttbr0;
	uint32_t contextidr;

	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(wptr), rb->wptr);
@@ -119,8 +120,12 @@ static void a5xx_preemption_start(struct adreno_device *adreno_dev,
		_hi_32(rb->preemption_desc.gpuaddr));
	kgsl_sharedmem_readq(&rb->pagetable_desc, &ttbr0,
		offsetof(struct adreno_ringbuffer_pagetable_info, ttbr0));
	kgsl_sharedmem_readl(&rb->pagetable_desc, &contextidr,
		offsetof(struct adreno_ringbuffer_pagetable_info, contextidr));
	kgsl_sharedmem_writeq(device, &iommu->smmu_info,
		offsetof(struct a5xx_cp_smmu_info, ttbr0), ttbr0);
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		offsetof(struct a5xx_cp_smmu_info, context_idr), contextidr);
}

/*
@@ -1549,6 +1554,7 @@ static void a5xx_start(struct adreno_device *adreno_dev)
	uint val = 0, i;
	struct adreno_ringbuffer *rb;
	uint64_t def_ttbr0;
	uint32_t contextidr;

	adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
			ARRAY_SIZE(a5xx_vbif_platforms));
@@ -1712,8 +1718,10 @@ static void a5xx_start(struct adreno_device *adreno_dev)
	kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL2, 0x0000003F);

	if (adreno_is_preemption_enabled(adreno_dev)) {
		def_ttbr0 = kgsl_mmu_get_default_ttbr0(&device->mmu,
				KGSL_IOMMU_CONTEXT_USER);
		struct kgsl_pagetable *pt = device->mmu.defaultpagetable;

		def_ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
		contextidr = kgsl_mmu_pagetable_get_contextidr(pt);

		/* Initialize the context switch record here */
		kgsl_sharedmem_writel(device, &iommu->smmu_info,
@@ -1721,8 +1729,17 @@ static void a5xx_start(struct adreno_device *adreno_dev)
				A5XX_CP_SMMU_INFO_MAGIC_REF);
		kgsl_sharedmem_writeq(device, &iommu->smmu_info,
				PREEMPT_SMMU_RECORD(ttbr0), def_ttbr0);
		/*
		 * The CP doesn't actually use the asid field, so
		 * put a bad value into it until it is removed from
		 * the preemption record.
		 */
		kgsl_sharedmem_writeq(device, &iommu->smmu_info,
				PREEMPT_SMMU_RECORD(asid),
				0xdecafbad);
		kgsl_sharedmem_writeq(device, &iommu->smmu_info,
				PREEMPT_SMMU_RECORD(asid), 1);
				PREEMPT_SMMU_RECORD(context_idr),
				contextidr);
		adreno_writereg64(adreno_dev,
				ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
				ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+54 −111
Original line number Diff line number Diff line
@@ -493,12 +493,10 @@ static unsigned int _adreno_iommu_pt_update_pid_to_mem(

static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,
					unsigned int *cmds_orig,
					phys_addr_t pt_val,
					unsigned int ptname)
					u64 ttbr0, u32 contextidr, u32 ptname)
{
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	uint64_t ttbr0 = 0;
	unsigned int *cmds = cmds_orig;
	unsigned int *cond_exec_ptr;

@@ -521,28 +519,19 @@ static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,
	cmds++;

	if (ADRENO_FEATURE(adreno_dev, ADRENO_HAS_REG_TO_REG_CMDS)) {
		int count = 1;

		if (_hi_32(KGSL_IOMMU_CTX_TTBR0_ADDR_MASK))
			count = 2;
		/* transfer the ttbr0 value to ME_SCRATCH */
		/* transfer the ttbr0 and contextidr values to ME_SCRATCH */
		*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_TO_REG, 2, 1);
		*cmds++ = count << 16 | adreno_getreg(adreno_dev,
		*cmds++ = 3 << 16 | adreno_getreg(adreno_dev,
				ADRENO_REG_CP_SCRATCH_REG6);
		cmds += cp_gpuaddr(adreno_dev, cmds,
			   (rb->pagetable_desc.gpuaddr +
			   offsetof(struct adreno_ringbuffer_pagetable_info,
			   ttbr0) + sizeof(uint64_t)));
			   ttbr0)));
		cmds += cp_wait_for_idle(adreno_dev, cmds);
		*cmds++ = cp_packet(adreno_dev, CP_REG_TO_SCRATCH, 1);
		*cmds++ = (count << 24) | (6 << 16) |
		*cmds++ = (3 << 24) | (6 << 16) |
			adreno_getreg(adreno_dev,
					ADRENO_REG_CP_SCRATCH_REG6);
	} else {
		ttbr0 = kgsl_mmu_get_default_ttbr0(&device->mmu,
			KGSL_IOMMU_CONTEXT_USER);
		ttbr0 &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
		ttbr0 |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
	}

	cmds += cp_wait_for_idle(adreno_dev, cmds);
@@ -552,22 +541,18 @@ static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,
	if (ADRENO_FEATURE(adreno_dev, ADRENO_HAS_REG_TO_REG_CMDS)) {
		/* ME_SCRATCH_REG to REG copy */
		*cmds++ = cp_packet(adreno_dev, CP_SCRATCH_TO_REG, 1);
		if (_hi_32(KGSL_IOMMU_CTX_TTBR0_ADDR_MASK))
		*cmds++ = (2 << 24) | (6 << 16) | ttbr0;
		else
			*cmds++ = (1 << 24) | (6 << 16) | ttbr0;
		*cmds++ = cp_packet(adreno_dev, CP_SCRATCH_TO_REG, 1);
		*cmds++ = (1 << 24) | (8 << 16) | ttbr0;
	} else {
		if (_hi_32(KGSL_IOMMU_CTX_TTBR0_ADDR_MASK)) {
			cmds += _cp_smmu_reg(adreno_dev, cmds,
					KGSL_IOMMU_CTX_TTBR0, 2);
		cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TTBR0, 2);
		*cmds++ = _lo_32(ttbr0);
		*cmds++ = _hi_32(ttbr0);
		} else {
		cmds += _cp_smmu_reg(adreno_dev, cmds,
					KGSL_IOMMU_CTX_TTBR0, 1);
			*cmds++ = _lo_32(ttbr0);
		}
				KGSL_IOMMU_CTX_CONTEXTIDR, 1);
		*cmds++ = contextidr;
	}

	/* a3xx doesn't have MEQ space to hold the TLBI commands */
	if (adreno_is_a3xx(adreno_dev))
		cmds += _iommu_unlock(adreno_dev, cmds);
@@ -591,29 +576,20 @@ static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,

static unsigned int _adreno_iommu_set_pt_v2_a3xx(struct kgsl_device *device,
					unsigned int *cmds_orig,
					phys_addr_t pt_val)
					u64 ttbr0, u32 contextidr)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	uint64_t ttbr0;
	unsigned int *cmds = cmds_orig;

	cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds);

	ttbr0 = kgsl_mmu_get_default_ttbr0(&device->mmu,
			KGSL_IOMMU_CONTEXT_USER);
	ttbr0 &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
	ttbr0 |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);

	cmds += _vbif_lock(adreno_dev, cmds);

	if (_hi_32(KGSL_IOMMU_CTX_TTBR0_ADDR_MASK)) {
	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TTBR0, 2);
	*cmds++ = _lo_32(ttbr0);
	*cmds++ = _hi_32(ttbr0);
	} else {
		cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TTBR0, 1);
		*cmds++ = _lo_32(ttbr0);
	}
	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_CONTEXTIDR, 1);
	*cmds++ = contextidr;

	cmds += _vbif_unlock(adreno_dev, cmds);

@@ -629,29 +605,20 @@ static unsigned int _adreno_iommu_set_pt_v2_a3xx(struct kgsl_device *device,

static unsigned int _adreno_iommu_set_pt_v2_a4xx(struct kgsl_device *device,
					unsigned int *cmds_orig,
					phys_addr_t pt_val)
					u64 ttbr0, u32 contextidr)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	uint64_t ttbr0;
	unsigned int *cmds = cmds_orig;

	cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds);

	ttbr0 = kgsl_mmu_get_default_ttbr0(&device->mmu,
			KGSL_IOMMU_CONTEXT_USER);
	ttbr0 &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
	ttbr0 |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);

	cmds += _vbif_lock(adreno_dev, cmds);

	if (_hi_32(KGSL_IOMMU_CTX_TTBR0_ADDR_MASK)) {
	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TTBR0, 2);
	*cmds++ = _lo_32(ttbr0);
	*cmds++ = _hi_32(ttbr0);
	} else {
		cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TTBR0, 1);
		*cmds++ = _lo_32(ttbr0);
	}
	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_CONTEXTIDR, 1);
	*cmds++ = contextidr;

	cmds += _vbif_unlock(adreno_dev, cmds);

@@ -667,17 +634,12 @@ static unsigned int _adreno_iommu_set_pt_v2_a4xx(struct kgsl_device *device,

static unsigned int _adreno_iommu_set_pt_v2_a5xx(struct kgsl_device *device,
					unsigned int *cmds_orig,
			phys_addr_t pt_val, struct adreno_ringbuffer *rb)
					u64 ttbr0, u32 contextidr,
					struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	uint64_t ttbr0 = 0;
	unsigned int *cmds = cmds_orig;

	ttbr0 = kgsl_mmu_get_default_ttbr0(&device->mmu,
			KGSL_IOMMU_CONTEXT_USER);
	ttbr0 &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
	ttbr0 |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);

	cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds);
	cmds += cp_wait_for_me(adreno_dev, cmds);

@@ -685,13 +647,14 @@ static unsigned int _adreno_iommu_set_pt_v2_a5xx(struct kgsl_device *device,
	*cmds++ = cp_packet(adreno_dev, CP_SMMU_TABLE_UPDATE, 3);
	*cmds++ = _lo_32(ttbr0);
	*cmds++ = _hi_32(ttbr0);
	*cmds++ = 0;
	*cmds++ = contextidr;

	*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
	*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 4, 1);
	cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
		offsetof(struct adreno_ringbuffer_pagetable_info, ttbr0)));
	*cmds++ = _lo_32(ttbr0);
	*cmds++ = _hi_32(ttbr0);
	*cmds++ = contextidr;

	/* release all commands with wait_for_me */
	cmds += cp_wait_for_me(adreno_dev, cmds);
@@ -714,7 +677,8 @@ unsigned int adreno_iommu_set_pt_generate_cmds(
{
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	phys_addr_t pt_val;
	u64 ttbr0;
	u32 contextidr;
	unsigned int *cmds_orig = cmds;
	struct kgsl_iommu *iommu = adreno_dev->dev.mmu.priv;

@@ -722,7 +686,8 @@ unsigned int adreno_iommu_set_pt_generate_cmds(
	if (test_bit(ADRENO_DEVICE_FAULT, &adreno_dev->priv))
		return 0;

	pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu, pt);
	ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
	contextidr = kgsl_mmu_pagetable_get_contextidr(pt);

	cmds += _adreno_iommu_add_idle_indirect_cmds(adreno_dev, cmds,
		device->mmu.setstate_memory.gpuaddr +
@@ -731,17 +696,18 @@ unsigned int adreno_iommu_set_pt_generate_cmds(
	if (iommu->version >= 2) {
		if (adreno_is_a5xx(adreno_dev))
			cmds += _adreno_iommu_set_pt_v2_a5xx(device, cmds,
						pt_val, rb);
						ttbr0, contextidr, rb);
		else if (adreno_is_a4xx(adreno_dev))
			cmds += _adreno_iommu_set_pt_v2_a4xx(device, cmds,
						pt_val);
						ttbr0, contextidr);
		else if (adreno_is_a3xx(adreno_dev))
			cmds += _adreno_iommu_set_pt_v2_a3xx(device, cmds,
						pt_val);
						ttbr0, contextidr);
		else
			BUG(); /* new GPU family? */
	} else {
		cmds += _adreno_iommu_set_pt_v1(rb, cmds, pt_val, pt->name);
		cmds += _adreno_iommu_set_pt_v1(rb, cmds, ttbr0, contextidr,
						pt->name);
	}

	/* invalidate all base pointers */
@@ -751,7 +717,7 @@ unsigned int adreno_iommu_set_pt_generate_cmds(
}

/**
 * adreno_iommu_set_pt_ib() - Generate commands to swicth pagetable. The
 * adreno_iommu_set_pt_ib() - Generate commands to switch pagetable. The
 * commands generated use an IB
 * @rb: The RB in which the commands will be executed
 * @cmds: Memory pointer where commands are generated
@@ -764,45 +730,22 @@ unsigned int adreno_iommu_set_pt_ib(struct adreno_ringbuffer *rb,
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *cmds_orig = cmds;
	phys_addr_t pt_val;
	uint64_t ttbr0;
	struct kgsl_iommu_pt *iommu_pt = pt->priv;

	pt_val = kgsl_mmu_get_pt_base_addr(&(rb->device->mmu), pt);

	/* put the ptname in pagetable desc */
	*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
	cmds += cp_gpuaddr(adreno_dev, cmds,
			(rb->pagetable_desc.gpuaddr +
			offsetof(struct adreno_ringbuffer_pagetable_info,
			incoming_ptname)));
	*cmds++ = pt->name;
	/* Write the ttbr0 value to pagetable desc memory */
	ttbr0 = kgsl_mmu_get_default_ttbr0(&device->mmu,
			KGSL_IOMMU_CONTEXT_USER);
	ttbr0 &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
	ttbr0 |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);

	if (_hi_32(KGSL_IOMMU_CTX_TTBR0_ADDR_MASK)) {
	/* Write the ttbr0 and contextidr values to pagetable desc memory */
	*cmds++ =  cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
	cmds += cp_gpuaddr(adreno_dev, cmds,
			(rb->pagetable_desc.gpuaddr +
			offsetof(struct adreno_ringbuffer_pagetable_info,
			   ttbr0) + sizeof(uint64_t)));
		*cmds++ = _lo_32(ttbr0);
		*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
		cmds += cp_gpuaddr(adreno_dev, cmds,
			   (rb->pagetable_desc.gpuaddr +
			   offsetof(struct adreno_ringbuffer_pagetable_info,
			   ttbr0) + sizeof(uint64_t) + sizeof(unsigned int)));
		*cmds++ = _hi_32(ttbr0);
	} else {
			ttbr0)));
	*cmds++ = _lo_32(iommu_pt->ttbr0);

	*cmds++ =  cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
	cmds += cp_gpuaddr(adreno_dev, cmds,
			(rb->pagetable_desc.gpuaddr +
			offsetof(struct adreno_ringbuffer_pagetable_info,
			   ttbr0) + sizeof(uint64_t)));
		*cmds++ = _lo_32(ttbr0);
	}
			contextidr)));
	*cmds++ = iommu_pt->contextidr;

	*cmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 1);
	*cmds++ = 0;
+3 −2
Original line number Diff line number Diff line
@@ -52,8 +52,8 @@ struct adreno_submit_time {
 * switching of pagetable this value equals current_rb_ptname.
 * @switch_pt_enable: Flag used during pagetable switch to check if pt
 * switch can be skipped
 * @adreno_ttbr0: Parameters used during pagetable switch, it contains the
 * pagetable values that need to be programmmed into the TTBR0 registers
 * @ttbr0: value to program into TTBR0 during pagetable switch.
 * @contextidr: value to program into CONTEXTIDR during pagetable switch.
 */
struct adreno_ringbuffer_pagetable_info {
	int current_global_ptname;
@@ -61,6 +61,7 @@ struct adreno_ringbuffer_pagetable_info {
	int incoming_ptname;
	int switch_pt_enable;
	uint64_t ttbr0;
	unsigned int contextidr;
};

/**
+17 −18
Original line number Diff line number Diff line
@@ -475,8 +475,7 @@ static size_t snapshot_capture_mem_list(struct kgsl_device *device,
	}

	header->num_entries = num_mem;
	header->ptbase =
		kgsl_mmu_pagetable_get_ptbase(process->pagetable);
	header->ptbase = kgsl_mmu_pagetable_get_ttbr0(process->pagetable);
	/*
	 * Walk throught the memory list and store the
	 * tuples(gpuaddr, size, memtype) in snapshot
@@ -555,7 +554,7 @@ static size_t snapshot_ib(struct kgsl_device *device, u8 *buf,
	/* Write the sub-header for the section */
	header->gpuaddr = obj->gpuaddr;
	header->ptbase =
		kgsl_mmu_pagetable_get_ptbase(obj->entry->priv->pagetable);
		kgsl_mmu_pagetable_get_ttbr0(obj->entry->priv->pagetable);
	header->size = obj->size >> 2;

	/* Write the contents of the ib */
@@ -607,39 +606,39 @@ static void setup_fault_process(struct kgsl_device *device,
				struct kgsl_snapshot *snapshot,
				struct kgsl_process_private *process)
{
	phys_addr_t hw_ptbase, proc_ptbase;
	u64 hw_ptbase, proc_ptbase;

	if (process != NULL && !kgsl_process_private_get(process))
		process = NULL;

	/* Get the physical address of the MMU pagetable */
	hw_ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
	hw_ptbase = kgsl_mmu_get_current_ttbr0(&device->mmu);

	/* if we have an input process, make sure the ptbases match */
	if (process) {
		proc_ptbase = kgsl_mmu_pagetable_get_ptbase(process->pagetable);
		proc_ptbase = kgsl_mmu_pagetable_get_ttbr0(process->pagetable);
		/* agreement! No need to check further */
		if (hw_ptbase == proc_ptbase)
			goto done;

		kgsl_process_private_put(process);
		process = NULL;
		KGSL_CORE_ERR("snapshot: ptbase mismatch hw %pa sw %pa\n",
				&hw_ptbase, &proc_ptbase);
		KGSL_CORE_ERR("snapshot: ptbase mismatch hw %llx sw %llx\n",
				hw_ptbase, proc_ptbase);
	}

	/* try to find the right pagetable by walking the process list */
	if (kgsl_mmu_is_perprocess(&device->mmu)) {
		struct kgsl_process_private *tmp_private;
		struct kgsl_process_private *tmp;

		mutex_lock(&kgsl_driver.process_mutex);
		list_for_each_entry(tmp_private,
				&kgsl_driver.process_list, list) {
			if (kgsl_mmu_pt_equal(&device->mmu,
						tmp_private->pagetable,
						hw_ptbase)
				&& kgsl_process_private_get(tmp_private)) {
					process = tmp_private;
		list_for_each_entry(tmp, &kgsl_driver.process_list, list) {
			u64 pt_ttbr0;

			pt_ttbr0 = kgsl_mmu_pagetable_get_ttbr0(tmp->pagetable);
			if ((pt_ttbr0 == hw_ptbase)
			    && kgsl_process_private_get(tmp)) {
				process = tmp;
				break;
			}
		}
@@ -677,7 +676,7 @@ static size_t snapshot_global(struct kgsl_device *device, u8 *buf,
	header->size = memdesc->size >> 2;
	header->gpuaddr = memdesc->gpuaddr;
	header->ptbase =
		kgsl_mmu_pagetable_get_ptbase(device->mmu.defaultpagetable);
		kgsl_mmu_pagetable_get_ttbr0(device->mmu.defaultpagetable);
	header->type = SNAPSHOT_GPU_OBJECT_GLOBAL;

	memcpy(ptr, memdesc->hostptr, memdesc->size);
@@ -717,7 +716,7 @@ static size_t snapshot_preemption_record(struct kgsl_device *device, u8 *buf,
	header->size = size >> 2;
	header->gpuaddr = memdesc->gpuaddr;
	header->ptbase =
		kgsl_mmu_pagetable_get_ptbase(device->mmu.defaultpagetable);
		kgsl_mmu_pagetable_get_ttbr0(device->mmu.defaultpagetable);
	header->type = SNAPSHOT_GPU_OBJECT_GLOBAL;

	memcpy(ptr, memdesc->hostptr, size);
+342 −337

File changed.

Preview size limit exceeded, changes collapsed.

Loading