Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 11569ad2 authored by Jordan Crouse's avatar Jordan Crouse
Browse files

msm: kgsl: Move setstate to the IOMMU initialization



The setstate memory is an IOMMU-specific construct.  Move it to the
IOMMU code where it belongs.

Change-Id: Ic0dedbada977f2861f7c1300a5365da5b09d70a9
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 2f49086b
Loading
Loading
Loading
Loading
+6 −3
Original line number Diff line number Diff line
@@ -600,8 +600,7 @@ unsigned int adreno_iommu_set_pt_generate_cmds(
	cmds += adreno_iommu_set_apriv(adreno_dev, cmds, 1);

	cmds += _adreno_iommu_add_idle_indirect_cmds(adreno_dev, cmds,
		device->mmu.setstate_memory.gpuaddr +
		KGSL_IOMMU_SETSTATE_NOP_OFFSET);
		iommu->setstate.gpuaddr + KGSL_IOMMU_SETSTATE_NOP_OFFSET);

	if (iommu->version >= 2) {
		if (adreno_is_a5xx(adreno_dev))
@@ -873,15 +872,19 @@ done:
int adreno_iommu_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct kgsl_iommu *iommu = device->mmu.priv;

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		return 0;

	if (iommu == NULL)
		return -ENODEV;

	/*
	 * A nop is required in an indirect buffer when switching
	 * pagetables in-stream
	 */
	kgsl_sharedmem_writel(device, &device->mmu.setstate_memory,
	kgsl_sharedmem_writel(device, &iommu->setstate,
				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
				cp_packet(adreno_dev, CP_NOP, 1));

+24 −10
Original line number Diff line number Diff line
@@ -719,6 +719,26 @@ static size_t snapshot_preemption_record(struct kgsl_device *device, u8 *buf,
	return size + sizeof(*header);
}

/*
 * adreno_snapshot_iommu() - Add IOMMU-specific buffers to a GPU snapshot
 * @device: KGSL device being snapshotted
 * @snapshot: Snapshot instance to append sections to
 *
 * Dumps the IOMMU setstate scratch buffer as a GPU object section and,
 * when the target advertises the preemption feature, the SMMU info
 * buffer as well.
 */
static void adreno_snapshot_iommu(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_mmu *mmu = &device->mmu;
	struct kgsl_iommu *iommu = mmu->priv;

	/* Nothing to dump if the IOMMU private data was never set up */
	if (iommu == NULL)
		return;

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
		snapshot, snapshot_global, &iommu->setstate);

	/* smmu_info is only meaningful on preemption-capable targets */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
			snapshot, snapshot_global, &iommu->smmu_info);
}

/* adreno_snapshot - Snapshot the Adreno GPU state
 * @device - KGSL device to snapshot
 * @snapshot - Pointer to the snapshot instance
@@ -737,7 +757,6 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	struct snapshot_rb_params snap_rb_params;
	struct kgsl_iommu *iommu = device->mmu.priv;

	ib_max_objs = 0;
	/* Reset the list of objects */
@@ -783,25 +802,20 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
			snapshot, snapshot_global, &adreno_dev->dev.memstore);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
			snapshot, snapshot_global,
			&adreno_dev->dev.mmu.setstate_memory);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
			snapshot, snapshot_global,
			&adreno_dev->pwron_fixup);

	if (test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv)) {
	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
		adreno_snapshot_iommu(device, snapshot);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) {
		FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
				snapshot, snapshot_preemption_record,
				&rb->preemption_desc);
		}

		kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
				snapshot, snapshot_global, &iommu->smmu_info);
	}

	/*
+31 −3
Original line number Diff line number Diff line
@@ -957,6 +957,24 @@ static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
	return result;
}

/*
 * _setstate_alloc() - Allocate and initialize the IOMMU setstate buffer
 * @device: KGSL device to allocate against
 * @iommu: IOMMU private data that owns the setstate memdesc
 *
 * Allocates one page of contiguous shared memory for in-stream
 * pagetable-switch commands, marks it GPU read-only, and zeroes it.
 *
 * Return: 0 on success or a negative error code from the allocator.
 */
static int _setstate_alloc(struct kgsl_device *device,
		struct kgsl_iommu *iommu)
{
	int ret;

	ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, NULL,
		PAGE_SIZE);
	if (ret)
		return ret;

	/* Mark the setstate memory as read only */
	iommu->setstate.flags |= KGSL_MEMFLAGS_GPUREADONLY;

	/* Start from a known-clean state before any commands are written */
	kgsl_sharedmem_set(device, &iommu->setstate, 0, 0, PAGE_SIZE);

	return 0;
}

static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
	/*
@@ -974,6 +992,10 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
		return -EINVAL;
	}

	status = _setstate_alloc(KGSL_MMU_DEVICE(mmu), iommu);
	if (status)
		return status;

	/* check requirements for per process pagetables */
	if (ctx->gpu_offset == UINT_MAX) {
		KGSL_CORE_ERR("missing qcom,gpu-offset forces global pt\n");
@@ -994,7 +1016,8 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
	if (iommu->regbase == NULL) {
		KGSL_CORE_ERR("Could not map IOMMU registers 0x%lx:0x%x\n",
			iommu->regstart, iommu->regsize);
		return -ENOMEM;
		status = -ENOMEM;
		goto done;
	}

	if (addr_entry_cache == NULL) {
@@ -1014,7 +1037,12 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
		}
	}

	kgsl_add_global_pt_entry(KGSL_MMU_DEVICE(mmu), &iommu->setstate);

done:
	if (status)
		kgsl_sharedmem_free(&iommu->setstate);

	return status;
}

@@ -1311,7 +1339,7 @@ static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
	}
}

static int kgsl_iommu_close(struct kgsl_mmu *mmu)
static void kgsl_iommu_close(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int i;
@@ -1336,7 +1364,7 @@ static int kgsl_iommu_close(struct kgsl_mmu *mmu)
		kgsl_guard_page = NULL;
	}

	return 0;
	kgsl_free_global(&iommu->setstate);
}

static u64
+3 −1
Original line number Diff line number Diff line
@@ -88,7 +88,7 @@ enum kgsl_iommu_context_id {
	KGSL_IOMMU_CONTEXT_MAX,
};

/* offset at which a nop command is placed in setstate_memory */
/* offset at which a nop command is placed in setstate */
#define KGSL_IOMMU_SETSTATE_NOP_OFFSET	1024

/*
@@ -124,6 +124,7 @@ struct kgsl_iommu_context {
 * @regbase: Virtual address of the IOMMU register base
 * @regstart: Physical address of the iommu registers
 * @regsize: Length of the iommu register region.
 * @setstate: Scratch GPU memory for IOMMU operations
 * @clk_enable_count: The ref count of clock enable calls
 * @clks: Array of pointers to IOMMU clocks
 * @micro_mmu_ctrl: GPU register offset of this global register
@@ -135,6 +136,7 @@ struct kgsl_iommu {
	void __iomem *regbase;
	unsigned long regstart;
	unsigned int regsize;
	struct kgsl_memdesc setstate;
	atomic_t clk_enable_count;
	struct clk *clks[KGSL_IOMMU_MAX_CLKS];
	unsigned int micro_mmu_ctrl;
+11 −37
Original line number Diff line number Diff line
@@ -523,45 +523,24 @@ EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);

int kgsl_mmu_init(struct kgsl_device *device, char *mmutype)
{
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;
	int ret = 0;

	if (mmutype && !strcmp(mmutype, "nommu"))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;

	/*
	 * Don't use kgsl_allocate_global here because we need to get the MMU
	 * set up before we can add the global entry but the MMU init needs the
	 * setstate block. Allocate the memory here and map it later
	 */

	status = kgsl_sharedmem_alloc_contig(device, &mmu->setstate_memory,
					NULL, PAGE_SIZE);
	if (status)
		return status;

	/* Mark the setstate memory as read only */
	mmu->setstate_memory.flags |= KGSL_MEMFLAGS_GPUREADONLY;

	kgsl_sharedmem_set(device, &mmu->setstate_memory, 0, 0,
				mmu->setstate_memory.size);

	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) {
	switch (kgsl_mmu_type) {
	case KGSL_MMU_TYPE_IOMMU:
		mmu->mmu_ops = &kgsl_iommu_ops;
		status =  mmu->mmu_ops->mmu_init(mmu);
		break;
	case KGSL_MMU_TYPE_NONE:
		break;
	}

	if (status)
		goto done;

	/* Add the setstate memory to the global PT entry list */
	status = kgsl_add_global_pt_entry(device, &mmu->setstate_memory);

done:
	if (status)
		kgsl_sharedmem_free(&mmu->setstate_memory);
	if (MMU_OP_VALID(mmu, mmu_init))
		ret = mmu->mmu_ops->mmu_init(mmu);

	return status;
	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_init);

@@ -845,17 +824,12 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
}
EXPORT_SYMBOL(kgsl_mmu_unmap);

int kgsl_mmu_close(struct kgsl_device *device)
void kgsl_mmu_close(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;
	int ret = 0;

	kgsl_free_global(&mmu->setstate_memory);

	if (MMU_OP_VALID(mmu, mmu_close))
		ret = mmu->mmu_ops->mmu_close(mmu);

	return ret;
		mmu->mmu_ops->mmu_close(mmu);
}
EXPORT_SYMBOL(kgsl_mmu_close);

Loading