
Commit 5deb282b authored by Jordan Crouse, committed by Matt Wagantall

msm: kgsl: Remove IOMMU units and other legacy code



The concept of IOMMU units harks back to the 8064 days. The code was
heavily embedded in the infrastructure, so it was harder to remove
when the rest of the IOMMUv0 code went to the great clipboard in the
sky. Remove it now and streamline the interfaces all the way back to
the DT parser.

Change-Id: Ic0dedbad4a33123e37f4b8d272e0421c369e1786
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Jeremy Gebben <jgebben@codeaurora.org>
parent a2c33982
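
For context, here is a minimal sketch of the streamlined device-tree interface the hunks below implement: the IOMMU register block is now described by a single "reg" property read into a start/size pair (regstart/regsize) rather than the old physstart/physend span. The helper name and the trimmed struct are illustrative only, not the driver's exact code; the regstart/regsize fields and the of_property_read_u32_array() call are taken from the diff.

	#include <linux/of.h>

	/* Trimmed, illustrative view of the two fields the parser now fills. */
	struct iommu_reg_block {
		unsigned int regstart;	/* physical start of the IOMMU registers */
		unsigned int regsize;	/* size of the register block */
	};

	/* Hypothetical helper mirroring the parsing pattern in the hunks below. */
	static int parse_iommu_reg_block(struct device_node *node,
					 struct iommu_reg_block *data)
	{
		u32 reg_val[2];
		int ret;

		/* "reg" carries a <start size> pair for the IOMMU register block */
		ret = of_property_read_u32_array(node, "reg", reg_val, 2);
		if (ret)
			return ret;

		data->regstart = reg_val[0];
		data->regsize = reg_val[1];

		return 0;
	}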
+5 −5
@@ -240,8 +240,8 @@ static int kgsl_iommu_pdev_probe(struct platform_device *pdev)
		goto err;
	}

	data->physstart = reg_val[0];
	data->physend = data->physstart + reg_val[1] - 1;
	data->regstart = reg_val[0];
	data->regsize = reg_val[1];

	data->features |= KGSL_MMU_DMA_API;

@@ -876,8 +876,8 @@ static int adreno_of_get_iommu(struct platform_device *pdev,
	if (of_property_read_u32_array(node, "reg", reg_val, 2))
		goto err;

	data->physstart = reg_val[0];
	data->physend = data->physstart + reg_val[1] - 1;
	data->regstart = reg_val[0];
	data->regsize = reg_val[1];

	data->iommu_ctx_count = 0;

@@ -1077,7 +1077,7 @@ int adreno_probe(struct platform_device *pdev)

	/* Defer adreno probe if IOMMU is not already probed */
	if (!of_parse_phandle(pdev->dev.of_node, "iommu", 0) &&
			(iommu_pdev_data.physend == 0))
			(iommu_pdev_data.regstart == 0))
		return -EPROBE_DEFER;

	adreno_dev = adreno_get_dev(pdev);
+37 −41
@@ -413,7 +413,6 @@ static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,
	ttbr0 = kgsl_mmu_get_reg_ahbaddr(&device->mmu, KGSL_IOMMU_CONTEXT_USER,
					KGSL_IOMMU_CTX_TTBR0) >> 2;

	if (kgsl_mmu_hw_halt_supported(&device->mmu)) {
	cmds += cp_wait_for_idle(adreno_dev, cmds);
	/*
	 * glue commands together until next
@@ -446,7 +445,7 @@ static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,
			mmu_ctrl,
			KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE,
			KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE, 0xF);
	}

	if (ADRENO_FEATURE(adreno_dev, ADRENO_HAS_REG_TO_REG_CMDS)) {
		/* ME_SCRATCH_REG to REG copy */
		*cmds++ = cp_packet(adreno_dev, CP_SCRATCH_TO_REG, 1);
@@ -473,8 +472,7 @@ static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,
			*cmds++ = reg_pt_val;
		}
	}
	if (kgsl_mmu_hw_halt_supported(&device->mmu) &&
		adreno_is_a3xx(adreno_dev)) {
	if (adreno_is_a3xx(adreno_dev)) {
		/* unlock the IOMMU lock */
		*cmds++ = cp_packet(adreno_dev, CP_REG_RMW, 3);
		*cmds++ = mmu_ctrl;
@@ -509,8 +507,7 @@ static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,
			cmds, tlbstatus, 0,
			KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE, 0xF);

	if (kgsl_mmu_hw_halt_supported(&device->mmu) &&
		!adreno_is_a3xx(adreno_dev)) {
	if (!adreno_is_a3xx(adreno_dev)) {
		/* unlock the IOMMU lock */
		*cmds++ = cp_packet(adreno_dev, CP_REG_RMW, 3);
		*cmds++ = mmu_ctrl;
@@ -1068,7 +1065,6 @@ int adreno_iommu_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct kgsl_iommu *iommu = device->mmu.priv;
	struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_unit;

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		return 0;
@@ -1076,13 +1072,13 @@ int adreno_iommu_init(struct adreno_device *adreno_dev)
	/* Overwrite the ahb_base_offset for iommu v2 targets here */
	if (kgsl_msm_supports_iommu_v2()) {
		if (adreno_is_a405(adreno_dev))
			iommu_unit->ahb_base_offset =
			iommu->ahb_base_offset =
					KGSL_IOMMU_V2_AHB_BASE_OFFSET_A405;
		else if (adreno_is_a530(adreno_dev))
			iommu_unit->ahb_base_offset =
			iommu->ahb_base_offset =
					KGSL_IOMMU_V2_AHB_BASE_OFFSET_A530;
		else
			iommu_unit->ahb_base_offset =
			iommu->ahb_base_offset =
					KGSL_IOMMU_V2_AHB_BASE_OFFSET;
	}

+167 −264

File changed; the preview size limit was exceeded, so its diff is collapsed.

+32 −79
@@ -33,26 +33,6 @@
/* IOMMU_V2 AHB base points to ContextBank1 */
#define KGSL_IOMMU_CTX_AHB_OFFSET_V2   0

/* TLBLKCR fields */
#define KGSL_IOMMU_TLBLKCR_LKE_MASK		0x00000001
#define KGSL_IOMMU_TLBLKCR_LKE_SHIFT		0
#define KGSL_IOMMU_TLBLKCR_TLBIALLCFG_MASK	0x00000001
#define KGSL_IOMMU_TLBLKCR_TLBIALLCFG_SHIFT	1
#define KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_MASK	0x00000001
#define KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_SHIFT	2
#define KGSL_IOMMU_TLBLKCR_TLBIVAACFG_MASK	0x00000001
#define KGSL_IOMMU_TLBLKCR_TLBIVAACFG_SHIFT	3
#define KGSL_IOMMU_TLBLKCR_FLOOR_MASK		0x000000FF
#define KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT		8
#define KGSL_IOMMU_TLBLKCR_VICTIM_MASK		0x000000FF
#define KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT		16

/* V2PXX fields */
#define KGSL_IOMMU_V2PXX_INDEX_MASK		0x000000FF
#define KGSL_IOMMU_V2PXX_INDEX_SHIFT		0
#define KGSL_IOMMU_V2PXX_VA_MASK		0x000FFFFF
#define KGSL_IOMMU_V2PXX_VA_SHIFT		12

/* FSYNR1 V0 fields */
#define KGSL_IOMMU_FSYNR1_AWRITE_MASK		0x00000001
#define KGSL_IOMMU_FSYNR1_AWRITE_SHIFT		8
@@ -96,8 +76,6 @@ enum kgsl_iommu_reg_map {
	KGSL_IOMMU_CTX_FAR,
	KGSL_IOMMU_CTX_TLBIALL,
	KGSL_IOMMU_CTX_RESUME,
	KGSL_IOMMU_CTX_TLBLKCR,
	KGSL_IOMMU_CTX_V2PUR,
	KGSL_IOMMU_CTX_FSYNR0,
	KGSL_IOMMU_CTX_FSYNR1,
	KGSL_IOMMU_CTX_TLBSYNC,
@@ -137,48 +115,38 @@ struct kgsl_iommu_ctx {
 * from dtsi file
 * @iommu_ctxs:         Pointer to array of struct holding context name and id
 * @iommu_ctx_count:    Number of contexts defined in the dtsi file
 * @physstart:          Start of iommu registers physical address
 * @physend:            End of iommu registers physical address
 * @regstart:           Start of iommu registers physical address
 * @regsize:            Size of registers physical address block
 * @clks                Iommu clocks
 * @features            Iommu features, ex RETENTION, DMA API
 */
struct kgsl_device_iommu_data {
	struct kgsl_iommu_ctx *iommu_ctxs;
	int iommu_ctx_count;
	unsigned int physstart;
	unsigned int physend;
	unsigned int regstart;
	unsigned int regsize;
	struct clk *clks[KGSL_IOMMU_MAX_CLKS];
	unsigned int features;
};


#define KGSL_IOMMU_REG(iommu, ctx, REG) \
	((iommu)->regbase + \
	 (iommu)->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset + \
	 ((ctx) << KGSL_IOMMU_CTX_SHIFT) + (iommu)->ctx_offset)

/* Macros to read/write IOMMU registers */
#define KGSL_IOMMU_SET_CTX_REG_Q(iommu, iommu_unit, ctx, REG, val)	\
		writeq_relaxed(val,					\
		iommu_unit->reg_map.hostptr +				\
		iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
		(ctx << KGSL_IOMMU_CTX_SHIFT) +				\
		iommu->ctx_offset)
#define KGSL_IOMMU_SET_CTX_REG_Q(iommu, ctx, REG, val)	\
		writeq_relaxed((val), KGSL_IOMMU_REG(iommu, ctx, REG))

#define KGSL_IOMMU_GET_CTX_REG_Q(iommu, iommu_unit, ctx, REG)		\
		readq_relaxed(						\
		iommu_unit->reg_map.hostptr +				\
		iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
		(ctx << KGSL_IOMMU_CTX_SHIFT) +				\
		iommu->ctx_offset)
#define KGSL_IOMMU_GET_CTX_REG_Q(iommu, ctx, REG)		\
		readq_relaxed(KGSL_IOMMU_REG(iommu, ctx, REG))

#define KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, ctx, REG, val)	\
		writel_relaxed(val,					\
		iommu_unit->reg_map.hostptr +				\
		iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
		(ctx << KGSL_IOMMU_CTX_SHIFT) +				\
		iommu->ctx_offset)
#define KGSL_IOMMU_SET_CTX_REG(iommu, ctx, REG, val)	\
		writel_relaxed((val), KGSL_IOMMU_REG(iommu, ctx, REG))

#define KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, ctx, REG)		\
		readl_relaxed(						\
		iommu_unit->reg_map.hostptr +				\
		iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
		(ctx << KGSL_IOMMU_CTX_SHIFT) +				\
		iommu->ctx_offset)
#define KGSL_IOMMU_GET_CTX_REG(iommu, ctx, REG)		\
		readl_relaxed(KGSL_IOMMU_REG(iommu, ctx, REG))
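
A brief usage sketch of the consolidated accessors (the wrapper function is hypothetical; the macro names, KGSL_IOMMU_CONTEXT_USER, and the TTBR0 register name all come from this driver): with the iommu_unit argument dropped, a context register is addressed with just the kgsl_iommu pointer, the context index, and the register name.

	/* Hypothetical helper showing the simplified accessor arguments. */
	static void sketch_program_ttbr0(struct kgsl_iommu *iommu, u64 ttbr0_val)
	{
		/* 64-bit write to TTBR0 of the user context via the new macro */
		KGSL_IOMMU_SET_CTX_REG_Q(iommu, KGSL_IOMMU_CONTEXT_USER,
					 TTBR0, ttbr0_val);

		/* Read back through the matching GET macro */
		WARN_ON(KGSL_IOMMU_GET_CTX_REG_Q(iommu, KGSL_IOMMU_CONTEXT_USER,
						 TTBR0) != ttbr0_val);
	}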

/* Gets the lsb value of pagetable */
#define KGSL_IOMMMU_PT_LSB(iommu, pt_val)				\
@@ -188,18 +156,18 @@ struct kgsl_device_iommu_data {
#define KGSL_IOMMU_SETSTATE_NOP_OFFSET	1024

/*
 * struct kgsl_iommu_device - Structure holding data about iommu contexts
 * struct kgsl_iommu_context - Structure holding data about iommu contexts
 * @dev: Device pointer to iommu context
 * @name: context name
 * @attached: Indicates whether this iommu context is presently attached to
 * a pagetable/domain or not
 * @default_ttbr0: The TTBR0 value set by iommu driver on start up
 * @ctx_id: This iommu units context id.
 * @ctx_id: The hardware context ID for the device
 * are on, else the clocks are off
 * fault: Flag when set indicates that this iommu device has caused a page
 * fault
 */
struct kgsl_iommu_device {
struct kgsl_iommu_context {
	struct device *dev;
	const char *name;
	bool attached;
@@ -210,33 +178,14 @@ struct kgsl_iommu_device {
};

/*
 * struct kgsl_iommu_unit - Structure holding data about iommu units. An IOMMU
 * units is basically a separte IOMMU h/w block with it's own IOMMU contexts
 * @dev: Pointer to array of struct kgsl_iommu_device which has information
 * about the IOMMU contexts under this IOMMU unit
 * @reg_map: Memory descriptor which holds the mapped address of this IOMMU
 * units register range
 * struct kgsl_iommu - Structure holding iommu data for kgsl driver
 * @device: Pointer to KGSL device struct
 * @ctx: Array of kgsl_iommu_context structs
 * @regbase: Virtual address of the IOMMU register base
 * @ahb_base_offset - The base address from where IOMMU registers can be
 * accesed from ahb bus
 * @iommu_halt_enable: Valid only on IOMMU-v1, when set indicates that the iommu
 * unit supports halting of the IOMMU, which can be enabled while programming
 * the IOMMU registers for synchronization
 * accesed from AHB bus
 * @clk_enable_count: The ref count of clock enable calls
 * @clks: iommu unit clks
 */
struct kgsl_iommu_unit {
	struct kgsl_iommu_device dev[KGSL_IOMMU_CONTEXT_MAX];
	struct kgsl_memdesc reg_map;
	unsigned int ahb_base_offset;
	int iommu_halt_enable;
	atomic_t clk_enable_count;
	struct clk *clks[KGSL_IOMMU_MAX_CLKS];
};

/*
 * struct kgsl_iommu - Structure holding iommu data for kgsl driver
 * @iommu_unit: Structure containing info about the IOMMU h/w block
 * @device: Pointer to kgsl device
 * @clks: Array of pointers to IOMMU clocks
 * @ctx_offset: The context offset to be added to base address when
 * accessing IOMMU registers from the CPU
 * @ctx_ahb_offset: The context offset to be added to base address when
@@ -246,8 +195,12 @@ struct kgsl_iommu_unit {
 * @smmu_info: smmu info used in a5xx preemption
 */
struct kgsl_iommu {
	struct kgsl_iommu_unit iommu_unit;
	struct kgsl_device *device;
	struct kgsl_iommu_context ctx[KGSL_IOMMU_CONTEXT_MAX];
	void __iomem *regbase;
	unsigned int ahb_base_offset;
	atomic_t clk_enable_count;
	struct clk *clks[KGSL_IOMMU_MAX_CLKS];
	unsigned int ctx_offset;
	unsigned int ctx_ahb_offset;
	struct kgsl_iommu_register_list *iommu_reg_list;
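
To illustrate the effect of dropping the intermediate unit, a hypothetical helper (the function itself is illustrative only; the ctx array and the dev/attached fields come from the definitions above): what used to be reached through iommu->iommu_unit.dev[...] is now a direct iommu->ctx[...] access.

	/* Hypothetical check against the flattened layout. */
	static bool sketch_user_ctx_attached(struct kgsl_iommu *iommu)
	{
		struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];

		/* Previously: &iommu->iommu_unit.dev[KGSL_IOMMU_CONTEXT_USER] */
		return ctx->dev && ctx->attached;
	}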
+1 −17
@@ -89,7 +89,6 @@ struct kgsl_mmu_ops {
	phys_addr_t (*mmu_get_pt_base_addr)
			(struct kgsl_mmu *mmu,
			struct kgsl_pagetable *pt);
	int (*mmu_hw_halt_supported)(struct kgsl_mmu *mmu);
	int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned long pf_policy);
	void (*mmu_set_pagefault)(struct kgsl_mmu *mmu);
	struct kgsl_protected_registers *(*mmu_get_prot_regs)
@@ -256,7 +255,7 @@ static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
 * kgsl_mmu_get_reg_ahbaddr() - Calls the mmu specific function pointer to
 * return the address that GPU can use to access register
 * @mmu:		Pointer to the device mmu
 * @ctx_id:		The context id within the iommu unit
 * @ctx_id:		The MMU HW context ID
 * @reg:		Register whose address is to be returned
 *
 * Returns the ahb address of reg else 0
@@ -271,21 +270,6 @@ static inline unsigned int kgsl_mmu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
		return 0;
}

/*
 * kgsl_mmu_hw_halt_supported() - Runtime check for iommu hw halt
 * @mmu: the mmu
 *
 * Returns non-zero if the iommu supports hw halt,
 * 0 if not.
 */
static inline int kgsl_mmu_hw_halt_supported(struct kgsl_mmu *mmu)
{
	if (mmu->mmu_ops && mmu->mmu_ops->mmu_hw_halt_supported)
		return mmu->mmu_ops->mmu_hw_halt_supported(mmu);
	else
		return 0;
}

/*
 * kgsl_mmu_is_perprocess() - Runtime check for per-process
 * pagetables.