Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3d2eb06b authored by Tarun Karra
Browse files

msm: kgsl: Restructure IOMMU clock management



Restructure IOMMU clock management code to enable/disable clocks
based on iommu unit instead of context. In hardware IOMMU clocks
are associated with each IOMMU unit. There is no granularity of
clock control for each iommu context. Turning on the clocks for
one context in a IOMMU unit automatically turns on the clocks for
all the contexts in IOMMU unit. Reduce the number of clock
enable/disable calls and structure the software design as per
hardware design by doing clock management per IOMMU unit.

Change-Id: Ib54dbb7a31d6b30b65c07ef130bee149e3e587d8
Signed-off-by: Tarun Karra <tkarra@codeaurora.org>
parent 3fc0e794
Loading
Loading
Loading
Loading
+3 −7
Original line number Diff line number Diff line
@@ -1115,10 +1115,7 @@ static int adreno_iommu_setstate(struct kgsl_device *device,

	cmds = link;

	result = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);

	if (result)
		goto done;
	kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);

	pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
				device->mmu.hwpagetable);
@@ -1158,11 +1155,10 @@ static int adreno_iommu_setstate(struct kgsl_device *device,
	 * after the command has been retired
	 */
	if (result)
		kgsl_mmu_disable_clk(&device->mmu,
						KGSL_IOMMU_CONTEXT_USER);
		kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);
	else
		kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts,
						KGSL_IOMMU_CONTEXT_USER);
						KGSL_IOMMU_MAX_UNITS);

done:
	kfree(link);
+3 −12
Original line number Diff line number Diff line
@@ -358,23 +358,14 @@ void a4xx_snapshot(struct adreno_device *adreno_dev,
			a4xx_sp_tp_registers_count, 0);

	/* Turn on MMU clocks since we read MMU registers */
	if (kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_CORE_ERR("Failed to turn on iommu user context clocks\n");
		goto skip_regs;
	}
	if (kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_PRIV)) {
		kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
		KGSL_CORE_ERR("Failed to turn on iommu priv context clocks\n");
		goto skip_regs;
	}
	kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);

	/* Master set of (non debug) registers */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
		kgsl_snapshot_dump_regs, &list);

	kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
	kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_PRIV);
	kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);

skip_regs:
	kgsl_snapshot_indexed_registers(device, snapshot,
		A4XX_CP_STATE_DEBUG_INDEX, A4XX_CP_STATE_DEBUG_DATA,
		0, snap_data->sect_sizes->cp_state_deb);
+93 −141
Original line number Diff line number Diff line
@@ -464,38 +464,29 @@ done:
/*
 * kgsl_iommu_disable_clk - Disable iommu clocks
 * @mmu - Pointer to mmu structure
 * @unit - Iommu unit
 *
 * Disables iommu clocks
 * Disables iommu clocks for an iommu unit
 * Return - void
 */
static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int unit)
{
	struct kgsl_iommu *iommu = mmu->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	int i, j;

	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		for (j = 0; j < iommu_unit->dev_count; j++) {
			if (ctx_id != iommu_unit->dev[j].ctx_id)

		/* Turn off the clks for IOMMU unit requested */
		if ((unit != i) && (unit != KGSL_IOMMU_MAX_UNITS))
			continue;
			atomic_dec(&iommu_unit->dev[j].clk_enable_count);
			BUG_ON(
			atomic_read(&iommu_unit->dev[j].clk_enable_count) < 0);
			/*
			 * the clock calls have a refcount so call them on every
			 * enable/disable call
			 */
			iommu_drvdata = dev_get_drvdata(
					iommu_unit->dev[j].dev->parent);
			if (iommu->gtcu_iface_clk)
				clk_disable_unprepare(iommu->gtcu_iface_clk);
			if (iommu_drvdata->aclk)
				clk_disable_unprepare(iommu_drvdata->aclk);
			if (iommu_drvdata->clk)
				clk_disable_unprepare(iommu_drvdata->clk);
			clk_disable_unprepare(iommu_drvdata->pclk);
		}

		atomic_dec(&iommu_unit->clk_enable_count);
		BUG_ON(atomic_read(&iommu_unit->clk_enable_count) < 0);

		for (j = (KGSL_IOMMU_MAX_CLKS - 1); j >= 0; j--)
			if (iommu_unit->clks[j])
				clk_disable_unprepare(iommu_unit->clks[j]);
	}
}

@@ -517,7 +508,7 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device,
{
	struct kgsl_iommu_disable_clk_param *param = data;

	kgsl_iommu_disable_clk(param->mmu, param->ctx_id);
	kgsl_iommu_disable_clk(param->mmu, param->unit);

	/* Free param we are done using it */
	kfree(param);
@@ -530,8 +521,7 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device,
 * @ts_valid - Indicates whether ts parameter is valid, if this parameter
 * is false then it means that the calling function wants to disable the
 * IOMMU clocks immediately without waiting for any timestamp
 * @ctx_id: Context id of the IOMMU context for which clocks are to be
 * turned off
 * @unit: IOMMU unit for which clocks are to be turned off
 *
 * Creates an event to disable the IOMMU clocks on timestamp and if event
 * already exists then updates the timestamp of disabling the IOMMU clocks
@@ -541,7 +531,7 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device,
 */
static void
kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
				unsigned int ts, int ctx_id)
				unsigned int ts, int unit)
{
	struct kgsl_iommu_disable_clk_param *param;

@@ -550,7 +540,7 @@ kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
		return;

	param->mmu = mmu;
	param->ctx_id = ctx_id;
	param->unit = unit;
	param->ts = ts;

	if (kgsl_add_event(mmu->device, &mmu->device->iommu_events,
@@ -561,76 +551,66 @@ kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
	}
}

/*
 * kgsl_iommu_clk_prepare_enable - Prepare and enable an iommu clock
 * @clk - clock to enable
 *
 * Prepare-enables the clock. Retries 3 times on enable failure; on the
 * 4th failure gives up and returns an error.
 * Return: 0 on success else 1 on error
 */

static int kgsl_iommu_clk_prepare_enable(struct clk *clk)
{
	int num_retries = 4;

	/* Up to 4 attempts total; stop as soon as one succeeds */
	while (num_retries--) {
		if (!clk_prepare_enable(clk))
			return 0;
	}

	return 1;
}

/*
 * kgsl_iommu_enable_clk - Enable iommu clocks
 * @mmu - Pointer to mmu structure
 * @ctx_id - The context bank whose clocks are to be turned on
 * @unit - The iommu unit whose clocks are to be turned on
 *
 * Enables iommu clocks of a given context
 * Enables iommu clocks of a given iommu unit
 * Return: 0 on success else error code
 */
static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
				int ctx_id)
static void kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
				int unit)
{
	int ret = 0;
	int i, j;
	struct kgsl_iommu *iommu = mmu->priv;
	struct msm_iommu_drvdata *iommu_drvdata;

	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		for (j = 0; j < iommu_unit->dev_count; j++) {
			if (ctx_id != iommu_unit->dev[j].ctx_id)

		/* Turn on the clks for IOMMU unit requested */
		if ((unit != i) && (unit != KGSL_IOMMU_MAX_UNITS))
			continue;
			iommu_drvdata =
			dev_get_drvdata(iommu_unit->dev[j].dev->parent);
			ret = clk_prepare_enable(iommu_drvdata->pclk);
			if (ret)
				goto done;
			if (iommu_drvdata->clk) {
				ret = clk_prepare_enable(iommu_drvdata->clk);
				if (ret) {
					clk_disable_unprepare(
						iommu_drvdata->pclk);
					goto done;
				}
			}
			if (iommu_drvdata->aclk) {
				ret = clk_prepare_enable(iommu_drvdata->aclk);
				if (ret) {
					if (iommu_drvdata->clk)
						clk_disable_unprepare(
							iommu_drvdata->clk);
					clk_disable_unprepare(
							iommu_drvdata->pclk);
					goto done;
				}
			}
			if (iommu->gtcu_iface_clk) {
				ret = clk_prepare_enable(iommu->gtcu_iface_clk);
				if (ret)

		for (j = 0; j < KGSL_IOMMU_MAX_CLKS; j++) {
			if (iommu_unit->clks[j])
				if (kgsl_iommu_clk_prepare_enable(
						iommu_unit->clks[j]))
						goto done;
		}
			atomic_inc(&iommu_unit->dev[j].clk_enable_count);
		}
		atomic_inc(&iommu_unit->clk_enable_count);
	}
	return;
done:
	if (ret) {
		struct kgsl_iommu_unit *iommu_unit;
		if (iommu->unit_count == i)
			i--;
		iommu_unit = &iommu->iommu_units[i];
		do {
			for (j--; j >= 0; j--)
				kgsl_iommu_disable_clk(mmu, ctx_id);
			i--;
			if (i >= 0) {
				iommu_unit = &iommu->iommu_units[i];
				j = iommu_unit->dev_count;
			}
		} while (i >= 0);
	}
	return ret;
	/*
	 * Any Clock enable failure should be fatal,
	 * System usually crashes when enabling clock fails
	 * BUG_ON here to catch the system in bad state for
	 * further debug
	 */
	KGSL_CORE_ERR("IOMMU clk enable failed\n");
	BUG();
}

/*
@@ -789,6 +769,7 @@ static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu_pt *iommu_pt;
	struct kgsl_iommu *iommu = mmu->priv;
	struct msm_iommu_drvdata *drvdata = 0;
	int i, j, ret = 0;

	/*
@@ -820,6 +801,16 @@ static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
				"iommu pt %p attached to dev %p, ctx_id %d\n",
				iommu_pt->domain, iommu_unit->dev[j].dev,
				iommu_unit->dev[j].ctx_id);
				/* Init IOMMU unit clks here */
				if (!drvdata) {
					drvdata = dev_get_drvdata(
					iommu_unit->dev[j].dev->parent);
					iommu_unit->clks[0] = drvdata->pclk;
					iommu_unit->clks[1] = drvdata->clk;
					iommu_unit->clks[2] = drvdata->aclk;
					iommu_unit->clks[3] =
							iommu->gtcu_iface_clk;
				}
			}
		}
	}
@@ -862,9 +853,7 @@ static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
			ret = -EINVAL;
			goto done;
		}
		atomic_set(
		&(iommu_unit->dev[iommu_unit->dev_count].clk_enable_count),
		0);
		atomic_set(&(iommu_unit->clk_enable_count), 0);

		iommu_unit->dev[iommu_unit->dev_count].dev =
			msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
@@ -1576,17 +1565,9 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
		mmu->hwpagetable = NULL;
		goto done;
	}
	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	if (status) {
		KGSL_CORE_ERR("clk enable failed\n");
		goto done;
	}
	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	if (status) {
		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
		KGSL_CORE_ERR("clk enable failed\n");
		goto done;
	}

	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);

	/* Get the lsb value of pagetables set in the IOMMU ttbr0 register as
	 * that value should not change when we change pagetables, so while
	 * changing pagetables we can use this lsb value of the pagetable w/o
@@ -1629,8 +1610,7 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
				cp_nop_packet(1));

	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
	mmu->flags |= KGSL_FLAGS_STARTED;

done:
@@ -1777,12 +1757,12 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
	int i, j;

	if (atomic_read(&mmu->fault)) {
		kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
		for (i = 0; i < iommu->unit_count; i++) {
			struct kgsl_iommu_unit *iommu_unit =
						&iommu->iommu_units[i];
			for (j = 0; j < iommu_unit->dev_count; j++) {
				if (iommu_unit->dev[j].fault) {
					kgsl_iommu_enable_clk(mmu, j);
					_iommu_lock(iommu);
					KGSL_IOMMU_SET_CTX_REG(iommu,
						iommu_unit,
@@ -1792,12 +1772,12 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						FSR, 0);
					kgsl_iommu_disable_clk(mmu, j);
					_iommu_unlock(iommu);
					iommu_unit->dev[j].fault = 0;
				}
			}
		}
		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
		atomic_set(&mmu->fault, 0);
	}
}
@@ -1872,11 +1852,11 @@ kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
	if (in_interrupt())
		return 0;
	/* Return the current pt base by reading IOMMU pt_base register */
	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
	pt_base = KGSL_IOMMU_GET_CTX_REG_Q(iommu,
				(&iommu->iommu_units[0]),
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
	return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
}

@@ -1903,11 +1883,7 @@ static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
						mmu->hwpagetable);
	uint64_t pt_val;

	ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	if (ret) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return ret;
	}
	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);

	/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
	if (msm_soc_version_supports_iommu_v0()) {
@@ -1984,7 +1960,8 @@ unlock:
	_iommu_unlock(iommu);

	/* Disable smmu clock */
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);

	return ret;
}

@@ -2054,24 +2031,12 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
	if (msm_soc_version_supports_iommu_v0())
		return ret;

	ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);

	if (ret) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return ret;
	}
	ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);

	if (ret) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
		return ret;
	}
	/* Need to idle device before changing options */
	ret = mmu->device->ftbl->idle(mmu->device);
	if (ret) {
		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
		return ret;
	}

@@ -2094,8 +2059,8 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
					SCTLR, sctlr_val);
		}
	}
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);

	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
	return ret;
}

@@ -2108,7 +2073,6 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
 */
static void kgsl_iommu_set_pagefault(struct kgsl_mmu *mmu)
{
	int status;
	int i, j;
	struct kgsl_iommu *iommu = mmu->priv;
	unsigned int fsr;
@@ -2117,19 +2081,8 @@ static void kgsl_iommu_set_pagefault(struct kgsl_mmu *mmu)
	if (atomic_read(&mmu->fault))
		return;

	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	if (status) {
		KGSL_CORE_ERR("IOMMU user ctx clk enable failed err: %d\n",
				status);
		return;
	}
	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	if (status) {
		kgsl_iommu_disable_clk(mmu,  KGSL_IOMMU_CONTEXT_USER);
		KGSL_CORE_ERR("IOMMU priv ctx clk enable failed err: %d\n",
				status);
		return;
	}
	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);

	/* Loop through all IOMMU devices to check for fault */
	for (i = 0; i < iommu->unit_count; i++) {
		for (j = 0; j < iommu->iommu_units[i].dev_count; j++) {
@@ -2149,8 +2102,7 @@ static void kgsl_iommu_set_pagefault(struct kgsl_mmu *mmu)
		}
	}

	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
}

struct kgsl_protected_registers *kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
+13 −5
Original line number Diff line number Diff line
@@ -99,10 +99,16 @@ struct kgsl_iommu_register_list {
 * Max number of iommu units that the gpu core can have
 * On APQ8064, KGSL can control a maximum of 2 IOMMU units.
 */
#define KGSL_IOMMU_MAX_UNITS 2
enum kgsl_iommu_units {
	KGSL_IOMMU_UNIT_0 = 0,	/* First IOMMU unit */
	KGSL_IOMMU_UNIT_1 = 1,	/* Second IOMMU unit */
	/*
	 * Sentinel passed to the clk enable/disable routines to mean
	 * "all IOMMU units" (see the unit != KGSL_IOMMU_MAX_UNITS checks).
	 */
	KGSL_IOMMU_MAX_UNITS = 2,
};

/* Max number of iommu contexts per IOMMU unit */
#define KGSL_IOMMU_MAX_DEVS_PER_UNIT 2
/* Max number of iommu clks per IOMMU unit */
#define KGSL_IOMMU_MAX_CLKS 4

enum kgsl_iommu_context_id {
	KGSL_IOMMU_CONTEXT_USER = 0,
@@ -183,7 +189,6 @@ struct kgsl_device_iommu_data {
 * are on, else the clocks are off
 * fault: Flag when set indicates that this iommu device has caused a page
 * fault
 * @clk_enable_count: The ref count of clock enable calls
 */
struct kgsl_iommu_device {
	struct device *dev;
@@ -193,7 +198,6 @@ struct kgsl_iommu_device {
	bool clk_enabled;
	struct kgsl_device *kgsldev;
	int fault;
	atomic_t clk_enable_count;
};

/*
@@ -209,6 +213,8 @@ struct kgsl_iommu_device {
 * @iommu_halt_enable: Valid only on IOMMU-v1, when set indicates that the iommu
 * unit supports halting of the IOMMU, which can be enabled while programming
 * the IOMMU registers for synchronization
 * @clk_enable_count: The ref count of clock enable calls
 * @clks: iommu unit clks
 */
struct kgsl_iommu_unit {
	struct kgsl_iommu_device dev[KGSL_IOMMU_MAX_DEVS_PER_UNIT];
@@ -216,6 +222,8 @@ struct kgsl_iommu_unit {
	struct kgsl_memdesc reg_map;
	unsigned int ahb_base;
	int iommu_halt_enable;
	atomic_t clk_enable_count;
	struct clk *clks[KGSL_IOMMU_MAX_CLKS];
};

/*
@@ -267,13 +275,13 @@ struct kgsl_iommu_pt {
 * struct kgsl_iommu_disable_clk_param - Parameter struct for disble clk event
 * @mmu: The mmu pointer
 * @rb_level: the rb level in which the timestamp of the event belongs to
 * @ctx_id: The IOMMU context whose clock is to be turned off
 * @unit: The IOMMU unit whose clock is to be turned off
 * @ts: Timestamp on which clock is to be disabled
 */
struct kgsl_iommu_disable_clk_param {
	struct kgsl_mmu *mmu;
	int rb_level;
	int ctx_id;
	int unit;
	unsigned int ts;
};

+11 −13
Original line number Diff line number Diff line
@@ -90,11 +90,11 @@ struct kgsl_mmu_ops {
			(struct kgsl_mmu *mmu);
	void (*mmu_disable_clk_on_ts)
		(struct kgsl_mmu *mmu,
		uint32_t ts, int ctx_id);
	int (*mmu_enable_clk)
		(struct kgsl_mmu *mmu, int ctx_id);
		uint32_t ts, int unit);
	void (*mmu_enable_clk)
		(struct kgsl_mmu *mmu, int unit);
	void (*mmu_disable_clk)
		(struct kgsl_mmu *mmu, int ctx_id);
		(struct kgsl_mmu *mmu, int unit);
	uint64_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
				unsigned int unit_id,
				enum kgsl_iommu_context_id ctx_id);
@@ -261,27 +261,25 @@ static inline phys_addr_t kgsl_mmu_get_default_ttbr0(struct kgsl_mmu *mmu,
		return 0;
}

static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
					int ctx_id)
static inline void kgsl_mmu_enable_clk(struct kgsl_mmu *mmu, int unit)
{
	if (mmu->mmu_ops && mmu->mmu_ops->mmu_enable_clk)
		return mmu->mmu_ops->mmu_enable_clk(mmu, ctx_id);
		mmu->mmu_ops->mmu_enable_clk(mmu, unit);
	else
		return 0;
		return;
}

static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu, int unit)
{
	if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
		mmu->mmu_ops->mmu_disable_clk(mmu, ctx_id);
		mmu->mmu_ops->mmu_disable_clk(mmu, unit);
}

static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
						unsigned int ts,
						int ctx_id)
						unsigned int ts, int unit)
{
	if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
		mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ctx_id);
		mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, unit);
}

static inline unsigned int kgsl_mmu_get_reg_gpuaddr(struct kgsl_mmu *mmu,