Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8f2d7b1f authored by Chintan Pandya's avatar Chintan Pandya Committed by Shiraz Hashim
Browse files

iommu: msm: Introduce AARCH64 page table format



Some clients can generate 48/49-bit virtual addresses.
Support such clients by using the AARCH64 page table format.

Change-Id: Ic8d9a12e990f13ffebd6be6c81506d6bcc421f05
Signed-off-by: default avatarChintan Pandya <cpandya@codeaurora.org>
parent 8b1a0f7c
Loading
Loading
Loading
Loading
+13 −0
Original line number Diff line number Diff line
@@ -110,6 +110,19 @@ config IOMMU_LPAE

	  If unsure, say N here.

config IOMMU_AARCH64
	bool "Enable support for AARCH64 in IOMMU"
	depends on (MSM_IOMMU && (!IOMMU_LPAE))
	help
	  Enables AARCH64 format page tables for the IOMMU. This allows IOMMU
	  clients to use virtual and physical addresses of up to 48 bits, while
	  still supporting clients that cannot generate addresses wider than
	  32 bits. Presently, this config assumes that if the SMMU global space
	  is programmed by a secure environment, that environment configures all
	  the CBs of all the SMMUs as AARCH64 formatted.

	  If unsure, say N here.

config MSM_IOMMU_VBIF_CHECK
	bool "Enable support for VBIF check when IOMMU gets stuck"
	depends on MSM_IOMMU
+4 −0
Original line number Diff line number Diff line
@@ -11,8 +11,12 @@ obj-$(CONFIG_MSM_IOMMU_PMON) += msm_iommu_perfmon.o msm_iommu_perfmon-v1.o
# Select exactly one page table implementation for MSM IOMMU v1:
# LPAE takes precedence over AARCH64; the plain (v7S) tables are the fallback.
ifdef CONFIG_IOMMU_LPAE
obj-$(CONFIG_MSM_IOMMU_V1) += msm_iommu_pagetable_lpae.o
else
ifdef CONFIG_IOMMU_AARCH64
obj-$(CONFIG_MSM_IOMMU_V1) += msm_iommu_pagetable_aarch64.o
else
obj-$(CONFIG_MSM_IOMMU_V1) += msm_iommu_pagetable.o
endif
endif
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+129 −47
Original line number Diff line number Diff line
@@ -33,13 +33,14 @@
#include <linux/notifier.h>
#include <linux/qcom_iommu.h>
#include <linux/sizes.h>
#include <soc/qcom/scm.h>

#include "msm_iommu_hw-v1.h"
#include "msm_iommu_priv.h"
#include "msm_iommu_perfmon.h"
#include "msm_iommu_pagetable.h"

#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_2M | SZ_32M | SZ_1G)
#else
@@ -50,6 +51,8 @@
#define IOMMU_USEC_STEP		10
#define IOMMU_USEC_TIMEOUT	500

/* commands for SCM_SVC_SMMU_PROGRAM */
#define SMMU_CHANGE_PAGETABLE_FORMAT    0X01

/*
 * msm_iommu_spin_lock protects anything that can race with map
@@ -536,34 +539,70 @@ static void __release_smg(void __iomem *base)
			SET_SMR_VALID(base, i, 0);
}

#ifdef CONFIG_IOMMU_LPAE
static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)
#if defined(CONFIG_IOMMU_LPAE)
static inline phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	SET_CB_TTBR0_ASID(base, ctx_num, asid);
	phys_addr_t phy;
	/* Upper 28 bits from PAR, lower 12 from VA */
	phy = (par & 0x0000FFFFF000ULL) | (va & 0x000000000FFFULL);
	return phy;
}
#else
static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)

static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
{
	SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid);
	SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */
}
#endif

static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  struct msm_iommu_priv *priv)
static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
{
	void __iomem *cb_base = iommu_drvdata->cb_base;
	SET_CB_MAIR0(base, ctx, msm_iommu_get_mair0());
	SET_CB_MAIR1(base, ctx, msm_iommu_get_mair1());
}

	curr_ctx->asid = curr_ctx->num;
	msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid);
static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
{
	/*
	 * Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_CB_TTBCR_SH0(base, ctx, 3); /* Inner shareable */
	SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cachable*/
	SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cachable*/
	SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */

	SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */
	SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cachable*/
	SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cachable*/
	SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */
}

static void __set_cb_format(struct msm_iommu_drvdata *iommu_drvdata,
				struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
}

static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)
{
	SET_CB_TTBR0_ASID(base, ctx_num, asid);
}
#elif defined(CONFIG_IOMMU_AARCH64)
/*
 * AARCH64 format: rebuild the full physical address from the PAR register.
 * PAR supplies PA[47:12]; the 4K page offset PA[11:0] comes from the VA,
 * yielding a 48-bit physical address.
 */
static inline phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	phys_addr_t phy;
	/* PA[47:12] from PAR, PA[11:0] (page offset) from VA */
	phy = (par & 0xFFFFFFFFF000ULL) | (va & 0x000000000FFFULL);
	return phy;
}

#ifdef CONFIG_IOMMU_LPAE
/*
 * Per-context-bank setup for the AARCH64 page table format: enable the
 * extended (64-bit descriptor) address format and configure TCR2.
 */
static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
{
	SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */
	/*
	 * TCR2 presently sets PA size as 32-bits. When entire platform
	 * gets more physical size, we need to change for SMMU too.
	 * Change CB_TCR2_PA in that case.
	 */
	SET_CB_TCR2_SEP(base, ctx, 7); /* bit[48] as sign bit */
}

static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
@@ -581,16 +620,62 @@ static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
	SET_CB_TTBCR_SH0(base, ctx, 3); /* Inner shareable */
	SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cachable*/
	SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cachable*/
	SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */

	SET_CB_TTBCR_T0SZ(base, ctx, 16); /* 48-bit VA */

	SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */
	SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cachable*/
	SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cachable*/
	SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */
	SET_CB_TTBCR_T1SZ(base, ctx, 63); /*TTBR1 not used */
}

#else
/*
 * Switch a context bank's page table format to AARCH64.
 *
 * If the SMMU global space is owned by a secure environment
 * (sec_id != -1), request the format change via an SCM call to
 * SCM_SVC_SMMU_PROGRAM; otherwise program the CBA2R.VA64 bit for the
 * context bank directly.  A failed SCM call is fatal (BUG) because the
 * AARCH64 format was chosen at compile time and there is no fallback.
 */
static void __set_cb_format(struct msm_iommu_drvdata *iommu_drvdata,
				struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
	struct scm_desc desc = {0};
	/* scm_call2() returns a signed error code; keep it signed for %d */
	int ret = 0;

	if (iommu_drvdata->sec_id != -1) {
		desc.args[0] = iommu_drvdata->sec_id;
		desc.args[1] = ctx_drvdata->num;
		desc.args[2] = 1;	/* Enable */
		desc.arginfo = SCM_ARGS(3, SCM_VAL, SCM_VAL, SCM_VAL);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
				SMMU_CHANGE_PAGETABLE_FORMAT), &desc);

		/*
		 * At this stage, we cannot afford to fail because we have
		 * chosen AARCH64 format at compile time and we have nothing
		 * to fallback on.
		 */
		if (ret) {
			pr_err("Format change failed for CB %d with ret %d\n",
				ctx_drvdata->num, ret);
			BUG();
		}
	} else {
		/* Set page table format as AARCH64 */
		SET_CBA2R_VA64(iommu_drvdata->base, ctx_drvdata->num, 1);
	}
}

/* AARCH64/LPAE-style formats carry the ASID in the TTBR0 register */
static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)
{
	SET_CB_TTBR0_ASID(base, ctx_num, asid);
}
#else /* v7S format */
static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	phys_addr_t phy;

	/* We are dealing with a supersection */
	if (par & CB_PAR_SS)
		phy = (par & 0x0000FF000000ULL) | (va & 0x000000FFFFFFULL);
	else /* Upper 20 bits from PAR, lower 12 from VA */
		phy = (par & 0x0000FFFFF000ULL) | (va & 0x000000000FFFULL);

	return phy;
}

static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
{
@@ -616,8 +701,28 @@ static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
	SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
}

/*
 * No format programming is needed in the v7S case; keep an empty stub so
 * the common context-programming path can call it unconditionally.
 */
static void __set_cb_format(struct msm_iommu_drvdata *iommu_drvdata,
				struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
}

/* v7S format: the ASID is programmed via the CONTEXTIDR register */
static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)
{
	SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid);
}
#endif

/*
 * Assign the context's ASID (derived 1:1 from its context bank number)
 * and program it through the format-specific msm_iommu_set_ASID() helper.
 * NOTE(review): @priv is unused here — confirm whether it is kept only
 * for call-site symmetry or can be dropped.
 */
static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  struct msm_iommu_priv *priv)
{
	void __iomem *cb_base = iommu_drvdata->cb_base;

	curr_ctx->asid = curr_ctx->num;
	msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid);
}

static int program_m2v_table(struct device *dev, void __iomem *base)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev);
@@ -716,9 +821,9 @@ static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,

		/* Do not downgrade memory attributes */
		SET_CBAR_MEMATTR(base, ctx, 0x0A);

	}

	__set_cb_format(iommu_drvdata, ctx_drvdata);
	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);

	/* Ensure that ASID assignment has completed before we use
@@ -1077,29 +1182,6 @@ static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long va,
		return len;
}

#ifdef CONFIG_IOMMU_LPAE
static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	phys_addr_t phy;
	/* Upper 28 bits from PAR, lower 12 from VA */
	phy = (par & 0xFFFFFFF000ULL) | (va & 0x00000FFF);
	return phy;
}
#else
static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	phys_addr_t phy;

	/* We are dealing with a supersection */
	if (par & CB_PAR_SS)
		phy = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else /* Upper 20 bits from PAR, lower 12 from VA */
		phy = (par & 0xFFFFF000) | (va & 0x00000FFF);

	return phy;
}
#endif

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  phys_addr_t va)
{
@@ -1199,7 +1281,7 @@ static bool msm_iommu_capable(enum iommu_cap cap)
	return false;
}

#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
{
	pr_err("MAIR0   = %08x    MAIR1   = %08x\n",
+48 −46
Original line number Diff line number Diff line
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -114,50 +114,62 @@ struct device *msm_iommu_get_ctx(const char *ctx_name)
}
EXPORT_SYMBOL(msm_iommu_get_ctx);

#ifdef CONFIG_ARM
#ifdef CONFIG_IOMMU_LPAE
#ifdef CONFIG_ARM_LPAE
/*
 * If CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are enabled we can use the MAIR
 * register directly
 * Selecting NMRR, PRRR, MAIR0 and MAIR1 for SMMU has a dependency on
 * the SMMU page table format and the CPU mode. To simplify, refer to
 * the table below.
 *
 *		+-----------+-------------+------+
 *		| ARM       | ARM_LPAE    | ARM64|
 * +------------+-----------+-------------+------+
 * | SMMUv7S    | RCP15_PRRR| PRRR        | PRRR |
 * |            | RCP15_NMRR| NMRR        | NMRR |
 * +------------+-----------+-------------+------+
 * | SMMUv7L    | MAIR0     | RCP15_MAIR0 | MAIR0|
 * |            | MAIR1     | RCP15_MAIR1 | MAIR1|
 * +------------+-----------+-------------+------+
 * | SMMUv8L    | MAIR0     | RCP15_MAIR0 | MAIR0|
 * |            | MAIR1     | RCP15_MAIR1 | MAIR1|
 * +------------+-----------+-------------+------+
 */

#ifdef CONFIG_ARM64
u32 msm_iommu_get_mair0(void)
{
	unsigned int mair0;

	RCP15_MAIR0(mair0);
	return mair0;
	return MAIR0_VALUE;
}

u32 msm_iommu_get_mair1(void)
{
	unsigned int mair1;
	return MAIR1_VALUE;
}

	RCP15_MAIR1(mair1);
	return mair1;
u32 msm_iommu_get_prrr(void)
{
	return PRRR_VALUE;
}
#else
/*
 * However, If CONFIG_ARM_LPAE is not enabled but CONFIG_IOMMU_LPAE is enabled
 * we'll just use the hard coded values directly..
 */

u32 msm_iommu_get_nmrr(void)
{
	return NMRR_VALUE;
}
#elif defined(CONFIG_ARM_LPAE)
u32 msm_iommu_get_mair0(void)
{
	return MAIR0_VALUE;
	unsigned int mair0;

	RCP15_MAIR0(mair0);
	return mair0;
}

u32 msm_iommu_get_mair1(void)
{
	return MAIR1_VALUE;
	unsigned int mair1;

	RCP15_MAIR1(mair1);
	return mair1;
}
#endif

#else
#ifdef CONFIG_ARM_LPAE
/*
 * If CONFIG_ARM_LPAE is enabled AND CONFIG_IOMMU_LPAE is disabled
 * we must use the hardcoded values.
 */
u32 msm_iommu_get_prrr(void)
{
	return PRRR_VALUE;
@@ -168,12 +180,15 @@ u32 msm_iommu_get_nmrr(void)
	return NMRR_VALUE;
}
#else
/*
 * If both CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are disabled
 * we can use the registers directly.
 */
#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
u32 msm_iommu_get_mair0(void)
{
	return MAIR0_VALUE;
}

u32 msm_iommu_get_mair1(void)
{
	return MAIR1_VALUE;
}

u32 msm_iommu_get_prrr(void)
{
@@ -191,16 +206,3 @@ u32 msm_iommu_get_nmrr(void)
	return nmrr;
}
#endif
#endif
#endif
#ifdef CONFIG_ARM64
u32 msm_iommu_get_prrr(void)
{
	return PRRR_VALUE;
}

u32 msm_iommu_get_nmrr(void)
{
	return NMRR_VALUE;
}
#endif
+58 −6
Original line number Diff line number Diff line
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -167,9 +167,11 @@ do { \
/* Global register space 1 setters / getters */
#define SET_CBAR_N(b, N, v)      SET_GLOBAL_REG_N(CBAR, N, (b), (v))
#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v))
#define SET_CBA2R_N(b, N, v)     SET_GLOBAL_REG_N(CBA2R, N, (b), (v))

#define GET_CBAR_N(b, N)         GET_GLOBAL_REG_N(CBAR, N, (b))
#define GET_CBFRSYNRA_N(b, N)    GET_GLOBAL_REG_N(CBFRSYNRA, N, (b))
#define GET_CBA2R_N(b, N)        GET_GLOBAL_REG_N(CBA2R, N, (b))

/* Implementation defined register setters/getters */
#define SET_MICRO_MMU_CTRL_HALT_REQ(b, v) \
@@ -194,6 +196,7 @@ do { \
#define SET_SCTLR(b, c, v)       SET_CTX_REG(CB_SCTLR, (b), (c), (v))
#define SET_ACTLR(b, c, v)       SET_CTX_REG(CB_ACTLR, (b), (c), (v))
#define SET_RESUME(b, c, v)      SET_CTX_REG(CB_RESUME, (b), (c), (v))
#define SET_TCR2(b, c, v)        SET_CTX_REG(CB_TCR2, (b), (c), (v))
#define SET_TTBCR(b, c, v)       SET_CTX_REG(CB_TTBCR, (b), (c), (v))
#define SET_CONTEXTIDR(b, c, v)  SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v))
#define SET_PRRR(b, c, v)        SET_CTX_REG(CB_PRRR, (b), (c), (v))
@@ -223,6 +226,7 @@ do { \
#define GET_RESUME(b, c)         GET_CTX_REG(CB_RESUME, (b), (c))
#define GET_TTBR0(b, c)          GET_CTX_REG(CB_TTBR0, (b), (c))
#define GET_TTBR1(b, c)          GET_CTX_REG(CB_TTBR1, (b), (c))
#define GET_TCR2(b, c)           GET_CTX_REG(CB_TCR2, (b), (c))
#define GET_TTBCR(b, c)          GET_CTX_REG(CB_TTBCR, (b), (c))
#define GET_CONTEXTIDR(b, c)     GET_CTX_REG(CB_CONTEXTIDR, (b), (c))
#define GET_PRRR(b, c)           GET_CTX_REG(CB_PRRR, (b), (c))
@@ -602,6 +606,12 @@ do { \

#define GET_CBFRSYNRA_SID(b, n)    GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID)

/* Context Bank Attribute 2 Register: CBA2R_N */
#define SET_CBA2R_VA64(b, n, v)     SET_GLOBAL_FIELD_N(b, n, CBA2R, VA64, v)
#define SET_CBA2R_MONC(b, n, v)     SET_GLOBAL_FIELD_N(b, n, CBA2R, MONC, v)
#define GET_CBA2R_VA64(b, n)        GET_GLOBAL_FIELD_N(b, n, CBA2R, VA64)
#define GET_CBA2R_MOC(b, n)         GET_GLOBAL_FIELD_N(b, n, CBA2R, MONC)

/* Stage 1 Context Bank Format Fields */
#define SET_CB_ACTLR_REQPRIORITY (b, c, v) \
		SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v)
@@ -936,7 +946,7 @@ do { \
#define GET_CB_TTBCR_NSCFG1(b, c)    \
			GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1)

#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)

/* LPAE format */

@@ -951,6 +961,15 @@ do { \
#define GET_CB_TTBR0_ADDR(b, c)     GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR)
#define GET_CB_TTBR0(b, c)          GET_CTX_REG_Q(CB_TTBR0, (b), (c))

/* Translation Control Register 2: CB_TCR2 */
#define SET_CB_TCR2_PA(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_TCR2, PA, v)
#define SET_CB_TCR2_AS(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_TCR2, AS, v)
#define SET_CB_TCR2_SEP(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TCR2, SEP, v)

#define GET_CB_TCR2_PA(b, c)       GET_CONTEXT_FIELD(b, c, CB_TCR2, PA)
#define GET_CB_TCR2_AS(b, c)       GET_CONTEXT_FIELD(b, c, CB_TCR2, AS)
#define GET_CB_TCR2_SEP(b, c)      GET_CONTEXT_FIELD(b, c, CB_TCR2, SEP)

/* Translation Table Base Control Register: CB_TTBCR */
#define SET_CB_TTBCR_T0SZ(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v)
#define SET_CB_TTBCR_T1SZ(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ, v)
@@ -1065,6 +1084,7 @@ do { \
/* Global Register Space 1 */
#define CBAR		(0x1000)
#define CBFRSYNRA	(0x1400)
#define CBA2R		(0x1800)

/* Implementation defined Register Space */
#define MICRO_MMU_CTRL	(0x2000)
@@ -1097,6 +1117,7 @@ do { \
#define CB_SCTLR	(0x000)
#define CB_ACTLR	(0x004)
#define CB_RESUME	(0x008)
#define CB_TCR2		(0x010)
#define CB_TTBR0	(0x020)
#define CB_TTBR1	(0x028)
#define CB_TTBCR	(0x030)
@@ -1317,6 +1338,10 @@ do { \
/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
#define CBFRSYNRA_SID   (CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT)

/* Context Bank Attribute 2 Register: CBA2R */
#define CBA2R_VA64       (CBA2R_VA64_MASK    << CBA2R_VA64_SHIFT)
#define CBA2R_MONC       (CBA2R_MONC_MASK    << CBA2R_MONC_SHIFT)

/* Performance Monitoring Register Fields */

/* Stage 1 Context Bank Format Fields */
@@ -1482,12 +1507,16 @@ do { \
#define CB_TLBSTATUS_SACTIVE (CB_TLBSTATUS_SACTIVE_MASK << \
						CB_TLBSTATUS_SACTIVE_SHIFT)

/* Translation Control Register 2: CB_TCR2 */
/*
 * Fix: these field macros previously shifted by CB_TTBCR_*_SHIFT names
 * that are never defined; the shifts for these fields are declared as
 * CB_TCR2_PA_SHIFT / CB_TCR2_AS_SHIFT / CB_TCR2_SEP_SHIFT, so any use of
 * the old macros would fail to compile.
 */
#define CB_TCR2_PA         (CB_TCR2_PA_MASK     << CB_TCR2_PA_SHIFT)
#define CB_TCR2_AS         (CB_TCR2_AS_MASK     << CB_TCR2_AS_SHIFT)
#define CB_TCR2_SEP        (CB_TCR2_SEP_MASK    << CB_TCR2_SEP_SHIFT)

/* Translation Table Base Control Register: CB_TTBCR */
#define CB_TTBCR_EAE         (CB_TTBCR_EAE_MASK     << CB_TTBCR_EAE_SHIFT)

#define CB_TTBR0_ADDR        (CB_TTBR0_ADDR_MASK    << CB_TTBR0_ADDR_SHIFT)

#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
/* Translation Table Base Register: CB_TTBR */
#define CB_TTBR0_ASID        (CB_TTBR0_ASID_MASK    << CB_TTBR0_ASID_SHIFT)
#define CB_TTBR1_ASID        (CB_TTBR1_ASID_MASK    << CB_TTBR1_ASID_SHIFT)
@@ -1723,6 +1752,10 @@ do { \
/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
#define CBFRSYNRA_SID_MASK      0x7FFF

/* Context Bank Attribute 2 Register: CBA2R */
#define CBA2R_VA64_MASK		0x1
#define CBA2R_MONC_MASK		0x1

/* Implementation defined register space masks */
#define MICRO_MMU_CTRL_RESERVED_MASK          0x03
#define MICRO_MMU_CTRL_HALT_REQ_MASK          0x01
@@ -1885,9 +1918,19 @@ do { \
/* TLB Status: CB_TLBSTATUS */
#define CB_TLBSTATUS_SACTIVE_MASK  0x01

/* Translation Control Register 2: CB_TCR2 */
#define CB_TCR2_PA_MASK            0x07
#define CB_TCR2_AS_MASK            0x01
#define CB_TCR2_SEP_MASK           0x07

/* Translation Table Base Control Register: CB_TTBCR */
#if defined(CONFIG_IOMMU_AARCH64)
#define CB_TTBCR_T0SZ_MASK         0x03F
#define CB_TTBCR_T1SZ_MASK         0x03F
#else
#define CB_TTBCR_T0SZ_MASK         0x07
#define CB_TTBCR_T1SZ_MASK         0x07
#endif
#define CB_TTBCR_EPD0_MASK         0x01
#define CB_TTBCR_EPD1_MASK         0x01
#define CB_TTBCR_IRGN0_MASK        0x03
@@ -1902,7 +1945,7 @@ do { \
#define CB_TTBCR_EAE_MASK          0x01

/* Translation Table Base Register 0/1: CB_TTBR */
#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
#define CB_TTBR0_ADDR_MASK         0x7FFFFFFFFULL
#define CB_TTBR0_ASID_MASK         0xFF
#define CB_TTBR1_ASID_MASK         0xFF
@@ -2118,6 +2161,10 @@ do { \
/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
#define CBFRSYNRA_SID_SHIFT        0

/* Context Bank Attribute 2 Register: CBA2R */
#define CBA2R_VA64_SHIFT		0
#define CBA2R_MONC_SHIFT		1

/* Implementation defined register space shift */
#define MICRO_MMU_CTRL_RESERVED_SHIFT         0x00
#define MICRO_MMU_CTRL_HALT_REQ_SHIFT         0x02
@@ -2280,6 +2327,11 @@ do { \
/* TLB Status: CB_TLBSTATUS */
#define CB_TLBSTATUS_SACTIVE_SHIFT  0

/* Translation Control Register 2: CB_TCR2 */
#define CB_TCR2_PA_SHIFT            0
#define CB_TCR2_AS_SHIFT            4
#define CB_TCR2_SEP_SHIFT           15

/* Translation Table Base Control Register: CB_TTBCR */
#define CB_TTBCR_T0SZ_SHIFT          0
#define CB_TTBCR_T1SZ_SHIFT         16
@@ -2297,7 +2349,7 @@ do { \
#define CB_TTBCR_SH1_SHIFT          28

/* Translation Table Base Register 0/1: CB_TTBR */
#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
#define CB_TTBR0_ADDR_SHIFT         5
#define CB_TTBR0_ASID_SHIFT         48
#define CB_TTBR1_ASID_SHIFT         48
Loading