Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da9d4994 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "iommu/arm-smmu: Rename arm-smmu-regs.h"

parents 88623cbb e0ced41c
Loading
Loading
Loading
Loading
+71 −64
Original line number Diff line number Diff line
@@ -55,7 +55,7 @@
#include <linux/fsl/mc.h>

#include <soc/qcom/scm.h>
#include "arm-smmu-regs.h"
#include "arm-smmu.h"

#define CREATE_TRACE_POINTS
#include "arm-smmu-trace.h"
@@ -86,19 +86,6 @@
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

@@ -363,6 +350,21 @@ struct arm_smmu_domain {
	struct msm_iommu_domain		domain;
};

/*
 * Map a secure GR0 register offset to its non-secure alias.
 *
 * The registers listed below have non-secure aliases located 0x400
 * above the secure copies (e.g. nsCR0 at 0x400); every other GR0
 * offset is returned unchanged.
 */
static int arm_smmu_gr0_ns(int offset)
{
	if (offset == ARM_SMMU_GR0_sCR0 ||
	    offset == ARM_SMMU_GR0_sACR ||
	    offset == ARM_SMMU_GR0_sGFSR ||
	    offset == ARM_SMMU_GR0_sGFSYNR0 ||
	    offset == ARM_SMMU_GR0_sGFSYNR1 ||
	    offset == ARM_SMMU_GR0_sGFSYNR2)
		return offset + 0x400;

	return offset;
}

static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
{
	return smmu->base + (n << smmu->pgshift);
@@ -370,12 +372,18 @@ static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)

static u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
{
	if ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) && page == 0)
		offset = arm_smmu_gr0_ns(offset);

	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

/*
 * Write a 32-bit SMMU register on the given page.
 *
 * As in arm_smmu_readl(), page-0 (GR0) accesses are redirected to the
 * non-secure register aliases when the secure-config-access option is
 * enabled.
 */
static void arm_smmu_writel(struct arm_smmu_device *smmu, int page, int offset,
			    u32 val)
{
	void __iomem *addr;

	if (page == 0 && (smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS))
		offset = arm_smmu_gr0_ns(offset);

	addr = arm_smmu_page(smmu, page) + offset;
	writel_relaxed(val, addr);
}

@@ -390,9 +398,15 @@ static void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, int offset,
	writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

#define ARM_SMMU_GR0		0
#define ARM_SMMU_GR1		1
#define ARM_SMMU_CB(s, n)	((s)->numpage + (n))

#define arm_smmu_gr0_read(s, o)		\
	arm_smmu_readl((s), ARM_SMMU_GR0, (o))
#define arm_smmu_gr0_write(s, o, v)	\
	arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v))

#define arm_smmu_gr1_read(s, o)		\
	arm_smmu_readl((s), ARM_SMMU_GR1, (o))
#define arm_smmu_gr1_write(s, o, v)	\
@@ -705,7 +719,7 @@ static void arm_smmu_arch_write_sync(struct arm_smmu_device *smmu)
		return;

	/* Read to complete prior write transactions */
	id = readl_relaxed(ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_ID0);
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);

	/* Wait for read to complete before off */
	rmb();
@@ -1138,7 +1152,7 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	if (__arm_smmu_tlb_sync(smmu, 0, ARM_SMMU_GR0_sTLBGSYNC,
	if (__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
				ARM_SMMU_GR0_sTLBGSTATUS))
		dev_err_ratelimited(smmu->dev,
				    "TLB global sync failed!\n");
@@ -1198,9 +1212,10 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

@@ -1270,12 +1285,12 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

struct arm_smmu_secure_pool_chunk {
@@ -1631,15 +1646,14 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	if (arm_smmu_power_on(smmu->pwr))
		return IRQ_NONE;

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr) {
		arm_smmu_power_off(smmu, smmu->pwr);
@@ -1652,7 +1666,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	arm_smmu_power_off(smmu, smmu->pwr);
	return IRQ_HANDLED;
}
@@ -2341,7 +2356,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
@@ -2355,7 +2370,7 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
@@ -2372,7 +2387,6 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr, id;
	int idx;

@@ -2381,7 +2395,7 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
		return;

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
	size = FIELD_GET(ID0_NUMSMRG, id);

	/*
@@ -2390,7 +2404,7 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
	 * which is not in use.
	 */
	for (idx = 0; idx < size; idx++) {
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(idx));
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(idx));
		if (!(smr & SMR_VALID))
			break;
	}
@@ -2406,13 +2420,13 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(idx));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(idx));
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(idx));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(idx));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(idx));
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(idx));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}

@@ -2571,8 +2585,8 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
		if (s2cr[idx].attach_count > 0)
			continue;

		writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
		writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
		arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), 0);
		arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), 0);
	}
	mutex_unlock(&smmu->stream_map_mutex);

@@ -4059,7 +4073,6 @@ static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
{
	int i;
	u32 reg, major;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (smmu->model == ARM_MMU500) {
		/*
@@ -4067,9 +4080,9 @@ static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
		major = FIELD_GET(ID7_MAJOR, reg);
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
@@ -4077,7 +4090,7 @@ static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
		arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
@@ -4098,13 +4111,12 @@ static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	int i;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
@@ -4118,10 +4130,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
@@ -4154,7 +4166,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)

	/* Push the button */
	arm_smmu_tlb_sync_global(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);

	/* Manage any implementation defined features */
	arm_smmu_arch_device_reset(smmu);
@@ -4241,8 +4254,7 @@ static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
	struct arm_smmu_s2cr s2cr;

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
					ARM_SMMU_GR0_SMR(i));
		raw_smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
		if (!(raw_smr & SMR_VALID))
			continue;

@@ -4250,8 +4262,7 @@ static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
		smr.id = FIELD_GET(SMR_ID, raw_smr);
		smr.valid = true;

		raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
					ARM_SMMU_GR0_S2CR(i));
		raw_s2cr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_S2CR(i));
		memset(&s2cr, 0, sizeof(s2cr));
		s2cr.group = NULL;
		s2cr.count = 1;
@@ -4464,7 +4475,6 @@ static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;
@@ -4474,7 +4484,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
@@ -4569,7 +4579,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
@@ -4607,7 +4617,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
		return -ENOMEM;

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
	smmu->ipa_size = size;

@@ -5010,8 +5020,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
	idr_destroy(&smmu->asid_idr);

	/* Turn the thing off */
	writel_relaxed(sCR0_CLIENTPD,
			ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
	arm_smmu_power_off(smmu, smmu->pwr);

	arm_smmu_exit_power_resources(smmu->pwr);
@@ -5650,7 +5659,6 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
	struct platform_device *pdev;
	int ret;
	u32 val;
	void __iomem *reg;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
@@ -5675,11 +5683,10 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
	if (ret)
		return ret;

	reg = ARM_SMMU_GR0(smmu);
	val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
	val = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	val &= ~ARM_MMU500_ACR_CACHE_LOCK;
	writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
	val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, val);
	val = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	/*
	 * Modifying the non-secure copy of the sACR register is only
	 * allowed if permission is given in the secure sACR register.
+0 −0

File moved.

+1 −1
Original line number Diff line number Diff line
@@ -33,7 +33,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu-regs.h"
#include "arm-smmu.h"

#define SMMU_INTR_SEL_NS     0x2000