
Commit d2eac4d8 authored by Robin Murphy, committed by Isaac J. Manjarres

iommu/arm-smmu: Split arm_smmu_tlb_inv_range_nosync()



Since we now use separate iommu_gather_ops for stage 1 and stage 2
contexts, we may as well divide up the monolithic callback into its
respective stage 1 and stage 2 parts.
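
To make the shape of the change concrete, the following is a minimal standalone C sketch (not the kernel code; every name in it is hypothetical) of the pattern this patch moves to: each stage gets its own range-invalidation callback, bound once when the domain's ops are chosen, instead of one monolithic callback that re-derives the stage on every call.

/*
 * Standalone sketch only -- not the kernel code. All names here
 * (toy_tlb_ops, toy_domain, toy_inv_range_s1/s2) are hypothetical.
 * It models per-stage callbacks selected at domain initialisation.
 */
#include <stdio.h>
#include <stddef.h>

struct toy_tlb_ops {
	/* One range-invalidation hook per stage, bound up front. */
	void (*tlb_add_flush)(unsigned long iova, size_t size,
			      size_t granule, void *cookie);
};

struct toy_domain {
	const struct toy_tlb_ops *tlb_ops;	/* fixed when the domain is set up */
};

/* Stage-1 flavour: no runtime "which stage is this?" branch needed. */
static void toy_inv_range_s1(unsigned long iova, size_t size,
			     size_t granule, void *cookie)
{
	(void)cookie;
	printf("S1 invalidate: iova=0x%lx size=%zu granule=%zu\n",
	       iova, size, granule);
}

/* Stage-2 flavour: its own, simpler IPA-based invalidation path. */
static void toy_inv_range_s2(unsigned long iova, size_t size,
			     size_t granule, void *cookie)
{
	(void)cookie;
	printf("S2 invalidate: iova=0x%lx size=%zu granule=%zu\n",
	       iova, size, granule);
}

static const struct toy_tlb_ops toy_s1_ops = { .tlb_add_flush = toy_inv_range_s1 };
static const struct toy_tlb_ops toy_s2_ops = { .tlb_add_flush = toy_inv_range_s2 };

int main(void)
{
	struct toy_domain d1 = { .tlb_ops = &toy_s1_ops };
	struct toy_domain d2 = { .tlb_ops = &toy_s2_ops };

	/* Callers just invoke the bound hook; no per-call stage check. */
	d1.tlb_ops->tlb_add_flush(0x1000, 0x2000, 0x1000, &d1);
	d2.tlb_ops->tlb_add_flush(0x8000, 0x1000, 0x1000, &d2);
	return 0;
}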

Change-Id: I566d03bd7941baedcbc2345ffdeefcb4041e9b54
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Git-commit: 71e8a8cd
Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


[isaacm@codeaurora: resolved trivial merge conflicts]
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
parent d683e0f7
+27 −18
@@ -1170,20 +1170,19 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 	arm_smmu_tlb_sync_global(smmu);
 }
 
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
 				      size_t granule, bool leaf, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
-	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	void __iomem *reg = ARM_SMMU_CB(smmu, cfg->cbndx);
 	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
 
-	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
 		wmb();
 
-	if (stage1 && !use_tlbiall) {
+	if (!use_tlbiall) {
 		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
 		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
@@ -1201,19 +1200,29 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 				iova += granule >> 12;
 			} while (size -= granule);
 		}
-	} else if (stage1 && use_tlbiall) {
+	} else {
 		reg += ARM_SMMU_CB_S1_TLBIALL;
 		writel_relaxed(0, reg);
-	} else {
-		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
-			      ARM_SMMU_CB_S2_TLBIIPAS2;
-		iova >>= 12;
-		do {
-			smmu_write_atomic_lq(iova, reg);
-			iova += granule >> 12;
-		} while (size -= granule);
-	}
+	}
+}
+
+static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
+				      size_t granule, bool leaf, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *reg = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		wmb();
+
+	reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
+	iova >>= 12;
+	do {
+		smmu_write_atomic_lq(iova, reg);
+		iova += granule >> 12;
+	} while (size -= granule);
 }
 
 /*
  * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
@@ -1341,7 +1350,7 @@ static const struct msm_iommu_gather_ops arm_smmu_s1_tlb_ops = {
 	.free_pages_exact = arm_smmu_free_pages_exact,
 	.tlb_ops = {
 		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
-		.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+		.tlb_add_flush	= arm_smmu_tlb_inv_range_s1,
 		.tlb_sync	= arm_smmu_tlb_sync_context,
 	}
 };
@@ -1351,7 +1360,7 @@ static const struct msm_iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
 	.free_pages_exact = arm_smmu_free_pages_exact,
 	.tlb_ops = {
 		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-		.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+		.tlb_add_flush	= arm_smmu_tlb_inv_range_s2,
 		.tlb_sync	= arm_smmu_tlb_sync_context,
 	}
 };