
Commit 8f45532e authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "Revert "iommu/arm-smmu: Add IOMMU API TLB sync support""

parents a8f58ee8 98ade145
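
For orientation, the hunks below suggest the revert restores the older stage-1 TLB maintenance flow: arm_smmu_tlb_sync_context() goes back to only issuing the TLB sync, arm_smmu_tlb_inv_context_s1() regains its invalidate-then-sync body, the explicit tlb_sync()/iommu_tlb_sync()/io_pgtable_tlb_sync() calls after the flush_all variants are dropped, and the .flush_iotlb_all/.iotlb_sync entries disappear from arm_smmu_ops. The sketch that follows is only an illustration of that restored ordering, not driver code: the types and helpers (fake_smmu_domain, tlbi_asid(), tlbi_all(), tlb_sync_poll()) are hypothetical stand-ins for the real context-bank register accesses.

/*
 * Simplified, self-contained model of the post-revert flow. The real
 * driver writes ARM_SMMU_CB_S1_TLBIASID or ARM_SMMU_CB_S1_TLBIALL and
 * then polls ARM_SMMU_CB_TLBSYNC/ARM_SMMU_CB_TLBSTATUS under a lock;
 * the helpers below are stubs that only log what would happen.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_smmu_domain {
	unsigned int asid;
	bool no_asid_retention;	/* models ARM_SMMU_OPT_NO_ASID_RETENTION */
};

static void tlbi_asid(unsigned int asid) { printf("TLBIASID %u\n", asid); }
static void tlbi_all(void)               { printf("TLBIALL\n"); }
static void tlb_sync_poll(void)          { printf("TLBSYNC, poll TLBSTATUS\n"); }

/* After the revert, the sync helper only performs the sync. */
static void tlb_sync_context(struct fake_smmu_domain *d)
{
	(void)d;	/* the real helper locks and polls per context bank */
	tlb_sync_poll();
}

/* ...and the invalidate callback invalidates, then syncs, on its own. */
static void tlb_inv_context_s1(struct fake_smmu_domain *d)
{
	if (!d->no_asid_retention)
		tlbi_asid(d->asid);
	else
		tlbi_all();

	tlb_sync_context(d);
}

int main(void)
{
	struct fake_smmu_domain d = { .asid = 5, .no_asid_retention = false };

	/*
	 * A single flush_all-style call is sufficient again, which is why
	 * the separate tlb_sync() calls at the call sites are removed in
	 * the hunks below.
	 */
	tlb_inv_context_s1(&d);
	return 0;
}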
+26 −40
@@ -1160,31 +1160,12 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie);

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct device *dev = smmu_domain->dev;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;
	size_t ret;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
	ktime_t cur = ktime_get();

	ret = arm_smmu_domain_power_on(&smmu_domain->domain,
				       smmu_domain->smmu);
	if (ret)
		return;

	trace_tlbi_start(dev, 0);

	if (!use_tlbiall)
		writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	else
		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);

	spin_lock_irqsave(&smmu_domain->sync_lock, flags);
	if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
@@ -1194,10 +1175,6 @@ static void arm_smmu_tlb_sync_context(void *cookie)
				smmu_domain->cfg.cbndx,
				dev_name(smmu_domain->dev));
	spin_unlock_irqrestore(&smmu_domain->sync_lock, flags);

	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));

	arm_smmu_domain_power_off(&smmu_domain->domain, smmu_domain->smmu);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
@@ -1209,7 +1186,23 @@ static void arm_smmu_tlb_sync_vmid(void *cookie)

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	return;
	struct arm_smmu_domain *smmu_domain = cookie;
	struct device *dev = smmu_domain->dev;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
	ktime_t cur = ktime_get();

	trace_tlbi_start(dev, 0);

	if (!use_tlbiall)
		writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	else
		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);

	arm_smmu_tlb_sync_context(cookie);
	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -1503,7 +1496,6 @@ static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,

	phys = arm_smmu_iova_to_phys_hard(domain, iova);
	smmu_domain->pgtbl_cfg.tlb->tlb_flush_all(smmu_domain);
	smmu_domain->pgtbl_cfg.tlb->tlb_sync(smmu_domain);
	phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);

	if (phys != phys_post_tlbiall) {
@@ -2560,7 +2552,6 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,

	/* Ensure there are no stale mappings for this context bank */
	tlb->tlb_flush_all(smmu_domain);
	tlb->tlb_sync(smmu_domain);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
@@ -3047,12 +3038,17 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
	if (arm_smmu_is_slave_side_secure(smmu_domain))
		return msm_secure_smmu_unmap(domain, iova, size);

	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
	if (ret)
		return ret;

	arm_smmu_secure_domain_lock(smmu_domain);

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);

	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
	/*
	 * While splitting up block mappings, we might allocate page table
	 * memory during unmap, so the vmids needs to be assigned to the
@@ -3211,14 +3207,6 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
	return ret;
}

static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->tlb_ops)
		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
}

/*
 * This function can sleep, and cannot be called from atomic context. Will
 * power on register block if required. This restriction does not apply to the
@@ -3985,8 +3973,6 @@ static struct iommu_ops arm_smmu_ops = {
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= arm_smmu_map_sg,
	.flush_iotlb_all	= arm_smmu_iotlb_sync,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
	.add_device		= arm_smmu_add_device,
+0 −1
@@ -208,7 +208,6 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
		bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);

		iommu_tlbiall(mapping->domain);
		iommu_tlb_sync(mapping->domain);
		mapping->have_stale_tlbs = false;
		av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, skip_sync);
	}
+0 −1
@@ -86,7 +86,6 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)

	iop = container_of(ops, struct io_pgtable, ops);
	io_pgtable_tlb_flush_all(iop);
	io_pgtable_tlb_sync(iop);
	io_pgtable_init_table[iop->fmt]->free(iop);
}