Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e15c25ca authored by Mitchel Humpherys, committed by Matt Wagantall
Browse files

Revert "iommu/arm-smmu: Make the arm_smmu_map operation atomic"



This reverts commit 0385f613fc41c394873d64785c073dffdfaf0fc4.  This code
is about to be ripped out in favor of the generic io-pgtable code, so
leaving it in will only cause merge conflicts.

Change-Id: Id8474f2a400736c288d9db5f9608eddbd24b0793
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
parent 983eb110
Loading
Loading
Loading
Loading
+32 −11
Original line number Diff line number Diff line
@@ -487,7 +487,6 @@ struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_cfg		cfg;
	struct mutex			lock;
	spinlock_t			pt_lock;
	u32				attributes;
	int				secure_vmid;
};
@@ -983,9 +982,12 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
	return IRQ_HANDLED;
}

/* smmu_domain->lock must be held across any calls to this function */
static void arm_smmu_flush_pgtable(struct arm_smmu_domain *smmu_domain,
				   void *addr, size_t size)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
	int coherent_htw_disable = smmu_domain->attributes &
		(1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);

@@ -1000,9 +1002,22 @@ static void arm_smmu_flush_pgtable(struct arm_smmu_domain *smmu_domain,
		 * recursion here as the SMMU table walker will not be wired
		 * through another SMMU.
		 */
		if (smmu) {
			dma_addr_t handle =
				dma_map_page(smmu->dev, virt_to_page(addr),
					offset, size, DMA_TO_DEVICE);
			if (handle == DMA_ERROR_CODE)
				dev_err(smmu->dev,
					"Couldn't flush page tables at %p!\n",
					addr);
			else
				dma_unmap_page(smmu->dev, handle, size,
					DMA_TO_DEVICE);
		} else {
			dmac_clean_range(addr, addr + size);
		}
	}
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
@@ -1278,7 +1293,6 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
	smmu_domain->secure_vmid = VMID_INVAL;

	mutex_init(&smmu_domain->lock);
	spin_lock_init(&smmu_domain->pt_lock);
	domain->priv = smmu_domain;
	return 0;

@@ -1855,7 +1869,6 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	pgd_t *pgd = cfg->pgd;
	unsigned long flags;

	/* some extra sanity checks for attached domains */
	if (smmu) {
@@ -1883,7 +1896,6 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
	if (size & ~PAGE_MASK)
		return -EINVAL;

	spin_lock_irqsave(&smmu_domain->pt_lock, flags);
	pgd += pgd_index(iova);
	end = iova + size;
	do {
@@ -1892,26 +1904,36 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
		ret = arm_smmu_alloc_init_pud(smmu_domain, pgd, iova, next,
					      paddr, prot, stage);
		if (ret)
			goto out_unlock;
			return ret;

		paddr += next - iova;
		iova = next;
	} while (pgd++, iova != end);

out_unlock:
	spin_unlock_irqrestore(&smmu_domain->pt_lock, flags);
	return ret;
}

/*
 * iommu_ops .map callback: install a mapping of [iova, iova + size) to
 * paddr in this domain's page tables.
 *
 * Returns 0 on success, -ENODEV if the domain has no private data, or
 * the error propagated from arm_smmu_handle_mapping().
 *
 * NOTE(review): the rendered diff had lost its +/- markers, leaving an
 * unconditional `return arm_smmu_handle_mapping(...)` (the pre-revert,
 * "atomic" variant) immediately before the mutex-locked body, which made
 * the lock/invalidate path unreachable. The early return is the removed
 * line of the revert; the locked body below is the intended result.
 */
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	if (!smmu_domain)
		return -ENODEV;

	/* Serialize page-table updates against other map/unmap callers. */
	mutex_lock(&smmu_domain->lock);
	ret = arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);

	/*
	 * Some SMMU implementations require a full context TLB invalidate
	 * after every successful map (ARM_SMMU_OPT_INVALIDATE_ON_MAP).
	 * Clocks must be enabled around register accesses.
	 */
	if (!ret && smmu_domain->smmu &&
		(smmu_domain->smmu->options & ARM_SMMU_OPT_INVALIDATE_ON_MAP)) {
		arm_smmu_enable_clocks(smmu_domain->smmu);
		arm_smmu_tlb_inv_context(smmu_domain);
		arm_smmu_disable_clocks(smmu_domain->smmu);
	}
	mutex_unlock(&smmu_domain->lock);

	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -1920,9 +1942,8 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
	int ret;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);

	mutex_lock(&smmu_domain->lock);
	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
	if (smmu_domain->smmu) {
		arm_smmu_enable_clocks(smmu_domain->smmu);
		arm_smmu_tlb_inv_context(smmu_domain);