Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 319720f5 authored by Linus Torvalds
Browse files

Merge tag 'iommu-fixes-for-v3.13-rc4' of git://github.com/awilliam/linux-vfio

Pull iommu fixes from Alex Williamson:
 "arm/smmu driver updates via Will Deacon fixing locking around page
  table walks and a couple other issues"

* tag 'iommu-fixes-for-v3.13-rc4' of git://github.com/awilliam/linux-vfio:
  iommu/arm-smmu: fix error return code in arm_smmu_device_dt_probe()
  iommu/arm-smmu: remove potential NULL dereference on mapping path
  iommu/arm-smmu: use mutex instead of spinlock for locking page tables
parents 5dec682c 239a8702
Loading
Loading
Loading
Loading
+28 −38
Original line number Diff line number Diff line
@@ -392,7 +392,7 @@ struct arm_smmu_domain {
	struct arm_smmu_cfg		root_cfg;
	phys_addr_t			output_mask;

	spinlock_t			lock;
	struct mutex			lock;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
		goto out_free_domain;
	smmu_domain->root_cfg.pgd = pgd;

	spin_lock_init(&smmu_domain->lock);
	mutex_init(&smmu_domain->lock);
	domain->priv = smmu_domain;
	return 0;

@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
	 * Sanity check the domain. We don't currently support domains
	 * that cross between different SMMU chains.
	 */
	spin_lock(&smmu_domain->lock);
	mutex_lock(&smmu_domain->lock);
	if (!smmu_domain->leaf_smmu) {
		/* Now that we have a master, we can finalise the domain */
		ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
			dev_name(device_smmu->dev));
		goto err_unlock;
	}
	spin_unlock(&smmu_domain->lock);
	mutex_unlock(&smmu_domain->lock);

	/* Looks ok, so add the device to the domain */
	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
	return arm_smmu_domain_add_master(smmu_domain, master);

err_unlock:
	spin_unlock(&smmu_domain->lock);
	mutex_unlock(&smmu_domain->lock);
	return ret;
}

@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
	if (paddr & ~output_mask)
		return -ERANGE;

	spin_lock(&smmu_domain->lock);
	mutex_lock(&smmu_domain->lock);
	pgd += pgd_index(iova);
	end = iova + size;
	do {
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
	} while (pgd++, iova != end);

out_unlock:
	spin_unlock(&smmu_domain->lock);
	mutex_unlock(&smmu_domain->lock);

	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
@@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int flags)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;

	if (!smmu_domain || !smmu)
	if (!smmu_domain)
		return -ENODEV;

	/* Check for silent address truncation up the SMMU chain. */
@@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pgd_t *pgdp, pgd;
	pud_t pud;
	pmd_t pmd;
	pte_t pte;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;

	spin_lock(&smmu_domain->lock);
	pgd = root_cfg->pgd;
	if (!pgd)
		goto err_unlock;
	pgdp = root_cfg->pgd;
	if (!pgdp)
		return 0;

	pgd += pgd_index(iova);
	if (pgd_none_or_clear_bad(pgd))
		goto err_unlock;
	pgd = *(pgdp + pgd_index(iova));
	if (pgd_none(pgd))
		return 0;

	pud = pud_offset(pgd, iova);
	if (pud_none_or_clear_bad(pud))
		goto err_unlock;
	pud = *pud_offset(&pgd, iova);
	if (pud_none(pud))
		return 0;

	pmd = pmd_offset(pud, iova);
	if (pmd_none_or_clear_bad(pmd))
		goto err_unlock;
	pmd = *pmd_offset(&pud, iova);
	if (pmd_none(pmd))
		return 0;

	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
	pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
	if (pte_none(pte))
		goto err_unlock;

	spin_unlock(&smmu_domain->lock);
	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
		return 0;

err_unlock:
	spin_unlock(&smmu_domain->lock);
	dev_warn(smmu->dev,
		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
		 (unsigned long long)iova);
	return -EINVAL;
	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
}

static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
@@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_parent;
	}