
Commit a46cdc4c authored by Joerg Roedel


Merge branch 'for-joerg/arm-smmu/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into iommu/fixes
parents 520eccdf 76557391
drivers/iommu/arm-smmu.c
+13 −5
@@ -400,6 +400,8 @@ struct arm_smmu_device {

	u32				cavium_id_base; /* Specific to Cavium */

+	spinlock_t			global_sync_lock;
+
	/* IOMMU core code handle */
	struct iommu_device		iommu;
};
@@ -436,7 +438,7 @@ struct arm_smmu_domain {
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
-	spinlock_t			cb_lock; /* Serialises ATS1* ops */
+	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

@@ -602,9 +604,12 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
+	unsigned long flags;

+	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
+	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
@@ -612,9 +617,12 @@ static void arm_smmu_tlb_sync_context(void *cookie)
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+	unsigned long flags;

+	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
+	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
@@ -1511,7 +1519,6 @@ static int arm_smmu_add_device(struct device *dev)

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
-		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
@@ -1550,15 +1557,15 @@ static int arm_smmu_add_device(struct device *dev)

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
-		goto out_free;
+		goto out_cfg_free;

	iommu_device_link(&smmu->iommu, dev);

	return 0;

+out_cfg_free:
+	kfree(cfg);
out_free:
-	if (fwspec)
-		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}
@@ -1925,6 +1932,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
+	spin_lock_init(&smmu->global_sync_lock);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
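
The two sync paths above now take a spinlock around __arm_smmu_tlb_sync(), whose body is not part of this diff: it writes a sync command register and then polls the matching status register until the hardware reports completion. The locks (global_sync_lock for the global registers, cb_lock for a context bank) keep concurrent CPUs from interleaving their command writes and status polls on the same register pair, which could otherwise show up as spurious sync timeouts. Below is a minimal kernel-style sketch of the serialised write-then-poll pattern; the register offsets, ACTIVE bit and timeout are hypothetical, and this is not the driver's actual helper:

/* Sketch only (assumes <linux/io.h>, <linux/spinlock.h> and friends):
 * issue a TLB sync command and spin on its status register, with a
 * caller-owned lock ensuring only one CPU runs the write-then-poll
 * sequence at a time.
 */
static void tlb_sync_serialised(void __iomem *sync_reg,
				void __iomem *status_reg,
				spinlock_t *lock)
{
	unsigned long flags;
	unsigned int timeout = 1000;		/* arbitrary bound for the sketch */

	spin_lock_irqsave(lock, flags);
	writel_relaxed(0, sync_reg);		/* kick off the sync */
	while (readl_relaxed(status_reg) & 0x1) {	/* hypothetical ACTIVE bit */
		if (!--timeout) {
			pr_err_ratelimited("TLB sync timed out\n");
			break;
		}
		cpu_relax();
	}
	spin_unlock_irqrestore(lock, flags);
}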
drivers/iommu/io-pgtable-arm-v7s.c
+6 −0
@@ -479,6 +479,9 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

+	if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
+		return -ERANGE;
+
	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
@@ -659,6 +662,9 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	size_t unmapped;

+	if (WARN_ON(upper_32_bits(iova)))
+		return 0;
+
	unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
	if (unmapped)
		io_pgtable_tlb_sync(&data->iop);
drivers/iommu/io-pgtable-arm.c
+7 −0
@@ -452,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

+	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
+		    paddr >= (1ULL << data->iop.cfg.oas)))
+		return -ERANGE;
+
	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
@@ -610,6 +614,9 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

+	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+		return 0;
+
	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		io_pgtable_tlb_sync(&data->iop);
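
Both io-pgtable files get the same kind of input sanitisation before the table walk: the short-descriptor (v7s) hunks reject any map whose IOVA or physical address has bits set above bit 31 (and any unmap IOVA that does), while the LPAE hunks bound the addresses by the configured input/output address sizes (ias/oas). Out-of-range maps now fail with -ERANGE and out-of-range unmaps return 0 under a WARN_ON, instead of the bogus address being folded into the wrong table indices. A small self-contained sketch of the LPAE-style bounds check, with made-up names standing in for the io_pgtable_cfg fields and assuming ias/oas are below 64:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the ias/oas fields of struct io_pgtable_cfg. */
struct pgtbl_limits {
	unsigned int ias;	/* input (IOVA) address size, in bits */
	unsigned int oas;	/* output (physical) address size, in bits */
};

/* Mirrors the check added before __arm_lpae_map(): reject addresses that
 * do not fit within the configured address sizes. */
static bool map_args_in_range(const struct pgtbl_limits *l,
			      uint64_t iova, uint64_t paddr)
{
	return iova < (1ULL << l->ias) && paddr < (1ULL << l->oas);
}

int main(void)
{
	struct pgtbl_limits l = { .ias = 32, .oas = 40 };

	printf("%d\n", map_args_in_range(&l, 0xfffff000ULL, 0x1000ULL)); /* 1: fits */
	printf("%d\n", map_args_in_range(&l, 1ULL << 32, 0x1000ULL));    /* 0: IOVA needs 33 bits */
	return 0;
}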
drivers/iommu/io-pgtable.h
+1 −8
@@ -158,14 +158,12 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops);
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
- * @tlb_sync_pending: Private flag for optimising out redundant syncs.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
-	bool			tlb_sync_pending;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};
@@ -175,22 +173,17 @@ struct io_pgtable {
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	iop->cfg.tlb->tlb_flush_all(iop->cookie);
-	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
		unsigned long iova, size_t size, size_t granule, bool leaf)
{
	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
-	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
-	if (iop->tlb_sync_pending) {
-		iop->cfg.tlb->tlb_sync(iop->cookie);
-		iop->tlb_sync_pending = false;
-	}
+	iop->cfg.tlb->tlb_sync(iop->cookie);
}

/**
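
With the tlb_sync_pending flag removed from the core code, io_pgtable_tlb_sync() now invokes the driver's tlb_sync callback unconditionally, so a driver whose sync cannot tolerate being called with no invalidation outstanding has to track that state itself; the MediaTek hunks below do exactly that with a driver-local tlb_flush_active flag. A minimal self-contained sketch of the pattern, with illustrative names rather than the driver's own:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical driver-private state; the real driver keeps a similar
 * flag in its per-device data (struct mtk_iommu_data). */
struct tlb_state {
	bool flush_active;	/* an invalidation has been queued in hardware */
};

static void tlb_add_flush(struct tlb_state *s)
{
	/* ...program the invalidation range registers here... */
	s->flush_active = true;
}

static void tlb_sync(struct tlb_state *s)
{
	/* Nothing queued: polling the completion register would only time out. */
	if (!s->flush_active)
		return;

	/* ...poll for completion and acknowledge it here... */
	s->flush_active = false;
}

int main(void)
{
	struct tlb_state s = { .flush_active = false };

	tlb_sync(&s);		/* harmless no-op rather than a poll timeout */
	tlb_add_flush(&s);
	tlb_sync(&s);		/* actually waits for the queued invalidation */
	printf("flush_active = %d\n", s.flush_active);
	return 0;
}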
drivers/iommu/mtk_iommu.c
+6 −0
@@ -129,6 +129,7 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
	writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
	writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+	data->tlb_flush_active = true;
}

static void mtk_iommu_tlb_sync(void *cookie)
@@ -137,6 +138,10 @@ static void mtk_iommu_tlb_sync(void *cookie)
	int ret;
	u32 tmp;

+	/* Avoid timing out if there's nothing to wait for */
+	if (!data->tlb_flush_active)
+		return;
+
	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
					tmp != 0, 10, 100000);
	if (ret) {
@@ -146,6 +151,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+	data->tlb_flush_active = false;
}

static const struct iommu_gather_ops mtk_iommu_gather_ops = {