Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d1bda515 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "iommu/arm-smmu: Make arm_smmu_unprepare_pgtable atomic"

parents e00ccf14 402575ce
Loading
Loading
Loading
Loading
+71 −13
Original line number Diff line number Diff line
@@ -432,7 +432,8 @@ enum arm_smmu_domain_stage {
};

/*
 * Bookkeeping for one page of page-table memory whose ownership is
 * transferred to/from a secure VM via hyp_assign_phys().  Entries live
 * on a domain's pte_info_list (pending assign) or unassign_list
 * (pending unassign + free).
 * NOTE(review): both phys_addr and virt_addr appear here, but the call
 * sites in this change only set virt_addr and apply virt_to_phys()
 * themselves — confirm phys_addr is still needed.
 */
struct arm_smmu_pte_info {
	phys_addr_t phys_addr;
	void *virt_addr;	/* kernel virtual address of the page */
	size_t size;		/* size passed to free_pages_exact() */
	struct list_head entry;	/* link on pte_info_list / unassign_list */
};

@@ -447,6 +448,7 @@ struct arm_smmu_domain {
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
};

static struct iommu_ops arm_smmu_ops;
@@ -948,15 +950,32 @@ static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
}

static void arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);

/*
 * Allocation hook handed to the io-pgtable code: obtain page table
 * memory with alloc_pages_exact() and run the secure-domain fixups
 * (arm_smmu_prepare_pgtable) on the new memory before returning it.
 */
static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	void *page_addr = alloc_pages_exact(size, gfp_mask);

	if (unlikely(!page_addr))
		return NULL;

	arm_smmu_prepare_pgtable(page_addr, cookie);
	return page_addr;
}

/*
 * Free hook handed to the io-pgtable code.  Ownership of @virt passes
 * to arm_smmu_unprepare_pgtable(), which either frees it immediately
 * (non-secure domains) or queues it to be freed once the hypervisor
 * unassign completes — so no free_pages_exact() call belongs here.
 */
static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	arm_smmu_unprepare_pgtable(cookie, virt, size);
	/* unprepare also frees (possibly later), no need to free here */
}

/*
 * TLB-maintenance and page-table-memory callbacks registered with the
 * generic io-pgtable layer.  alloc/free_pages_exact are the optional
 * hooks that route page table memory through the secure-domain
 * prepare/unprepare paths above.
 */
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.flush_pgtable	= arm_smmu_flush_pgtable,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -1433,6 +1452,8 @@ out:
	return ret;
}

static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
@@ -1454,8 +1475,11 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
		free_irq(irq, domain);
	}

	if (smmu_domain->pgtbl_ops)
	if (smmu_domain->pgtbl_ops) {
		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
		/* unassign any freed page table memory */
		arm_smmu_unassign_table(smmu_domain);
	}

	arm_smmu_disable_clocks(smmu_domain->smmu);

@@ -1480,6 +1504,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
	/* disable coherent htw by default */
	smmu_domain->attributes = (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
	INIT_LIST_HEAD(&smmu_domain->pte_info_list);
	INIT_LIST_HEAD(&smmu_domain->unassign_list);
	smmu_domain->cfg.cbndx = INVALID_CBNDX;
	smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
	smmu_domain->cfg.asid = INVALID_ASID;
@@ -1899,8 +1924,9 @@ static void arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)

	list_for_each_entry(pte_info, &smmu_domain->pte_info_list,
								entry) {
		ret = hyp_assign_phys(pte_info->phys_addr, PAGE_SIZE,
				&source_vmid, 1, dest_vmids, dest_perms, 2);
		ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
				      PAGE_SIZE, &source_vmid, 1,
				      dest_vmids, dest_perms, 2);
		if (WARN_ON(ret))
			break;
	}
@@ -1912,20 +1938,50 @@ static void arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
	}
}

static void arm_smmu_unprepare_pgtable(void *cookie, void *addr)
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	int ret;
	int dest_vmids = VMID_HLOS;
	int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
	int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
	struct arm_smmu_pte_info *pte_info, *temp;

	if (smmu_domain->secure_vmid == VMID_INVAL)
		return;

	ret = hyp_assign_phys((phys_addr_t)virt_to_phys(addr), PAGE_SIZE,
			source_vmlist, 2, &dest_vmids, &dest_perms, 1);
	WARN_ON(ret);
	list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
		ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
				      PAGE_SIZE, source_vmlist, 2,
				      &dest_vmids, &dest_perms, 1);
		if (WARN_ON(ret))
			break;
		free_pages_exact(pte_info->virt_addr, pte_info->size);
	}

	list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
				 entry) {
		list_del(&pte_info->entry);
		kfree(pte_info);
	}
}

/*
 * io-pgtable free hook.  Non-secure domains free the page table memory
 * immediately.  Secure domains cannot: the page is still assigned to
 * the secure VM, so it is queued on unassign_list to be returned to
 * HLOS and freed later by arm_smmu_unassign_table().  May run in
 * atomic context, hence GFP_ATOMIC.
 */
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_pte_info *info;

	if (smmu_domain->secure_vmid == VMID_INVAL) {
		/* Nothing assigned to a secure VM; free right away */
		free_pages_exact(addr, size);
		return;
	}

	info = kzalloc(sizeof(*info), GFP_ATOMIC);
	if (!info)
		return;	/* NOTE(review): @addr is leaked here — confirm intent */

	info->virt_addr = addr;
	info->size = size;
	list_add_tail(&info->entry, &smmu_domain->unassign_list);
}

static void arm_smmu_prepare_pgtable(void *addr, void *cookie)
@@ -1939,7 +1995,7 @@ static void arm_smmu_prepare_pgtable(void *addr, void *cookie)
	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
	if (!pte_info)
		return;
	pte_info->phys_addr = (phys_addr_t)virt_to_phys(addr);
	pte_info->virt_addr = addr;
	list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
}

@@ -2026,6 +2082,8 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
	 * memory here as well.
	 */
	arm_smmu_assign_table(smmu_domain);
	/* Also unassign any pages that were free'd during unmap */
	arm_smmu_unassign_table(smmu_domain);

	if (atomic_ctx) {
		arm_smmu_disable_clocks_atomic(smmu_domain->smmu);
+9 −9
Original line number Diff line number Diff line
@@ -325,7 +325,8 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = io_pgtable_alloc_pages_exact(1UL << data->pg_shift,
		cptep = io_pgtable_alloc_pages_exact(&data->iop.cfg, cookie,
						     1UL << data->pg_shift,
						     GFP_ATOMIC | __GFP_ZERO);
		if (!cptep)
			return -ENOMEM;
@@ -337,7 +338,6 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			pte |= ARM_LPAE_PTE_NSTABLE;
		*ptep = pte;
		data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
		data->iop.cfg.tlb->prepare_pgtable(cptep, cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}
@@ -490,8 +490,8 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	data->iop.cfg.tlb->unprepare_pgtable(data->iop.cookie, start);
	io_pgtable_free_pages_exact(start, table_size);
	io_pgtable_free_pages_exact(&data->iop.cfg, data->iop.cookie,
				    start, table_size);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -517,6 +517,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
	size = ARM_LPAE_BLOCK_SIZE(lvl + 1, data);

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;
@@ -599,7 +600,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			*ptep = 0;
			tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
			io_pgtable_free_pages_exact(
				table_base, max_entries * sizeof(*table_base));
				&data->iop.cfg, cookie, table_base,
				max_entries * sizeof(*table_base));
		}

		return entries * entry_size;
@@ -824,13 +826,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = io_pgtable_alloc_pages_exact(data->pgd_size,
	data->pgd = io_pgtable_alloc_pages_exact(cfg, cookie, data->pgd_size,
						 GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
	cfg->tlb->prepare_pgtable(data->pgd, cookie);
	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
@@ -913,13 +914,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = io_pgtable_alloc_pages_exact(data->pgd_size,
	data->pgd = io_pgtable_alloc_pages_exact(cfg, cookie, data->pgd_size,
						 GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
	cfg->tlb->prepare_pgtable(data->pgd, cookie);
	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;
+16 −4
Original line number Diff line number Diff line
@@ -90,18 +90,30 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)

static atomic_t pages_allocated;

/*
 * Allocate page table memory, preferring the format-specific
 * ->alloc_pages_exact() hook when the gather ops provide one and
 * falling back to plain alloc_pages_exact() otherwise.  Successful
 * allocations are counted in pages_allocated for debug accounting.
 *
 * Fix: dropped the stale diff-removal lines transcribed into this
 * function (the old one-parameter prototype and the old
 * "void *ret = alloc_pages_exact(...)" line, which redeclared `ret`
 * and broke compilation).
 */
void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				   size_t size, gfp_t gfp_mask)
{
	void *ret;

	if (cfg->tlb->alloc_pages_exact)
		ret = cfg->tlb->alloc_pages_exact(cookie, size, gfp_mask);
	else
		ret = alloc_pages_exact(size, gfp_mask);

	if (likely(ret))
		atomic_add(1 << get_order(size), &pages_allocated);

	return ret;
}

/*
 * Free page table memory allocated via io_pgtable_alloc_pages_exact(),
 * routing through the format-specific ->free_pages_exact() hook when
 * present and updating the debug page counter.
 *
 * Fix: dropped the stale diff-removal line (the old two-parameter
 * prototype) that conflicted with the new signature.
 */
void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				 void *virt, size_t size)
{
	if (cfg->tlb->free_pages_exact)
		cfg->tlb->free_pages_exact(cookie, virt, size);
	else
		free_pages_exact(virt, size);

	atomic_sub(1 << get_order(size), &pages_allocated);
}

+10 −6
Original line number Diff line number Diff line
@@ -21,8 +21,10 @@ enum io_pgtable_fmt {
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queue TLB invalidation has taken effect.
 * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
 * @prepare_pgtable: Do necessary fixup for newly allocated page table memory
 * @unprepare_pgtable: Undo fixups done during @prepare_pgtable
 * @alloc_pages_exact: Allocate page table memory (optional, defaults to
 *                     alloc_pages_exact)
 * @free_pages_exact:  Free page table memory (optional, defaults to
 *                     free_pages_exact)
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
@@ -33,8 +35,8 @@ struct iommu_gather_ops {
			      void *cookie);
	void (*tlb_sync)(void *cookie);
	void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
	void (*prepare_pgtable)(void *addr, void *cookie);
	void (*unprepare_pgtable)(void *cookie, void *addr);
	void *(*alloc_pages_exact)(void *cookie, size_t size, gfp_t gfp_mask);
	void (*free_pages_exact)(void *cookie, void *virt, size_t size);
};

/**
@@ -156,7 +158,8 @@ struct io_pgtable_init_fns {
 * Like alloc_pages_exact(), but with some additional accounting for debug
 * purposes.
 */
void *io_pgtable_alloc_pages_exact(size_t size, gfp_t gfp_mask);
void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				   size_t size, gfp_t gfp_mask);

/**
 * io_pgtable_free_pages_exact - release memory allocated via io_pgtable_alloc_pages_exact()
@@ -166,6 +169,7 @@ void *io_pgtable_alloc_pages_exact(size_t size, gfp_t gfp_mask);
 * Like free_pages_exact(), but with some additional accounting for debug
 * purposes.
 */
void io_pgtable_free_pages_exact(void *virt, size_t size);
void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				 void *virt, size_t size);

#endif /* __IO_PGTABLE_H */