
Commit 29a9c35b authored by Patrick Daly, committed by Liam Mark

iommu: arm-smmu: Preallocate memory for map operation



page allocation failure: order:0, mode:0x2088020(GFP_ATOMIC|__GFP_ZERO)
 Call trace:
 [<ffffff80080f15c8>] dump_backtrace+0x0/0x248
 [<ffffff80080f1894>] show_stack+0x18/0x28
 [<ffffff8008484984>] dump_stack+0x98/0xc0
 [<ffffff8008231b0c>] warn_alloc+0x114/0x134
 [<ffffff8008231f7c>] __alloc_pages_nodemask+0x3e8/0xd30
 [<ffffff8008232b2c>] alloc_pages_exact+0x4c/0xa4
 [<ffffff800866bec4>] arm_smmu_alloc_pages_exact+0x188/0x1bc
 [<ffffff8008664b28>] io_pgtable_alloc_pages_exact+0x30/0xa0
 [<ffffff8008664ff8>] __arm_lpae_alloc_pages+0x40/0x1c8
 [<ffffff8008665cb4>] __arm_lpae_map+0x224/0x3b4
 [<ffffff8008665b98>] __arm_lpae_map+0x108/0x3b4
 [<ffffff8008666474>] arm_lpae_map+0x78/0x9c
 [<ffffff800866aed4>] arm_smmu_map+0x80/0xdc
 [<ffffff800866015c>] iommu_map+0x118/0x284
 [<ffffff8008c66294>] cam_smmu_alloc_firmware+0x188/0x3c0
 [<ffffff8008cc8afc>] cam_icp_mgr_hw_open+0x88/0x874
 [<ffffff8008cca030>] cam_icp_mgr_acquire_hw+0x2d4/0xc9c
 [<ffffff8008c5fe84>] cam_context_acquire_dev_to_hw+0xb0/0x26c
 [<ffffff8008cd0ce0>] __cam_icp_acquire_dev_in_available+0x1c/0xf0
 [<ffffff8008c5ea98>] cam_context_handle_acquire_dev+0x5c/0x1a8
 [<ffffff8008c619b4>] cam_node_handle_ioctl+0x30c/0xdc8
 [<ffffff8008c62640>] cam_subdev_compat_ioctl+0xe4/0x1dc
 [<ffffff8008bcf8bc>] subdev_compat_ioctl32+0x40/0x68
 [<ffffff8008bd3858>] v4l2_compat_ioctl32+0x64/0x1780

The failure above is an order-0 GFP_ATOMIC allocation issued while the page
tables are built under the domain's cb_lock spinlock. Preallocate the required
memory using GFP_KERNEL when the iommu domain allows sleeping, so the map
operation does not have to allocate page-table pages atomically.
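
The shape of the fix is a preallocate-then-splice pattern: while sleeping is
still allowed, a worst-case number of order-0 pages is allocated with
GFP_KERNEL and parked on a local list, the atomic map path draws from that
list instead of hitting the page allocator, and whatever was not consumed is
freed again. A minimal sketch of the two halves, with made-up names
(example_prealloc/example_release) standing in for the driver's
arm_smmu_prealloc_memory() and arm_smmu_release_prealloc_memory() shown in
the diff below:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

static void example_prealloc(struct list_head *pool, u32 nr)
{
	struct page *page;

	/* May sleep: runs before any spinlock is taken. */
	while (nr--) {
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
		if (!page)
			break;
		list_add(&page->lru, pool);
	}
}

static void example_release(struct list_head *pool)
{
	struct page *page, *tmp;

	/* Free whatever the map path did not consume. */
	list_for_each_entry_safe(page, tmp, pool, lru) {
		list_del(&page->lru);
		__free_pages(page, 0);
	}
}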

Change-Id: I96194a4fabd21cc1d685e4f12afe8fbdd4768ac2
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
Signed-off-by: Liam Mark <lmark@codeaurora.org>
parent 6193841f
+26 −11
@@ -2477,22 +2477,16 @@ static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
 }
 
 static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
-					struct scatterlist *sgl, int nents,
-					struct list_head *pool)
+					size_t size, struct list_head *pool)
 {
-	u32 nr = 0;
-	int i;
-	size_t size = 0;
-	struct scatterlist *sg;
+	u32 nr = 0;
 	struct page *page;
 
 	if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
 			arm_smmu_has_secure_vmid(smmu_domain))
 		return;
 
-	for_each_sg(sgl, sg, nents, i)
-		size += sg->length;
 
 	/* number of 2nd level pagetable entries */
 	nr += round_up(size, SZ_1G) >> 30;
 	/* number of 3rd level pagetabel entries */
@@ -2507,16 +2501,32 @@ static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
 	}
 }
 
+static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
+					struct scatterlist *sgl, int nents,
+					struct list_head *pool)
+{
+	int i;
+	size_t size = 0;
+	struct scatterlist *sg;
+
+	if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
+			arm_smmu_has_secure_vmid(smmu_domain))
+		return;
+
+	for_each_sg(sgl, sg, nents, i)
+		size += sg->length;
+
+	arm_smmu_prealloc_memory(smmu_domain, size, pool);
+}
+
 static void arm_smmu_release_prealloc_memory(
 		struct arm_smmu_domain *smmu_domain, struct list_head *list)
 {
 	struct page *page, *tmp;
-	u32 remaining = 0;
 
 	list_for_each_entry_safe(page, tmp, list, lru) {
 		list_del(&page->lru);
 		__free_pages(page, 0);
-		remaining++;
 	}
 }

@@ -2598,6 +2608,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	unsigned long flags;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+	LIST_HEAD(nonsecure_pool);
 
 	if (!ops)
 		return -ENODEV;
@@ -2605,15 +2616,19 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	if (arm_smmu_is_slave_side_secure(smmu_domain))
 		return msm_secure_smmu_map(domain, iova, paddr, size, prot);
 
+	arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
 	arm_smmu_secure_domain_lock(smmu_domain);
 
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+	list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
 	ret = ops->map(ops, iova, paddr, size, prot);
+	list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
 	arm_smmu_assign_table(smmu_domain);
 	arm_smmu_secure_domain_unlock(smmu_domain);
 
+	arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
 	return ret;
 }

@@ -2695,7 +2710,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	if (ret)
 		return ret;
 
-	arm_smmu_prealloc_memory(smmu_domain, sg, nents, &nonsecure_pool);
+	arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
 	arm_smmu_secure_domain_lock(smmu_domain);
 
 	__saved_iova_start = iova;
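
For context, the splice into smmu_domain->nonsecure_pool only helps because
the allocator running under cb_lock can satisfy its requests from that pool
before falling back to GFP_ATOMIC. That consumer (arm_smmu_alloc_pages_exact
in the call trace above) is not part of this diff; the snippet below is only
an assumed illustration of the lookup, not the actual implementation:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Assumed illustration only; the real consumer is arm_smmu_alloc_pages_exact(). */
static void *pool_alloc_or_atomic(struct list_head *pool, gfp_t gfp)
{
	struct page *page;

	if (!list_empty(pool)) {
		/* Runs under cb_lock, so no further locking here. */
		page = list_first_entry(pool, struct page, lru);
		list_del(&page->lru);
		return page_address(page);
	}
	/* Pool empty or not populated: fall back to an atomic allocation. */
	return (void *)__get_free_pages(gfp | __GFP_ZERO, 0);
}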