
Commit b312b4f0 authored by Swathi Sridhar

iommu: arm-smmu: Preallocate memory for map only on failure



page allocation failure: order:0, mode:0x2088020(GFP_ATOMIC|__GFP_ZERO)

Call trace:
	[<ffffff80080f15c8>] dump_backtrace+0x0/0x248
	[<ffffff80080f1894>] show_stack+0x18/0x28
	[<ffffff8008484984>] dump_stack+0x98/0xc0
	[<ffffff8008231b0c>] warn_alloc+0x114/0x134
	[<ffffff8008231f7c>] __alloc_pages_nodemask+0x3e8/0xd30
	[<ffffff8008232b2c>] alloc_pages_exact+0x4c/0xa4
	[<ffffff800866bec4>] arm_smmu_alloc_pages_exact+0x188/0x1bc
	[<ffffff8008664b28>] io_pgtable_alloc_pages_exact+0x30/0xa0
	[<ffffff8008664ff8>] __arm_lpae_alloc_pages+0x40/0x1c8
	[<ffffff8008665cb4>] __arm_lpae_map+0x224/0x3b4
	[<ffffff8008665b98>] __arm_lpae_map+0x108/0x3b4
	[<ffffff8008666474>] arm_lpae_map+0x78/0x9c
	[<ffffff800866aed4>] arm_smmu_map+0x80/0xdc
	[<ffffff800866015c>] iommu_map+0x118/0x284
	[<ffffff8008c66294>] cam_smmu_alloc_firmware+0x188/0x3c0
	[<ffffff8008cc8afc>] cam_icp_mgr_hw_open+0x88/0x874
	[<ffffff8008cca030>] cam_icp_mgr_acquire_hw+0x2d4/0xc9c
	[<ffffff8008c5fe84>] cam_context_acquire_dev_to_hw+0xb0/0x26c
	[<ffffff8008cd0ce0>] __cam_icp_acquire_dev_in_available+0x1c/0xf0
	[<ffffff8008c5ea98>] cam_context_handle_acquire_dev+0x5c/0x1a8
	[<ffffff8008c619b4>] cam_node_handle_ioctl+0x30c/0xdc8
	[<ffffff8008c62640>] cam_subdev_compat_ioctl+0xe4/0x1dc
	[<ffffff8008bcf8bc>] subdev_compat_ioctl32+0x40/0x68
	[<ffffff8008bd3858>] v4l2_compat_ioctl32+0x64/0x1780

In order to avoid order-0 page allocation failures during the
SMMU map operation, the existing implementation preallocates
the required page table memory with GFP_KERNEL before every
map call, so that the atomic allocation performed during the
map itself always finds sufficient memory. This is not
necessary for every single map call, since the atomic
allocation succeeds most of the time. Hence, preallocate the
memory only when the map operation fails due to insufficient
memory, and then retry the map operation with the preallocated
memory. This solution applies only to map calls made from a
non-atomic context.

Change-Id: I417f311c2224eb863d6c99612b678bbb2dd3db58
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
parent d25fbc3d
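The core idea of the change, reduced to a sketch: attempt the map first (its internal page table allocations are atomic), and only on -ENOMEM preallocate with GFP_KERNEL and retry. This is a minimal illustration, not the driver code; prealloc_pool(), publish_pool(), unpublish_pool() and release_pool() are hypothetical helpers standing in for the arm_smmu_* routines in the diff below.

#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical helpers standing in for arm_smmu_prealloc_memory() and
 * arm_smmu_release_prealloc_memory(); see the diff below for the real ones.
 */
static void prealloc_pool(size_t size, struct list_head *pool);
static void publish_pool(struct list_head *pool);
static void unpublish_pool(struct list_head *pool);
static void release_pool(struct list_head *pool);

/* Sketch of the retry-on-failure pattern this commit introduces.
 * ops->map() (from the io-pgtable layer) allocates page table memory
 * atomically and may fail with -ENOMEM under memory pressure.
 */
static int map_with_fallback(struct io_pgtable_ops *ops, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	LIST_HEAD(pool);
	int ret;

	ret = ops->map(ops, iova, paddr, size, prot);
	if (ret != -ENOMEM)
		return ret;

	prealloc_pool(size, &pool);	/* GFP_KERNEL; caller is non-atomic */
	publish_pool(&pool);		/* expose pages to the atomic allocator */
	ret = ops->map(ops, iova, paddr, size, prot);
	unpublish_pool(&pool);
	release_pool(&pool);		/* free whatever was not consumed */
	return ret;
}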
+32 −27
@@ -2493,24 +2493,6 @@ static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
 	}
 }
 
-static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
-					struct scatterlist *sgl, int nents,
-					struct list_head *pool)
-{
-	int i;
-	size_t size = 0;
-	struct scatterlist *sg;
-
-	if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
-			arm_smmu_has_secure_vmid(smmu_domain))
-		return;
-
-	for_each_sg(sgl, sg, nents, i)
-		size += sg->length;
-
-	arm_smmu_prealloc_memory(smmu_domain, size, pool);
-}
-
 static void arm_smmu_release_prealloc_memory(
 		struct arm_smmu_domain *smmu_domain, struct list_head *list)
 {
@@ -2759,19 +2741,29 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	if (arm_smmu_is_slave_side_secure(smmu_domain))
 		return msm_secure_smmu_map(domain, iova, paddr, size, prot);
 
-	arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
 	arm_smmu_secure_domain_lock(smmu_domain);
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	ret = ops->map(ops, iova, paddr, size, prot);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
+	/* if the map call failed due to insufficient memory,
+	 * then retry again with preallocated memory to see
+	 * if the map call succeeds.
+	 */
+	if (ret == -ENOMEM) {
+		arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
+		spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+		list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
+		ret = ops->map(ops, iova, paddr, size, prot);
+		list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
+		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+		arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
+	}
+
 	arm_smmu_assign_table(smmu_domain);
 	arm_smmu_secure_domain_unlock(smmu_domain);
 
-	arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
 	return ret;
 }
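The list_splice_init() pair in the retry path is the handoff: pages preallocated with GFP_KERNEL are spliced onto smmu_domain->nonsecure_pool while cb_lock is held, the page table code draws from that pool instead of allocating atomically, and leftovers are spliced back and freed. Below is a hedged sketch of what the consumer side of such a pool can look like, assuming pages are chained through page->lru; it is an illustration, not the driver's arm_smmu_alloc_pages_exact().

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Illustrative consumer of a preallocated page pool: prefer a page
 * published by the non-atomic caller, and fall back to an atomic
 * allocation otherwise. Assumes order-0 (single page) requests and
 * pages chained through page->lru.
 */
static void *pool_alloc_page(struct list_head *pool)
{
	struct page *page;

	page = list_first_entry_or_null(pool, struct page, lru);
	if (page) {
		list_del(&page->lru);
		return page_address(page);
	}
	return alloc_pages_exact(PAGE_SIZE, GFP_ATOMIC | __GFP_ZERO);
}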

@@ -2857,7 +2849,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	if (arm_smmu_is_slave_side_secure(smmu_domain))
 		return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);
 
-	arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
 
 	arm_smmu_secure_domain_lock(smmu_domain);
 
 	__saved_iova_start = iova;
@@ -2876,12 +2868,26 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		}
 
 		spin_lock_irqsave(&smmu_domain->cb_lock, flags);
-		list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
 		ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
 				  prot, &size);
-		list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
+		if (ret == -ENOMEM) {
+			arm_smmu_prealloc_memory(smmu_domain,
+						 batch_size, &nonsecure_pool);
+			spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+			list_splice_init(&nonsecure_pool,
+					 &smmu_domain->nonsecure_pool);
+			ret = ops->map_sg(ops, iova, sg_start,
+					  idx_end - idx_start, prot, &size);
+			list_splice_init(&smmu_domain->nonsecure_pool,
+					 &nonsecure_pool);
+			spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+			arm_smmu_release_prealloc_memory(smmu_domain,
+							 &nonsecure_pool);
+		}
+
 		/* Returns 0 on error */
 		if (!ret) {
 			size_to_unmap = iova + size - __saved_iova_start;
@@ -2901,7 +2907,6 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		iova = __saved_iova_start;
 	}
 	arm_smmu_secure_domain_unlock(smmu_domain);
-	arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
 	return iova - __saved_iova_start;
 }
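With arm_smmu_prealloc_memory_sg() removed, the retry path above only needs the size of the failing batch (batch_size) rather than the whole scatterlist up front. For reference, a batch size can be computed by walking the scatterlist segment, much as the deleted helper did; the function below is illustrative, and its name is not the driver's.

#include <linux/scatterlist.h>

/* Illustrative: sum the lengths of nents scatterlist entries starting
 * at sg_start -- the amount the retry path preallocates on -ENOMEM.
 */
static size_t sg_batch_size(struct scatterlist *sg_start, int nents)
{
	struct scatterlist *sg;
	size_t size = 0;
	int i;

	for_each_sg(sg_start, sg, nents, i)
		size += sg->length;

	return size;
}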