
Commit 26fe56f4 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "iommu: ignore scm_call return if SMC_ID not supported"

parents d8526300 65111b1f
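
This merge makes arm_smmu_errata_get_guard_page() tolerate an -EIO return from hyp_assign_phys(): going by the subject line, that is what the call reports when the underlying scm_call SMC_ID is not supported by the secure environment, and the guard page is now kept rather than freed. The merge also carries two related allocator changes: a nonsecure_pool of pages preallocated with GFP_KERNEL that arm_smmu_map() and arm_smmu_map_sg() lend to the page-table allocator while holding cb_lock, and a kmalloc() in __msm_dma_map_sg() relaxed from GFP_ATOMIC to GFP_KERNEL.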
+2 −2
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -41,7 +41,7 @@ struct page *arm_smmu_errata_get_guard_page(int vmid)
		ret = hyp_assign_phys(page_to_phys(page), PAGE_ALIGN(size),
				&source_vm, 1,
				&dest_vm, &dest_perm, 1);
-		if (ret) {
+		if (ret && (ret != -EIO)) {
			__free_pages(page, get_order(size));
			page = NULL;
		}
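
Why keep the page on -EIO? Per the merge subject, hyp_assign_phys() fails with -EIO when the secure world does not implement the assign SMC_ID; the memory itself is still usable, so only other errors should free it. A minimal userspace sketch of that error-handling pattern (hyp_assign_phys_stub() and its simulated return codes are illustrative stand-ins, not the kernel API):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for hyp_assign_phys(): returns 0 on success,
 * -EIO when the secure world does not implement the assign SMC_ID,
 * or another negative errno on a genuine failure. */
static int hyp_assign_phys_stub(int simulated_ret)
{
	return simulated_ret;
}

static void *get_guard_page(int simulated_ret)
{
	void *page = malloc(4096);
	int ret;

	if (!page)
		return NULL;

	ret = hyp_assign_phys_stub(simulated_ret);
	/* Only a genuine failure discards the page; -EIO means the
	 * SMC_ID is unsupported, so the assignment is simply skipped. */
	if (ret && ret != -EIO) {
		free(page);
		page = NULL;
	}
	return page;
}

int main(void)
{
	printf("success, page kept:  %p\n", get_guard_page(0));
	printf("-EIO, page kept:     %p\n", get_guard_page(-EIO));
	printf("-EINVAL, page freed: %p\n", get_guard_page(-EINVAL));
	return 0;
}
```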
+80 −1
@@ -370,6 +370,8 @@ struct arm_smmu_domain {
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
+	/* nonsecure pool protected by pgtbl_lock */
+	struct list_head		nonsecure_pool;
	struct iommu_domain		domain;
	bool				qsmmuv500_errata1_min_iova_align;
};
@@ -1172,8 +1174,19 @@ static void *arm_smmu_alloc_pages_exact(void *cookie,
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

-	if (!arm_smmu_is_master_side_secure(smmu_domain))
+	if (!arm_smmu_is_master_side_secure(smmu_domain)) {
+		struct page *pg;
+		/* size is expected to be 4K with current configuration */
+		if (size == PAGE_SIZE) {
+			pg = list_first_entry_or_null(
+				&smmu_domain->nonsecure_pool, struct page, lru);
+			if (pg) {
+				list_del_init(&pg->lru);
+				return page_address(pg);
+			}
+		}
		return alloc_pages_exact(size, gfp_mask);
+	}

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
@@ -2038,6 +2051,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
	INIT_LIST_HEAD(&smmu_domain->unassign_list);
	mutex_init(&smmu_domain->assign_lock);
	INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
+	INIT_LIST_HEAD(&smmu_domain->nonsecure_pool);
	arm_smmu_domain_reinit(smmu_domain);

	return &smmu_domain->domain;
@@ -2462,6 +2476,60 @@ static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
	return 0;
}

+static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
+					size_t size, struct list_head *pool)
+{
+	int i;
+	u32 nr = 0;
+	struct page *page;
+
+	if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
+			arm_smmu_has_secure_vmid(smmu_domain))
+		return;
+
+	/* number of 2nd level pagetable entries */
+	nr += round_up(size, SZ_1G) >> 30;
+	/* number of 3rd level pagetable entries */
+	nr += round_up(size, SZ_2M) >> 21;
+
+	/* Retry later with atomic allocation on error */
+	for (i = 0; i < nr; i++) {
+		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+		if (!page)
+			break;
+		list_add(&page->lru, pool);
+	}
+}
+
+static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
+					struct scatterlist *sgl, int nents,
+					struct list_head *pool)
+{
+	int i;
+	size_t size = 0;
+	struct scatterlist *sg;
+
+	if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
+			arm_smmu_has_secure_vmid(smmu_domain))
+		return;
+
+	for_each_sg(sgl, sg, nents, i)
+		size += sg->length;
+
+	arm_smmu_prealloc_memory(smmu_domain, size, pool);
+}
+
+static void arm_smmu_release_prealloc_memory(
+		struct arm_smmu_domain *smmu_domain, struct list_head *list)
+{
+	struct page *page, *tmp;
+
+	list_for_each_entry_safe(page, tmp, list, lru) {
+		list_del(&page->lru);
+		__free_pages(page, 0);
+	}
+}
+
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
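
arm_smmu_prealloc_memory() sizes the pool from the mapping span: one page per 1 GB for 2nd-level tables plus one page per 2 MB for 3rd-level tables, skipping atomic and secure domains; if an allocation fails it just stops, since the map path can retry atomically later. A standalone sketch of that arithmetic, assuming a hypothetical 100 MB mapping and re-implementing round_up() locally:

```c
#include <stdio.h>

/* Local stand-in for the kernel's round_up(): x rounded up to a
 * multiple of the power-of-two y. */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

#define SZ_2M (2UL << 20)
#define SZ_1G (1UL << 30)

int main(void)
{
	unsigned long size = 100UL << 20;	/* hypothetical 100 MB mapping */
	unsigned long nr = 0;

	nr += round_up(size, SZ_1G) >> 30;	/* 2nd-level entries: 1  */
	nr += round_up(size, SZ_2M) >> 21;	/* 3rd-level entries: 50 */

	printf("pages preallocated: %lu\n", nr);	/* prints 51 */
	return 0;
}
```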
@@ -2540,6 +2608,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+	LIST_HEAD(nonsecure_pool);

	if (!ops)
		return -ENODEV;
@@ -2547,15 +2616,19 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
	if (arm_smmu_is_slave_side_secure(smmu_domain))
		return msm_secure_smmu_map(domain, iova, paddr, size, prot);

+	arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
	arm_smmu_secure_domain_lock(smmu_domain);

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+	list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
	ret = ops->map(ops, iova, paddr, size, prot);
+	list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);

	arm_smmu_assign_table(smmu_domain);
	arm_smmu_secure_domain_unlock(smmu_domain);

+	arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
	return ret;
}
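
The paired list_splice_init() calls lend the preallocated pages to smmu_domain->nonsecure_pool for exactly the duration of the cb_lock critical section, where arm_smmu_alloc_pages_exact() can pop one instead of allocating under a spinlock; whatever survives is spliced back out and freed. A compressed userspace sketch of this lend-and-reclaim pattern, using a hand-rolled singly linked freelist in place of the kernel's list_head:

```c
#include <stdio.h>
#include <stdlib.h>

struct pnode { struct pnode *next; };

/* Move an entire chain from *src onto *dst and empty *src,
 * loosely mirroring list_splice_init(). */
static void splice_init(struct pnode **dst, struct pnode **src)
{
	struct pnode *tail = *src;

	if (!tail)
		return;
	while (tail->next)
		tail = tail->next;
	tail->next = *dst;
	*dst = *src;
	*src = NULL;
}

int main(void)
{
	struct pnode *prealloc = NULL, *domain_pool = NULL;
	int i;

	/* 1. Preallocate outside the lock, where sleeping is allowed. */
	for (i = 0; i < 3; i++) {
		struct pnode *p = malloc(sizeof(*p));
		if (!p)
			break;
		p->next = prealloc;
		prealloc = p;
	}

	/* 2. Lend the pages to the domain pool for the locked section;
	 *    the allocator would pop entries from domain_pool here. */
	splice_init(&domain_pool, &prealloc);

	/* 3. Take back and free whatever was not consumed. */
	splice_init(&prealloc, &domain_pool);
	while (prealloc) {
		struct pnode *p = prealloc;
		prealloc = p->next;
		free(p);
	}
	printf("leftover pages released\n");
	return 0;
}
```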

@@ -2625,6 +2698,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
	unsigned int idx_start, idx_end;
	struct scatterlist *sg_start, *sg_end;
	unsigned long __saved_iova_start;
+	LIST_HEAD(nonsecure_pool);

	if (!ops)
		return -ENODEV;
@@ -2636,6 +2710,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
	if (ret)
		return ret;

+	arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
	arm_smmu_secure_domain_lock(smmu_domain);

	__saved_iova_start = iova;
@@ -2654,9 +2729,12 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
		}

		spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+		list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
		ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
				  prot, &size);
+		list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);

		/* Returns 0 on error */
		if (!ret) {
			size_to_unmap = iova + size - __saved_iova_start;
@@ -2677,6 +2755,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
	}
	arm_smmu_secure_domain_unlock(smmu_domain);
	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+	arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
	return iova - __saved_iova_start;
}

+1 −1
@@ -199,7 +199,7 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
	mutex_lock(&iommu_meta->lock);
	iommu_map = msm_iommu_lookup(iommu_meta, dev);
	if (!iommu_map) {
-		iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);
+		iommu_map = kmalloc(sizeof(*iommu_map), GFP_KERNEL);

		if (!iommu_map) {
			ret = -ENOMEM;
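
The final hunk is consistent with the surrounding locking: __msm_dma_map_sg() performs this kmalloc() while holding iommu_meta->lock, a mutex, so the caller is allowed to sleep and GFP_KERNEL is both sufficient and more likely to succeed under memory pressure; GFP_ATOMIC is only warranted where blocking is forbidden, such as under a spinlock or in interrupt context.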