Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 140eb545 authored by Isaac J. Manjarres, committed by Gerrit - the friendly Code Review server
Browse files

iommu/arm-smmu: Deprecate msm_iommu_flush ops



msm_iommu_flush_ops was originally introduced to support calling
into the IOMMU driver to allocate and free the page table memory.

Since support for calling into the IOMMU driver to manage the page
table memory was recently reimplemented in a different way, the
msm_iommu_flush_ops structure is no longer required, so remove it.

Change-Id: I052dab627b4008e7661511d2e4e7a7e26d3b83dc
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
parent 5bebd12b
Loading
Loading
Loading
Loading
+20 −24
Original line number Diff line number Diff line
@@ -1115,39 +1115,35 @@ static const struct iommu_pgtable_ops arm_smmu_pgtable_ops = {
	.free_pgtable  = arm_smmu_free_pages_exact,
};

#define ARM_SMMU_INIT_MSM_TLB_OPS(_tlb_flush_all) \
	{\
		.tlb_ops = { \
			.tlb_flush_all = _tlb_flush_all, \
			.tlb_flush_walk = arm_smmu_tlb_inv_walk, \
			.tlb_flush_leaf = arm_smmu_tlb_inv_leaf, \
			.tlb_add_page = arm_smmu_tlb_add_page, \
		} \
	}

#define ARM_SMMU_MSM_TLB_OPS_S1	\
	ARM_SMMU_INIT_MSM_TLB_OPS(arm_smmu_tlb_inv_context_s1)

#define ARM_SMMU_MSM_TLB_OPS_S2_V2 \
	ARM_SMMU_INIT_MSM_TLB_OPS(arm_smmu_tlb_inv_context_s2)

#define ARM_SMMU_MSM_TLB_OPS_S2_V1 \
	ARM_SMMU_INIT_MSM_TLB_OPS(arm_smmu_tlb_inv_context_s2)

static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb			= ARM_SMMU_MSM_TLB_OPS_S1,
	.tlb = {
		.tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
		.tlb_flush_walk = arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
		.tlb_add_page   = arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s1,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb			= ARM_SMMU_MSM_TLB_OPS_S2_V2,
	.tlb = {
		.tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk = arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
		.tlb_add_page   = arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s2,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb			= ARM_SMMU_MSM_TLB_OPS_S2_V1,
	.tlb = {
		.tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk = arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
		.tlb_add_page   = arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync		= arm_smmu_tlb_sync_vmid,
};
@@ -2176,7 +2172,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= is_iommu_pt_coherent(smmu_domain),
		.tlb		= &smmu_domain->flush_ops->tlb.tlb_ops,
		.tlb		= &smmu_domain->flush_ops->tlb,
		.iommu_pgtable_ops = &arm_smmu_pgtable_ops,
		.iommu_dev	= smmu->dev,
	};
@@ -3242,7 +3238,7 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
			arm_smmu_rpm_put(smmu);
			return;
		}
		smmu_domain->flush_ops->tlb.tlb_ops.tlb_flush_all(smmu_domain);
		smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
		arm_smmu_domain_power_off(domain, smmu);
		arm_smmu_rpm_put(smmu);
	}
+1 −1
Original line number Diff line number Diff line
@@ -461,7 +461,7 @@ enum arm_smmu_domain_stage {
};

struct arm_smmu_flush_ops {
	struct msm_iommu_flush_ops	tlb;
	struct iommu_flush_ops		tlb;
	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
+0 −30
Original line number Diff line number Diff line
@@ -114,36 +114,6 @@ static void mod_pages_allocated(int nr_pages)
}
#endif

/*
 * io_pgtable_alloc_pages_exact() - allocate memory for IOMMU page tables.
 *
 * Recovers the driver's msm_iommu_flush_ops from the embedded tlb_ops
 * pointer in @cfg (via container_of, see to_msm_iommu_flush_ops) and
 * delegates the allocation to the driver's alloc_pages_exact callback
 * when one is provided; otherwise falls back to the kernel's plain
 * alloc_pages_exact(). On success, the page-accounting counter is
 * bumped by the number of pages backing @size.
 *
 * Returns the allocated memory, or NULL on failure.
 */
void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				   size_t size, gfp_t gfp_mask)
{
	void *ret;
	/* cfg->tlb points at the tlb_ops member embedded in the driver's
	 * msm_iommu_flush_ops; container_of recovers the outer struct.
	 */
	struct msm_iommu_flush_ops *ops = to_msm_iommu_flush_ops(cfg->tlb);

	if (ops->alloc_pages_exact)
		ret = ops->alloc_pages_exact(cookie, size, gfp_mask);
	else
		ret = alloc_pages_exact(size, gfp_mask);

	/* Account in whole pages: 1 << get_order(size) pages cover size. */
	if (likely(ret))
		mod_pages_allocated(1 << get_order(size));

	return ret;
}

/*
 * io_pgtable_free_pages_exact() - free memory previously allocated for
 * IOMMU page tables by io_pgtable_alloc_pages_exact().
 *
 * Delegates to the driver's free_pages_exact callback when one is
 * provided in its msm_iommu_flush_ops (recovered from @cfg via
 * container_of); otherwise falls back to the kernel's plain
 * free_pages_exact(). The page-accounting counter is decremented
 * unconditionally by the number of pages backing @size.
 */
void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				 void *virt, size_t size)
{
	struct msm_iommu_flush_ops *ops = to_msm_iommu_flush_ops(cfg->tlb);

	if (ops->free_pages_exact)
		ops->free_pages_exact(cookie, virt, size);
	else
		free_pages_exact(virt, size);

	/* Mirror the allocation-side accounting in whole pages. */
	mod_pages_allocated(-(1 << get_order(size)));
}

void *io_pgtable_alloc_pages(struct io_pgtable_cfg *cfg, void *cookie,
			     int order, gfp_t gfp_mask)
{
+0 −17
Original line number Diff line number Diff line
@@ -7,8 +7,6 @@

#include <linux/scatterlist.h>

#define to_msm_iommu_flush_ops(_tlb_ops) \
	container_of(_tlb_ops, struct msm_iommu_flush_ops, tlb_ops)
#define to_msm_io_pgtable_info(_cfg) \
	container_of(_cfg, struct msm_io_pgtable_info, pgtbl_cfg)

@@ -56,21 +54,6 @@ struct iommu_flush_ops {
			     unsigned long iova, size_t granule, void *cookie);
};

/**
 * struct msm_iommu_flush_ops - MSM and standard IOMMU callbacks for TLB and
 * page table management.
 *
 * @alloc_pages_exact: Allocate page table memory (optional, defaults to
 *                     alloc_pages_exact)
 * @free_pages_exact:  Free page table memory (optional, defaults to
 *                     free_pages_exact)
 * @tlb_ops:           Embedded standard TLB maintenance callbacks; the
 *                     outer structure is recovered from a pointer to this
 *                     member via to_msm_iommu_flush_ops()
 */
struct msm_iommu_flush_ops {
	void *(*alloc_pages_exact)(void *cookie, size_t size, gfp_t gfp_mask);
	void (*free_pages_exact)(void *cookie, void *virt, size_t size);
	const struct iommu_flush_ops tlb_ops;
};

/**
 * struct iommu_pgtable_ops - IOMMU callbacks for page table memory management.
 *