Documentation/devicetree/bindings/iommu/arm,smmu.txt (+3 −0)

@@ -74,6 +74,9 @@ conditions.
                   address size faults are due to a fundamental programming
                   error from which we don't care about recovering anyways.

+- qcom,tz-device-id : A string indicating the device ID for this SMMU known
+                  to TZ. See msm_tz_smmu.c for a full list of mappings.
+
 - qcom,skip-init : Disable resetting configuration for all context banks
                   during device reset. This is useful for targets where
                   some context banks are dedicated to other execution

drivers/iommu/Makefile (+1 −0)

@@ -7,6 +7,7 @@
 obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
+obj-$(CONFIG_MSM_TZ_SMMU) += io-pgtable-msm-secure.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_FAST) += io-pgtable-fast.o dma-mapping-fast.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_IOMMU_DEBUG) += iommu-debug.o
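For context, a hypothetical consumer node using the new property might look as
follows. The compatible string, unit address, register range, and the "APPS"
device-id value are illustrative only; the real ID strings come from the
mapping table in msm_tz_smmu.c:

	smmu@1e00000 {
		compatible = "qcom,smmu-v2";
		reg = <0x1e00000 0x40000>;
		#global-interrupts = <0>;
		#iommu-cells = <1>;
		/* which TZ device this SMMU instance is known as */
		qcom,tz-device-id = "APPS";
		/* context banks are TZ-owned; HLOS must not reset them */
		qcom,skip-init;
	};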
drivers/iommu/arm-smmu.c (+186 −38)

@@ -55,6 +55,8 @@
 #include <linux/remote_spinlock.h>
 #include <linux/ktime.h>
 #include <trace/events/iommu.h>
+#include <soc/qcom/msm_tz_smmu.h>
+#include <soc/qcom/scm.h>

 #include <linux/amba/bus.h>

@@ -174,6 +176,7 @@
 #define SMR_VALID			(1 << 31)
 #define SMR_MASK_SHIFT			16
 #define SMR_MASK_MASK			0x7FFF
+#define SID_MASK			0x7FFF
 #define SMR_ID_SHIFT			0

 #define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))

@@ -411,6 +414,7 @@ struct arm_smmu_device {
 	void __iomem			*base;
 	unsigned long			size;
+	phys_addr_t			phys_addr;
 	unsigned long			pgshift;

 #define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)

@@ -478,6 +482,8 @@ struct arm_smmu_device {
 	struct arm_smmu_arch_ops	*arch_ops;
 	void				*archdata;
+
+	enum tz_smmu_device_id		sec_id;
 };

 enum arm_smmu_context_fmt {

@@ -529,6 +535,7 @@ struct arm_smmu_domain {
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
 	u32				attributes;
+	bool				slave_side_secure;
 	u32				secure_vmid;
 	struct list_head		pte_info_list;
 	struct list_head		unassign_list;

@@ -588,6 +595,8 @@ static int arm_smmu_alloc_cb(struct iommu_domain *domain,
 static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;
 static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
+static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
+static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);

 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {

@@ -615,6 +624,22 @@ static bool is_dynamic_domain(struct iommu_domain *domain)
 	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
 }

+static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu)
+{
+	int ret;
+	int scm_ret = 0;
+
+	if (!arm_smmu_is_static_cb(smmu))
+		return 0;
+
+	ret = scm_restore_sec_cfg(smmu->sec_id, 0x0, &scm_ret);
+	if (ret || scm_ret) {
+		pr_err("scm call IOMMU_SECURE_CFG failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
 {
 	if (smmu_domain->attributes &

@@ -631,20 +656,32 @@ static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
 	return smmu->options & ARM_SMMU_OPT_STATIC_CB;
 }

-static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
+static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
 {
 	return (smmu_domain->secure_vmid != VMID_INVAL);
 }

+static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
+{
+	return arm_smmu_has_secure_vmid(smmu_domain) &&
+			smmu_domain->slave_side_secure;
+}
+
+static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
+{
+	return arm_smmu_has_secure_vmid(smmu_domain) &&
+			!smmu_domain->slave_side_secure;
+}
+
 static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
 {
-	if (arm_smmu_is_domain_secure(smmu_domain))
+	if (arm_smmu_is_master_side_secure(smmu_domain))
 		mutex_lock(&smmu_domain->assign_lock);
 }

 static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
 {
-	if (arm_smmu_is_domain_secure(smmu_domain))
+	if (arm_smmu_is_master_side_secure(smmu_domain))
 		mutex_unlock(&smmu_domain->assign_lock);
 }

@@ -1281,7 +1318,7 @@ static void *arm_smmu_alloc_pages_exact(void *cookie,
 	void *page;
 	struct arm_smmu_domain *smmu_domain = cookie;

-	if (!arm_smmu_is_domain_secure(smmu_domain))
+	if (!arm_smmu_is_master_side_secure(smmu_domain))
 		return alloc_pages_exact(size, gfp_mask);

 	page = arm_smmu_secure_pool_remove(smmu_domain, size);

@@ -1304,7 +1341,7 @@ static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;

-	if (!arm_smmu_is_domain_secure(smmu_domain)) {
+	if (!arm_smmu_is_master_side_secure(smmu_domain)) {
 		free_pages_exact(virt, size);
 		return;
 	}

@@ -1515,6 +1552,22 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 	return IRQ_HANDLED;
 }

+static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
+				  struct io_pgtable_cfg *pgtbl_cfg)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	int ret = 0;
+
+	if ((smmu->version > ARM_SMMU_V1) &&
+	    (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) &&
+	    !arm_smmu_has_secure_vmid(smmu_domain) &&
+	    arm_smmu_is_static_cb(smmu)) {
+		ret = msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
+	}
+	return ret;
+}
+
 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 				       struct io_pgtable_cfg *pgtbl_cfg)
 {
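The hunk above splits the old arm_smmu_is_domain_secure() into two disjoint
predicates: a domain with a valid secure VMID is either master-side secure
(HLOS builds the page tables and assigns them to the secure VM) or slave-side
secure (TZ owns the tables outright). A minimal user-space model of that
classification, with invented VMID values, just to make the truth table
explicit:

	#include <stdbool.h>
	#include <stdio.h>

	#define VMID_INVAL (-1)

	struct demo_domain {
		int secure_vmid;
		bool slave_side_secure;
	};

	static bool has_secure_vmid(const struct demo_domain *d)
	{
		return d->secure_vmid != VMID_INVAL;
	}

	static bool is_slave_side_secure(const struct demo_domain *d)
	{
		return has_secure_vmid(d) && d->slave_side_secure;
	}

	static bool is_master_side_secure(const struct demo_domain *d)
	{
		return has_secure_vmid(d) && !d->slave_side_secure;
	}

	int main(void)
	{
		struct demo_domain doms[] = {
			{ VMID_INVAL, false },	/* non-secure domain */
			{ 10, false },		/* master-side: HLOS owns PTs */
			{ 10, true },		/* slave-side: TZ owns PTs */
		};

		for (int i = 0; i < 3; i++)
			printf("vmid=%d slave=%d -> master_secure=%d slave_secure=%d\n",
			       doms[i].secure_vmid, doms[i].slave_side_secure,
			       is_master_side_secure(&doms[i]),
			       is_slave_side_secure(&doms[i]));
		return 0;
	}

Note that the pte_info/assign paths below keep using the master-side
predicate: only master-side domains hand page-table memory over to the
secure VM.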
@@ -1825,6 +1878,18 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		cfg->irptndx = cfg->cbndx;
 	}

+	if (arm_smmu_is_slave_side_secure(smmu_domain)) {
+		smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
+			.quirks		= quirks,
+			.pgsize_bitmap	= smmu->pgsize_bitmap,
+			.arm_msm_secure_cfg = {
+				.sec_id	= smmu->sec_id,
+				.cbndx	= cfg->cbndx,
+			},
+			.iommu_dev	= smmu->dev,
+		};
+		fmt = ARM_MSM_SECURE;
+	} else {
 		smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
 			.quirks		= quirks,
 			.pgsize_bitmap	= smmu->pgsize_bitmap,

@@ -1833,6 +1898,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			.tlb		= tlb,
 			.iommu_dev	= smmu->dev,
 		};
+	}

 	smmu_domain->smmu = smmu;
 	smmu_domain->dev = dev;

@@ -1865,6 +1931,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	/* Initialise the context bank with our page table cfg */
 	arm_smmu_init_context_bank(smmu_domain, &smmu_domain->pgtbl_cfg);

+	/*
+	 * For slave side secure, we may have to force the pagetable
+	 * format to V8L.
+	 */
+	ret = arm_smmu_set_pt_format(smmu_domain, &smmu_domain->pgtbl_cfg);
+	if (ret)
+		goto out_clear_smmu;
+
 	arm_smmu_arch_init_context_bank(smmu_domain, dev);

@@ -2173,6 +2246,8 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
 	const struct iommu_gather_ops *tlb;

 	tlb = smmu_domain->pgtbl_cfg.tlb;
+	if (!tlb)
+		return;

 	mutex_lock(&smmu->stream_map_mutex);
 	for_each_cfg_sme(fwspec, i, idx) {

@@ -2249,7 +2324,7 @@ static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
 	int source_vmid = VMID_HLOS;
 	struct arm_smmu_pte_info *pte_info, *temp;

-	if (!arm_smmu_is_domain_secure(smmu_domain))
+	if (!arm_smmu_is_master_side_secure(smmu_domain))
 		return ret;

 	list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {

@@ -2276,7 +2351,7 @@ static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
 	int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
 	struct arm_smmu_pte_info *pte_info, *temp;

-	if (!arm_smmu_is_domain_secure(smmu_domain))
+	if (!arm_smmu_is_master_side_secure(smmu_domain))
 		return;

 	list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {

@@ -2300,7 +2375,14 @@ static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_pte_info *pte_info;

-	BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
+	if (smmu_domain->slave_side_secure ||
+	    !arm_smmu_has_secure_vmid(smmu_domain)) {
+		if (smmu_domain->slave_side_secure)
+			WARN(1, "slave side secure is enforced\n");
+		else
+			WARN(1, "Invalid VMID is set!\n");
+		return;
+	}

 	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
 	if (!pte_info)

@@ -2316,7 +2398,14 @@ static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_pte_info *pte_info;

-	BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
+	if (smmu_domain->slave_side_secure ||
+	    !arm_smmu_has_secure_vmid(smmu_domain)) {
+		if (smmu_domain->slave_side_secure)
+			WARN(1, "slave side secure is enforced\n");
+		else
+			WARN(1, "Invalid VMID is set!\n");
+		return -EINVAL;
+	}

 	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
 	if (!pte_info)

@@ -3294,7 +3383,20 @@ static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
 	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
 	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;

+	if (arm_smmu_is_static_cb(smmu)) {
+		phys_addr_t impl_def1_base_phys = impl_def1_base -
+					smmu->base + smmu->phys_addr;
+
+		if (scm_io_write(impl_def1_base_phys +
+				 IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
+			dev_err(smmu->dev,
+				"scm_io_write fail. SMMU might not be halted\n");
+			return -EINVAL;
+		}
+	} else {
 		writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	}

 	return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
 }

@@ -3316,8 +3418,19 @@ static void qsmmuv2_resume(struct arm_smmu_device *smmu)
 	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
 	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;

+	if (arm_smmu_is_static_cb(smmu)) {
+		phys_addr_t impl_def1_base_phys = impl_def1_base -
+					smmu->base + smmu->phys_addr;
+
+		if (scm_io_write(impl_def1_base_phys +
+				 IMPL_DEF1_MICRO_MMU_CTRL, reg))
+			dev_err(smmu->dev,
+				"scm_io_write fail. SMMU might not be resumed\n");
+	} else {
 		writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	}
 }

 static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
 {
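When the context banks are static (TZ-owned), the MICRO_MMU_CTRL register is
secured, so halt/resume cannot go through the ioremapped window; the driver
rebases the register's offset onto smmu->phys_addr and asks TZ to perform the
write via scm_io_write(). A standalone sketch of just the address arithmetic,
with invented base addresses and register offset:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* stand-ins for smmu->base (ioremap cookie) and smmu->phys_addr */
		uint64_t virt_base = 0xffff0000c0000000ULL;
		uint64_t phys_base = 0x01e00000ULL;
		/* impl_def1_base = smmu->base + <implementation-defined offset> */
		uint64_t impl_def1 = virt_base + 0x2000;
		/* IMPL_DEF1_MICRO_MMU_CTRL; offset value is illustrative */
		uint64_t reg_off = 0x14;

		/* impl_def1_base_phys = impl_def1_base - smmu->base + smmu->phys_addr */
		uint64_t impl_def1_phys = (impl_def1 - virt_base) + phys_base;

		printf("scm_io_write target: %#llx\n",
		       (unsigned long long)(impl_def1_phys + reg_off));
		return 0;
	}

The same virtual-to-physical rebasing trick works for any register in the
window, which is why the patch records res->start in smmu->phys_addr at probe
time (see the dt_probe hunk further down).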
@@ -3549,6 +3662,9 @@ static int arm_smmu_alloc_cb(struct iommu_domain *domain,
 		cb = smmu->s2crs[idx].cbndx;
 	}

+	if (cb >= 0 && arm_smmu_is_static_cb(smmu))
+		smmu_domain->slave_side_secure = true;
+
 	if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
 		mutex_unlock(&smmu->stream_map_mutex);
 		return __arm_smmu_alloc_bitmap(smmu->context_map,

@@ -3558,6 +3674,7 @@ static int arm_smmu_alloc_cb(struct iommu_domain *domain,
 	for (i = 0; i < smmu->num_mapping_groups; i++) {
 		if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
+			if (!arm_smmu_is_static_cb(smmu))
 				smmu->s2crs[i].cb_handoff = false;
 			smmu->s2crs[i].count -= 1;
 		}

@@ -3810,6 +3927,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	bool cttw_dt, cttw_reg;
 	int i;

+	if (arm_smmu_restore_sec_cfg(smmu))
+		return -ENODEV;
+
 	dev_dbg(smmu->dev, "probing hardware configuration...\n");
 	dev_dbg(smmu->dev, "SMMUv%d with:\n",
 			smmu->version == ARM_SMMU_V2 ? 2 : 1);

@@ -3887,8 +4007,10 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	 * bits are set, so check each one separately. We can reject
 	 * masters later if they try to claim IDs outside these masks.
 	 */
+	if (!arm_smmu_is_static_cb(smmu)) {
 		for (i = 0; i < size; i++) {
-			smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
+			smr = readl_relaxed(
+					gr0_base + ARM_SMMU_GR0_SMR(i));
 			if (!(smr & SMR_VALID))
 				break;
 		}

@@ -3907,6 +4029,10 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
 		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
 		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+	} else {
+		smmu->smr_mask_mask = SMR_MASK_MASK;
+		smmu->streamid_mask = SID_MASK;
+	}

 	/* Zero-initialised to mark as invalid */
 	smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),

@@ -4070,7 +4196,23 @@ static const struct of_device_id arm_smmu_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

+#ifdef CONFIG_MSM_TZ_SMMU
+int register_iommu_sec_ptbl(void)
+{
+	struct device_node *np;
+
+	for_each_matching_node(np, arm_smmu_of_match)
+		if (of_find_property(np, "qcom,tz-device-id", NULL) &&
+		    of_device_is_available(np))
+			break;
+	if (!np)
+		return -ENODEV;
+
+	of_node_put(np);
+
+	return msm_iommu_sec_pgtbl_init();
+}
+#endif
+
 static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
 {
 	if (!dev->iommu_fwspec)

@@ -4124,6 +4266,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	smmu->arch_ops = data->arch_ops;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res)
+		smmu->phys_addr = res->start;
 	smmu->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(smmu->base))
 		return PTR_ERR(smmu->base);

@@ -4175,6 +4319,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	if (err)
 		goto out_exit_power_resources;

+	smmu->sec_id = msm_dev_to_device_id(dev);
 	err = arm_smmu_device_cfg_probe(smmu);
 	if (err)
 		goto out_power_off;

@@ -4302,6 +4447,9 @@ static int __init arm_smmu_init(void)
 		return ret;

 	ret = platform_driver_register(&arm_smmu_driver);
+#ifdef CONFIG_MSM_TZ_SMMU
+	ret = register_iommu_sec_ptbl();
+#endif
 	registered = !ret;
 	trace_smmu_init(ktime_us_delta(ktime_get(), cur));
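With static context banks, probing the SMRs by writing to them would trap, so
the cfg_probe hunk above falls back to the architectural maximums
SMR_MASK_MASK and SID_MASK instead. A small standalone decoder showing how
those masks carve a raw SMR value into its valid/mask/ID fields (the sample
register value is invented):

	#include <stdint.h>
	#include <stdio.h>

	#define SMR_VALID	(1U << 31)
	#define SMR_MASK_SHIFT	16
	#define SMR_MASK_MASK	0x7FFF
	#define SID_MASK	0x7FFF
	#define SMR_ID_SHIFT	0

	int main(void)
	{
		uint32_t smr = SMR_VALID | (0x00ffU << SMR_MASK_SHIFT) | 0x1234U;

		printf("valid=%u mask=%#x id=%#x\n",
		       !!(smr & SMR_VALID),
		       (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK,
		       (smr >> SMR_ID_SHIFT) & SID_MASK);
		return 0;
	}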
drivers/iommu/io-pgtable-msm-secure.c (new file, +350 −0)

/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	"io-pgtable-msm-secure: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <soc/qcom/scm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

#include "io-pgtable.h"

#define IOMMU_SECURE_PTBL_SIZE		3
#define IOMMU_SECURE_PTBL_INIT		4
#define IOMMU_SECURE_MAP2_FLAT		0x12
#define IOMMU_SECURE_UNMAP2_FLAT	0x13
#define IOMMU_TLBINVAL_FLAG		0x00000001

#define io_pgtable_to_data(x)						\
	container_of((x), struct msm_secure_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

struct msm_secure_io_pgtable {
	struct io_pgtable iop;
};

int msm_iommu_sec_pgtbl_init(void)
{
	int psize[2] = {0, 0};
	unsigned int spare = 0;
	int ret, ptbl_ret = 0;
	struct device dev = {0};
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs = 0;

	if (is_scm_armv8()) {
		struct scm_desc desc = {0};

		desc.args[0] = spare;
		desc.arginfo = SCM_ARGS(1);
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_PTBL_SIZE), &desc);
		psize[0] = desc.ret[0];
		psize[1] = desc.ret[1];
		if (ret || psize[1]) {
			pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
			return ret;
		}
	}

	/* Now allocate memory for the secure page tables */
	attrs = DMA_ATTR_NO_KERNEL_MAPPING;
	dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	arch_setup_dma_ops(&dev, 0, 0, NULL, 1);
	cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
		       __func__, psize[0]);
		return -ENOMEM;
	}

	if (is_scm_armv8()) {
		struct scm_desc desc = {0};

		desc.args[0] = paddr;
		desc.args[1] = psize[0];
		desc.args[2] = 0;
		desc.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_PTBL_INIT), &desc);
		ptbl_ret = desc.ret[0];

		if (ret) {
			pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
			return ret;
		}

		if (ptbl_ret) {
			pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
			/* ret is 0 here; report a real error instead */
			return -EINVAL;
		}
	}

	return 0;
}
EXPORT_SYMBOL(msm_iommu_sec_pgtbl_init);

static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
			  phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	void *flush_va, *flush_va_end;
	struct scm_desc desc = {0};
	int ret = -EINVAL;
	u32 resp = 0;	/* initialised: read below even if the call is skipped */

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(paddr, SZ_1M) ||
	    !IS_ALIGNED(size, SZ_1M))
		return -EINVAL;

	desc.args[0] = virt_to_phys(&paddr);
	desc.args[1] = 1;
	desc.args[2] = size;
	desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[5] = iova;
	desc.args[6] = size;
	desc.args[7] = 0;

	flush_va = &paddr;
	flush_va_end = (void *)
		(((unsigned long)flush_va) + sizeof(phys_addr_t));

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
	dmac_clean_range(flush_va, flush_va_end);

	desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL, SCM_VAL, SCM_VAL);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];
	}

	if (ret || resp)
		return -EINVAL;

	return 0;
}
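Since TZ walks the page tables on HLOS's behalf, msm_secure_map() only
marshals arguments: it validates the 1 MB granularity and packs an 8-slot
descriptor whose first entry is the physical address of a (one-entry) PA
list. A user-space model of just that marshalling, with invented sec_id and
cbndx values and a plain pointer standing in for virt_to_phys():

	#include <stdint.h>
	#include <stdio.h>

	#define SZ_1M (1UL << 20)
	#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

	int main(void)
	{
		uint64_t iova = 0x10000000, paddr = 0x80000000, size = 2 * SZ_1M;
		uint64_t args[8];

		if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(paddr, SZ_1M) ||
		    !IS_ALIGNED(size, SZ_1M))
			return 1;	/* the driver returns -EINVAL here */

		args[0] = (uint64_t)(uintptr_t)&paddr;	/* PA of the PA list */
		args[1] = 1;		/* number of list entries */
		args[2] = size;		/* bytes covered per entry */
		args[3] = 0x15;		/* sec_id (invented) */
		args[4] = 0;		/* context bank index (invented) */
		args[5] = iova;
		args[6] = size;
		args[7] = 0;

		for (int i = 0; i < 8; i++)
			printf("args[%d] = %#llx\n", i,
			       (unsigned long long)args[i]);
		return 0;
	}

The dmac_clean_range() before the SMC matters because TZ reads the PA list
through physical addresses: the list must be cleaned out of the CPU caches to
the point of coherency first.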
static dma_addr_t msm_secure_get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	dma_addr_t pa = sg_dma_address(sg);

	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			     struct scatterlist *sg, unsigned int nents,
			     int iommu_prot, size_t *size)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = -EINVAL;
	struct scatterlist *tmp, *sgiter;
	dma_addr_t *pa_list = NULL;
	unsigned int cnt, offset = 0, chunk_offset = 0;
	dma_addr_t pa;
	void *flush_va, *flush_va_end;
	unsigned long len = 0;
	struct scm_desc desc = {0};
	int i;
	u32 resp;

	for_each_sg(sg, tmp, nents, i)
		len += tmp->length;

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return -EINVAL;

	if (sg->length == len) {
		cnt = 1;
		pa = msm_secure_get_phys_addr(sg);
		if (!IS_ALIGNED(pa, SZ_1M))
			return -EINVAL;

		desc.args[0] = virt_to_phys(&pa);
		desc.args[1] = cnt;
		desc.args[2] = len;
		flush_va = &pa;
	} else {
		sgiter = sg;
		if (!IS_ALIGNED(sgiter->length, SZ_1M))
			return -EINVAL;
		cnt = sg->length / SZ_1M;
		while ((sgiter = sg_next(sgiter))) {
			if (!IS_ALIGNED(sgiter->length, SZ_1M))
				return -EINVAL;
			cnt += sgiter->length / SZ_1M;
		}

		pa_list = kmalloc_array(cnt, sizeof(*pa_list), GFP_KERNEL);
		if (!pa_list)
			return -ENOMEM;

		sgiter = sg;
		cnt = 0;
		pa = msm_secure_get_phys_addr(sgiter);
		while (offset < len) {
			if (!IS_ALIGNED(pa, SZ_1M)) {
				kfree(pa_list);
				return -EINVAL;
			}

			pa_list[cnt] = pa + chunk_offset;
			chunk_offset += SZ_1M;
			offset += SZ_1M;
			cnt++;

			if (chunk_offset >= sgiter->length && offset < len) {
				chunk_offset = 0;
				sgiter = sg_next(sgiter);
				pa = msm_secure_get_phys_addr(sgiter);
			}
		}

		desc.args[0] = virt_to_phys(pa_list);
		desc.args[1] = cnt;
		desc.args[2] = SZ_1M;
		flush_va = pa_list;
	}

	desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[5] = iova;
	desc.args[6] = len;
	desc.args[7] = 0;
	desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL, SCM_VAL, SCM_VAL);

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
	flush_va_end = (void *)
		(((unsigned long)flush_va) + (cnt * sizeof(*pa_list)));
	dmac_clean_range(flush_va, flush_va_end);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];

		if (ret || resp)
			ret = -EINVAL;
		else
			ret = len;
	}

	kfree(pa_list);
	return ret;
}

static size_t msm_secure_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			       size_t len)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = -EINVAL;
	struct scm_desc desc = {0};

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return ret;

	desc.args[0] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[1] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[2] = iova;
	desc.args[3] = len;
	desc.args[4] = IOMMU_TLBINVAL_FLAG;
	desc.arginfo = SCM_ARGS(5);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_UNMAP2_FLAT), &desc);
		if (!ret)
			ret = len;
	}

	return ret;
}
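The trickiest part of msm_secure_map_sg() is the multi-segment branch: every
scatterlist segment is split into 1 MB chunks and TZ receives one physical
address per chunk. A user-space model of that loop, with segments reduced to
plain {pa, len} pairs and invented addresses:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define SZ_1M (1UL << 20)

	struct seg { uint64_t pa; uint64_t len; };

	int main(void)
	{
		struct seg segs[] = {
			{ 0x80000000, 2 * SZ_1M },
			{ 0x90000000, 1 * SZ_1M },
		};
		unsigned int nsegs = 2, cnt = 0, i = 0;
		uint64_t len = 0, offset = 0, chunk_offset = 0;

		for (unsigned int j = 0; j < nsegs; j++)
			len += segs[j].len;

		uint64_t *pa_list = malloc((len / SZ_1M) * sizeof(*pa_list));
		if (!pa_list)
			return 1;

		uint64_t pa = segs[0].pa;
		while (offset < len) {
			pa_list[cnt++] = pa + chunk_offset;
			chunk_offset += SZ_1M;
			offset += SZ_1M;
			/* hop to the next segment once this one is consumed */
			if (chunk_offset >= segs[i].len && offset < len) {
				chunk_offset = 0;
				pa = segs[++i].pa;
			}
		}

		for (unsigned int j = 0; j < cnt; j++)
			printf("pa_list[%u] = %#llx\n", j,
			       (unsigned long long)pa_list[j]);
		free(pa_list);
		return 0;
	}

Running this prints three entries (0x80000000, 0x80100000, 0x90000000), which
is exactly the flat list the driver hands to the IOMMU_SECURE_MAP2_FLAT call
with desc.args[2] = SZ_1M as the per-entry size.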
static phys_addr_t msm_secure_iova_to_phys(struct io_pgtable_ops *ops,
					   unsigned long iova)
{
	/* TZ owns the tables; the walk cannot be performed from HLOS */
	return -EINVAL;
}

static struct msm_secure_io_pgtable *
msm_secure_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
{
	struct msm_secure_io_pgtable *data;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= msm_secure_map,
		.map_sg		= msm_secure_map_sg,
		.unmap		= msm_secure_unmap,
		.iova_to_phys	= msm_secure_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
msm_secure_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct msm_secure_io_pgtable *data =
		msm_secure_alloc_pgtable_data(cfg);

	/* was missing: taking &data->iop on a NULL data is undefined */
	if (!data)
		return NULL;

	return &data->iop;
}

static void msm_secure_free_pgtable(struct io_pgtable *iop)
{
	struct msm_secure_io_pgtable *data = io_pgtable_to_data(iop);

	kfree(data);
}

struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns = {
	.alloc	= msm_secure_alloc_pgtable,
	.free	= msm_secure_free_pgtable,
};

drivers/iommu/io-pgtable.c (+3 −0)

@@ -44,6 +44,9 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
 #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
 	[ARM_V8L_FAST] = &io_pgtable_av8l_fast_init_fns,
 #endif
+#ifdef CONFIG_MSM_TZ_SMMU
+	[ARM_MSM_SECURE] = &io_pgtable_arm_msm_secure_init_fns,
+#endif
 };

 static struct dentry *io_pgtable_top;
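With the format registered in io_pgtable_init_table, a caller selects it
through the existing io-pgtable API, which is what
arm_smmu_init_domain_context() does once it picks fmt = ARM_MSM_SECURE. A
hedged sketch of that call pattern; ARM_MSM_SECURE and arm_msm_secure_cfg
come from this series' io-pgtable.h changes (not shown in the hunks above),
and the sample iova/paddr values are placeholders:

	static int example_alloc_secure_ops(struct arm_smmu_domain *smmu_domain,
					    enum tz_smmu_device_id sec_id,
					    u8 cbndx)
	{
		struct io_pgtable_ops *ops;
		struct io_pgtable_cfg cfg = {
			.pgsize_bitmap = SZ_1M,	/* TZ maps whole megabytes */
			.arm_msm_secure_cfg = {
				.sec_id	= sec_id,	/* msm_dev_to_device_id() */
				.cbndx	= cbndx,	/* TZ-owned context bank */
			},
		};

		ops = alloc_io_pgtable_ops(ARM_MSM_SECURE, &cfg, smmu_domain);
		if (!ops)
			return -ENOMEM;

		/* map/unmap now make SCM calls instead of writing PTEs */
		return ops->map(ops, 0x10000000, 0x80000000, SZ_1M,
				IOMMU_READ | IOMMU_WRITE);
	}

The design point worth noting: because alloc/free and map/unmap are the whole
surface, HLOS never touches page-table memory for these domains, and
iova_to_phys has nothing it can legitimately return.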