drivers/iommu/arm-smmu.c (+92 −37)

@@ -449,6 +449,7 @@ struct arm_smmu_domain {
 	struct list_head	unassign_list;
 	struct mutex		assign_lock;
 	struct list_head	secure_pool_list;
+	bool			non_fatal_faults;
 };
 
 static struct iommu_ops arm_smmu_ops;

@@ -781,16 +782,16 @@ static int arm_smmu_enable_regulators(struct arm_smmu_device *smmu)
 	if (smmu->gdsc) {
 		ret = regulator_enable(smmu->gdsc);
-		if (ret)
+		if (WARN_ON_ONCE(ret))
 			goto out;
 	}
 
 	ret = arm_smmu_request_bus(smmu);
-	if (ret)
+	if (WARN_ON_ONCE(ret))
 		goto out_reg;
 
 	ret = arm_smmu_prepare_clocks(smmu);
-	if (ret)
+	if (WARN_ON_ONCE(ret))
 		goto out_bus;
 
 	return ret;

@@ -808,9 +809,11 @@ static int arm_smmu_enable_clocks(struct arm_smmu_device *smmu)
 {
 	int ret = 0;
 
-	arm_smmu_enable_regulators(smmu);
+	ret = arm_smmu_enable_regulators(smmu);
+	if (unlikely(ret))
+		return ret;
 	ret = arm_smmu_enable_clocks_atomic(smmu);
-	if (ret)
+	if (unlikely(ret))
 		arm_smmu_disable_regulators(smmu);
 
 	return ret;

@@ -836,10 +839,11 @@ static int arm_smmu_enable_clocks_atomic(struct arm_smmu_device *smmu)
 	for (i = 0; i < smmu->num_clocks; ++i) {
 		ret = clk_enable(smmu->clocks[i]);
-		if (ret) {
+		if (WARN_ON_ONCE(ret)) {
 			dev_err(smmu->dev, "Couldn't enable clock #%d\n", i);
 			while (i--)
 				clk_disable(smmu->clocks[i]);
+			smmu->clock_refs_count--;
 			break;
 		}
 	}

@@ -945,7 +949,8 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	if (!smmu)
 		return;
 
-	arm_smmu_enable_clocks_atomic(smmu);
+	if (arm_smmu_enable_clocks_atomic(smmu))
+		return;
 
 	if (stage1) {
 		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

@@ -1158,6 +1163,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	void __iomem *gr1_base;
 	phys_addr_t phys_soft;
 	u32 frsynra;
+	bool non_fatal_fault = smmu_domain->non_fatal_faults;
 
 	static DEFINE_RATELIMIT_STATE(_rs,
 				      DEFAULT_RATELIMIT_INTERVAL,

@@ -1166,13 +1172,17 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	mutex_lock(&smmu_domain->init_mutex);
 	smmu = smmu_domain->smmu;
 	if (!smmu) {
+		ret = IRQ_HANDLED;
 		pr_err("took a fault on a detached domain (%p)\n", domain);
-		return IRQ_HANDLED;
+		goto out_unlock;
 	}
 	ctx_hang_errata = smmu->options & ARM_SMMU_OPT_ERRATA_CTX_FAULT_HANG;
 	fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
 
-	arm_smmu_enable_clocks(smmu);
+	if (arm_smmu_enable_clocks(smmu)) {
+		ret = IRQ_NONE;
+		goto out_unlock;
+	}
 
 	gr1_base = ARM_SMMU_GR1(smmu);
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

@@ -1180,8 +1190,8 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	if (!(fsr & FSR_FAULT)) {
 		arm_smmu_disable_clocks(smmu);
-		mutex_unlock(&smmu_domain->init_mutex);
-		return IRQ_NONE;
+		ret = IRQ_NONE;
+		goto out_unlock;
 	}
 
 	if (fatal_asf && (fsr & FSR_ASF)) {

@@ -1248,6 +1258,11 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 		}
 		ret = IRQ_NONE;
 		resume = RESUME_TERMINATE;
+		if (!non_fatal_fault) {
+			dev_err(smmu->dev,
+				"Unhandled context faults are fatal on this domain. Going down now...\n");
+			BUG();
+		}
 	}
 
 	/*

@@ -1283,6 +1298,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	}
 
 	arm_smmu_disable_clocks(smmu);
+out_unlock:
 	mutex_unlock(&smmu_domain->init_mutex);
 
 	return ret;

@@ -1294,7 +1310,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 	struct arm_smmu_device *smmu = dev;
 	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
 
-	arm_smmu_enable_clocks(smmu);
+	if (arm_smmu_enable_clocks(smmu))
+		return IRQ_NONE;
 	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
 	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);

@@ -1333,10 +1350,13 @@ static void arm_smmu_trigger_fault(struct iommu_domain *domain,
 	smmu = smmu_domain->smmu;
 
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
-	arm_smmu_enable_clocks(smmu);
+	if (arm_smmu_enable_clocks(smmu))
+		return;
 	dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
 		flags, cfg->cbndx);
 	writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
+	/* give the interrupt time to fire... */
+	msleep(1000);
 
 	arm_smmu_disable_clocks(smmu);
 }

@@ -1602,7 +1622,8 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	void __iomem *cb_base;
 	int irq;
 
-	arm_smmu_enable_clocks(smmu_domain->smmu);
+	if (arm_smmu_enable_clocks(smmu_domain->smmu))
+		goto free_irqs;
 	/*
 	 * Disable the context bank and free the page tables before freeing
 	 * it.

@@ -1612,13 +1633,14 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	arm_smmu_tlb_inv_context(smmu_domain);
 
+	arm_smmu_disable_clocks(smmu_domain->smmu);
+
+free_irqs:
 	if (cfg->irptndx != INVALID_IRPTNDX) {
 		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 		free_irq(irq, domain);
 	}
 
-	arm_smmu_disable_clocks(smmu_domain->smmu);
-
 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
 	smmu_domain->smmu = NULL;
 }

@@ -1791,7 +1813,8 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
 	 * We *must* clear the S2CR first, because freeing the SMR means
 	 * that it can be re-allocated immediately.
 	 */
-	arm_smmu_enable_clocks(smmu);
+	if (arm_smmu_enable_clocks(smmu))
+		return;
 	for (i = 0; i < cfg->num_streamids; ++i) {
 		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];

@@ -1924,12 +1947,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	if (dev->archdata.iommu) {
 		dev_err(dev, "already attached to IOMMU domain\n");
-		mutex_unlock(&smmu->attach_lock);
-		mutex_unlock(&smmu_domain->init_mutex);
-		return -EEXIST;
+		ret = -EEXIST;
+		goto err_unlock;
 	}
 
-	if (!smmu->attach_count++) {
+	if (!smmu->attach_count) {
 		/*
 		 * We need an extra power vote if we can't retain register
 		 * settings across a power collapse, or if this is an

@@ -1940,14 +1962,22 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		 * client is attached in these cases.
 		 */
 		if (!(smmu->options & ARM_SMMU_OPT_REGISTER_SAVE) ||
-		    atomic_ctx)
-			arm_smmu_enable_regulators(smmu);
-		arm_smmu_enable_clocks(smmu);
+		    atomic_ctx) {
+			ret = arm_smmu_enable_regulators(smmu);
+			if (ret)
+				goto err_unlock;
+		}
+		ret = arm_smmu_enable_clocks(smmu);
+		if (ret)
+			goto err_disable_regulators;
 		arm_smmu_device_reset(smmu);
 		arm_smmu_impl_def_programming(smmu);
 	} else {
-		arm_smmu_enable_clocks(smmu);
+		ret = arm_smmu_enable_clocks(smmu);
+		if (ret)
+			goto err_unlock;
 	}
+	smmu->attach_count++;
 
 	/* Ensure that the domain is finalised */
 	ret = arm_smmu_init_domain_context(domain, smmu);

@@ -1995,9 +2025,12 @@ err_destroy_domain_context:
 	arm_smmu_destroy_domain_context(domain);
 err_disable_clocks:
 	arm_smmu_disable_clocks(smmu);
-	if (!--smmu->attach_count &&
+	--smmu->attach_count;
+err_disable_regulators:
+	if (!smmu->attach_count &&
 	    (!(smmu->options & ARM_SMMU_OPT_REGISTER_SAVE) || atomic_ctx))
 		arm_smmu_disable_regulators(smmu);
+err_unlock:
 	mutex_unlock(&smmu->attach_lock);
 	mutex_unlock(&smmu_domain->init_mutex);
 	return ret;

@@ -2007,7 +2040,8 @@ static void arm_smmu_power_off(struct arm_smmu_device *smmu,
 			       bool force_regulator_disable)
 {
 	/* Turn the thing off */
-	arm_smmu_enable_clocks(smmu);
+	if (arm_smmu_enable_clocks(smmu))
+		return;
 	writel_relaxed(sCR0_CLIENTPD,
 		       ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 	arm_smmu_disable_clocks(smmu);

@@ -2023,10 +2057,12 @@ static void arm_smmu_detach_dynamic(struct iommu_domain *domain,
 	mutex_lock(&smmu->attach_lock);
 	if (smmu->attach_count > 0) {
-		arm_smmu_enable_clocks(smmu_domain->smmu);
+		if (arm_smmu_enable_clocks(smmu_domain->smmu))
+			goto idr_remove;
 		arm_smmu_tlb_inv_context(smmu_domain);
 		arm_smmu_disable_clocks(smmu_domain->smmu);
 	}
 
+idr_remove:
 	idr_remove(&smmu->asid_idr, smmu_domain->cfg.asid);
 	smmu_domain->cfg.asid = INVALID_ASID;
 	smmu_domain->smmu = NULL;

@@ -2242,12 +2278,17 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	BUG_ON(atomic_ctx && !smmu_domain->smmu);
 
 	if (atomic_ctx) {
-		arm_smmu_enable_clocks_atomic(smmu_domain->smmu);
+		if (arm_smmu_enable_clocks_atomic(smmu_domain->smmu))
+			return 0;
 	} else {
 		mutex_lock(&smmu_domain->init_mutex);
 		arm_smmu_secure_domain_lock(smmu_domain);
-		if (smmu_domain->smmu)
-			arm_smmu_enable_clocks(smmu_domain->smmu);
+		if (smmu_domain->smmu &&
+		    arm_smmu_enable_clocks(smmu_domain->smmu)) {
+			arm_smmu_secure_domain_unlock(smmu_domain);
+			mutex_unlock(&smmu_domain->init_mutex);
+			return 0;
+		}
 	}
 
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);

@@ -2357,7 +2398,8 @@ static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 	u64 phys;
 	unsigned long flags;
 
-	arm_smmu_enable_clocks(smmu);
+	if (arm_smmu_enable_clocks(smmu))
+		return 0;
 
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

@@ -2444,7 +2486,10 @@ static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
 	}
 
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
-	arm_smmu_enable_clocks(smmu);
+	if (arm_smmu_enable_clocks(smmu)) {
+		val = 0;
+		goto unlock;
+	}
 	val = readl_relaxed(cb_base + offset);
 	arm_smmu_disable_clocks(smmu);

@@ -2474,7 +2519,8 @@ static void arm_smmu_reg_write(struct iommu_domain *domain,
 	}
 
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
-	arm_smmu_enable_clocks(smmu);
+	if (arm_smmu_enable_clocks(smmu))
+		goto unlock;
 	writel_relaxed(val, cb_base + offset);
 	arm_smmu_disable_clocks(smmu);
 unlock:

@@ -2630,6 +2676,11 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 				    & (1 << DOMAIN_ATTR_DYNAMIC));
 		ret = 0;
 		break;
+	case DOMAIN_ATTR_NON_FATAL_FAULTS:
+		*((int *)data) = !!(smmu_domain->attributes
+				    & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
+		ret = 0;
+		break;
 	default:
 		ret = -ENODEV;
 		break;

@@ -2737,6 +2788,10 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 		smmu_domain->cfg.cbndx = *((unsigned int *)data);
 		ret = 0;
 		break;
+	case DOMAIN_ATTR_NON_FATAL_FAULTS:
+		smmu_domain->non_fatal_faults = *((int *)data);
+		ret = 0;
+		break;
 	default:
 		ret = -ENODEV;
 		break;

@@ -3260,11 +3315,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	parse_driver_options(smmu);
 
-	arm_smmu_enable_regulators(smmu);
-	arm_smmu_enable_clocks(smmu);
+	err = arm_smmu_enable_clocks(smmu);
+	if (err)
+		goto out_put_masters;
 	err = arm_smmu_device_cfg_probe(smmu);
 	arm_smmu_disable_clocks(smmu);
-	arm_smmu_disable_regulators(smmu);
 	if (err)
 		goto out_put_masters;

include/linux/iommu.h (+1 −0)

@@ -99,6 +99,7 @@ enum iommu_attr {
 	DOMAIN_ATTR_CONTEXTIDR,
 	DOMAIN_ATTR_PROCID,
 	DOMAIN_ATTR_DYNAMIC,
+	DOMAIN_ATTR_NON_FATAL_FAULTS,
 	DOMAIN_ATTR_MAX,
 };
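
Taken together, the patch does two things: it threads return-value checking through the clock/regulator enable paths, and it adds a per-domain DOMAIN_ATTR_NON_FATAL_FAULTS attribute so a domain owner can decide whether an unhandled context fault should BUG() the system. What the diff does not show is the consumer side. Below is a minimal sketch of how a client driver might opt its domain into non-fatal faults; example_enable_non_fatal_faults and dom are hypothetical names introduced here for illustration, while iommu_domain_set_attr() and DOMAIN_ATTR_NON_FATAL_FAULTS are the interfaces the patch targets.

	#include <linux/iommu.h>

	/*
	 * Hypothetical client-side helper: mark faults on this domain
	 * non-fatal. This should run before the domain starts taking
	 * faults, since the fault handler samples the flag at IRQ time.
	 */
	static int example_enable_non_fatal_faults(struct iommu_domain *dom)
	{
		int non_fatal = 1;

		return iommu_domain_set_attr(dom, DOMAIN_ATTR_NON_FATAL_FAULTS,
					     &non_fatal);
	}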
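
One note on the recurring pattern in this patch: every register accessor now treats arm_smmu_enable_clocks() (or its atomic variant) as fallible and bails out rather than touching unclocked registers. A minimal sketch of the pattern callers are expected to follow, assuming a hypothetical helper name (example_read_fsr) living inside arm-smmu.c; the arm_smmu_* calls and ARM_SMMU_CB_FSR are the driver's own.

	/* Hypothetical accessor following the patch's error-handling pattern. */
	static u32 example_read_fsr(struct arm_smmu_device *smmu,
				    void __iomem *cb_base)
	{
		u32 fsr;

		/* Bail out rather than touch registers with clocks off. */
		if (arm_smmu_enable_clocks(smmu))
			return 0;
		fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
		arm_smmu_disable_clocks(smmu);

		return fsr;
	}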