drivers/iommu/arm-smmu-impl.c +2 −0

@@ -163,6 +163,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
 		return cavium_smmu_impl_init(smmu);
 	case QCOM_SMMUV500:
 		return qsmmuv500_impl_init(smmu);
+	case QCOM_SMMUV2:
+		return qsmmuv2_impl_init(smmu);
 	default:
 		break;
 	}
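This hooks the new QCOM_SMMUV2 model into the generic impl dispatch. An impl init function is allowed to hand back a different struct arm_smmu_device than it was given — qsmmuv2_impl_init() below returns the copy embedded in its archdata — so the probe path must adopt the returned pointer. Roughly, as a sketch of the caller side (not part of this diff):

	/* sketch: the probe path replaces its smmu pointer with the returned one */
	smmu = arm_smmu_impl_init(smmu);
	if (IS_ERR(smmu))
		return PTR_ERR(smmu);

The new implementation itself lives in arm-smmu-qcom.c: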
drivers/iommu/arm-smmu-qcom.c +289 −0

@@ -15,6 +15,270 @@
 #include "arm-smmu.h"
 
+#define ARM_SMMU_IMPL_DEF1		6
+#define IMPL_DEF1_MICRO_MMU_CTRL	0
+#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	BIT(2)
+#define MICRO_MMU_CTRL_IDLE		BIT(3)
+
+/* Definitions for implementation-defined registers */
+#define ACTLR_QCOM_OSH			BIT(28)
+#define ACTLR_QCOM_ISH			BIT(29)
+#define ACTLR_QCOM_NSH			BIT(30)
+
+struct arm_smmu_impl_def_reg {
+	u32 offset;
+	u32 value;
+};
+
+struct qsmmuv2_archdata {
+	spinlock_t			atos_lock;
+	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
+	unsigned int			num_impl_def_attach_registers;
+	struct arm_smmu_device		smmu;
+};
+
+#define to_qsmmuv2_archdata(smmu) \
+	container_of(smmu, struct qsmmuv2_archdata, smmu)
+
+static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
+{
+	void __iomem *reg = arm_smmu_page(smmu, ARM_SMMU_IMPL_DEF1);
+	struct device *dev = smmu->dev;
+	u32 tmp;
+
+	if (readl_poll_timeout_atomic(reg + IMPL_DEF1_MICRO_MMU_CTRL, tmp,
+				      (tmp & MICRO_MMU_CTRL_IDLE), 0, 30000)) {
+		dev_err(dev, "Couldn't halt SMMU!\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
+{
+	u32 val;
+
+	val = arm_smmu_readl(smmu, ARM_SMMU_IMPL_DEF1, IMPL_DEF1_MICRO_MMU_CTRL);
+	val |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
+	arm_smmu_writel(smmu, ARM_SMMU_IMPL_DEF1, IMPL_DEF1_MICRO_MMU_CTRL, val);
+
+	return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
+}
+
+static int qsmmuv2_halt(struct arm_smmu_device *smmu)
+{
+	return __qsmmuv2_halt(smmu, true);
+}
+
+static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
+{
+	return __qsmmuv2_halt(smmu, false);
+}
+
+static void qsmmuv2_resume(struct arm_smmu_device *smmu)
+{
+	u32 val;
+
+	val = arm_smmu_readl(smmu, ARM_SMMU_IMPL_DEF1, IMPL_DEF1_MICRO_MMU_CTRL);
+	val &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
+	arm_smmu_writel(smmu, ARM_SMMU_IMPL_DEF1, IMPL_DEF1_MICRO_MMU_CTRL, val);
+}
+
+static phys_addr_t __qsmmuv2_iova_to_phys_hard(
+	struct arm_smmu_domain *smmu_domain, dma_addr_t iova)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct device *dev = smmu->dev;
+	int idx = cfg->cbndx;
+	void __iomem *reg;
+	u32 tmp;
+	u64 phys;
+	unsigned long va;
+
+	/* ATS1 registers can only be written atomically */
+	va = iova & ~0xfffUL;
+	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
+	else
+		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
+
+	reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx));
+	if (readl_poll_timeout_atomic(reg + ARM_SMMU_CB_ATSR, tmp,
+				      !(tmp & ATSR_ACTIVE), 5, 50)) {
+		dev_err(dev, "iova to phys timed out on %pad.\n", &iova);
+		phys = 0;
+		return phys;
+	}
+
+	phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
+	if (phys & CB_PAR_F) {
+		dev_err(dev, "translation fault!\n");
+		dev_err(dev, "PAR = 0x%llx\n", phys);
+		phys = 0;
+	} else {
+		phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
+	}
+
+	return phys;
+}
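Both polls above use readl_poll_timeout_atomic() from <linux/iopoll.h>, which busy-waits without sleeping and is therefore usable under the atos_lock spinlock taken in the ATOS path below. As a rough illustration of what the 30 ms halt poll expands to (a sketch only — the real macro also re-reads the register once more after the deadline before giving up):

	/* Open-coded sketch of readl_poll_timeout_atomic(reg, tmp,
	 * (tmp & MICRO_MMU_CTRL_IDLE), 0, 30000) — illustration only.
	 */
	ktime_t timeout = ktime_add_us(ktime_get(), 30000);

	for (;;) {
		tmp = readl(reg + IMPL_DEF1_MICRO_MMU_CTRL);
		if (tmp & MICRO_MMU_CTRL_IDLE)
			break;			/* TBU went idle: success */
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;	/* qsmmuv2_wait_for_halt() turns this into -EBUSY */
		/* delay_us is 0 here, so there is no udelay() between reads */
	}

The hunk continues with the ATOS entry point, which serializes the whole halt/translate/resume sequence under atos_lock: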
+static phys_addr_t qsmmuv2_iova_to_phys_hard(
+	struct arm_smmu_domain *smmu_domain, dma_addr_t iova,
+	unsigned long trans_flags)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct qsmmuv2_archdata *data = to_qsmmuv2_archdata(smmu);
+	int idx = smmu_domain->cfg.cbndx;
+	phys_addr_t phys = 0;
+	unsigned long flags;
+	u32 sctlr, sctlr_orig, fsr;
+
+	spin_lock_irqsave(&data->atos_lock, flags);
+
+	qsmmuv2_halt_nowait(smmu);
+
+	/* disable stall mode momentarily */
+	sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
+	sctlr = sctlr_orig & ~(SCTLR_CFCFG);
+	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
+
+	/* clear FSR to allow ATOS to log any faults */
+	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+	if (fsr & FSR_FAULT) {
+		/* Clear pending interrupts */
+		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+		/*
+		 * Barrier required to ensure that the FSR is cleared
+		 * before resuming SMMU operation.
+		 */
+		wmb();
+
+		/*
+		 * TBU halt takes care of resuming any stalled transaction.
+		 * Kept here for completeness' sake.
+		 */
+		if (fsr & FSR_SS)
+			arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+					  RESUME_TERMINATE);
+	}
+
+	qsmmuv2_wait_for_halt(smmu);
+
+	phys = __qsmmuv2_iova_to_phys_hard(smmu_domain, iova);
+
+	/* restore SCTLR */
+	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
+
+	qsmmuv2_resume(smmu);
+
+	spin_unlock_irqrestore(&data->atos_lock, flags);
+
+	return phys;
+}
+
+static int qsmmuv2_device_reset(struct arm_smmu_device *smmu)
+{
+	struct qsmmuv2_archdata *data = to_qsmmuv2_archdata(smmu);
+	struct arm_smmu_impl_def_reg *regs = data->impl_def_attach_registers;
+	u32 i;
+
+	/* Program implementation defined registers */
+	qsmmuv2_halt(smmu);
+	for (i = 0; i < data->num_impl_def_attach_registers; ++i)
+		arm_smmu_gr0_write(smmu, regs[i].offset, regs[i].value);
+	qsmmuv2_resume(smmu);
+
+	return 0;
+}
+
+static void qsmmuv2_init_cb(struct arm_smmu_domain *smmu_domain,
+			    struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	int idx = smmu_domain->cfg.cbndx;
+	const struct iommu_flush_ops *tlb;
+	u32 val;
+
+	tlb = smmu_domain->pgtbl_info[0].pgtbl_cfg.tlb;
+
+	val = ACTLR_QCOM_ISH | ACTLR_QCOM_OSH | ACTLR_QCOM_NSH;
+	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ACTLR, val);
+
+	/*
+	 * Flush the context bank after modifying ACTLR to ensure there
+	 * are no cache entries with stale state
+	 */
+	tlb->tlb_flush_all(smmu_domain);
+}
+
+static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+	struct qsmmuv2_archdata *data = to_qsmmuv2_archdata(smmu);
+	int i, ntuples, ret;
+	u32 *tuples;
+	struct arm_smmu_impl_def_reg *regs, *regit;
+
+	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
+		return 0;
+
+	ntuples /= sizeof(u32);
+	if (ntuples % 2) {
+		dev_err(dev,
+			"Invalid number of attach-impl-defs registers: %d\n",
+			ntuples);
+		return -EINVAL;
+	}
+
+	regs = devm_kzalloc(dev, sizeof(*data->impl_def_attach_registers) *
+			    ntuples, GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	tuples = kzalloc(sizeof(u32) * ntuples * 2, GFP_KERNEL);
+	if (!tuples)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
+					 tuples, ntuples);
+	if (ret) {
+		kfree(tuples);
+		return ret;
+	}
+
+	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
+		regit->offset = tuples[i];
+		regit->value = tuples[i + 1];
+	}
+
+	kfree(tuples);
+
+	data->impl_def_attach_registers = regs;
+	data->num_impl_def_attach_registers = ntuples / 2;
+
+	return 0;
+}
+
+static const struct arm_smmu_impl qsmmuv2_impl = {
+	.init_context_bank = qsmmuv2_init_cb,
+	.iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
+	.reset = qsmmuv2_device_reset,
+};
+
 struct qcom_smmu {
 	struct arm_smmu_device smmu;
 };

@@ -927,3 +1191,28 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
 	return &qsmmu->smmu;
 }
+
+struct arm_smmu_device *qsmmuv2_impl_init(struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+	struct qsmmuv2_archdata *data;
+	struct platform_device *pdev;
+	int ret;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	pdev = to_platform_device(dev);
+
+	spin_lock_init(&data->atos_lock);
+	data->smmu = *smmu;
+	data->smmu.impl = &qsmmuv2_impl;
+
+	ret = arm_smmu_parse_impl_def_registers(&data->smmu);
+	if (ret)
+		return ERR_PTR(ret);
+
+	devm_kfree(smmu->dev, smmu);
+
+	return &data->smmu;
+}
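qsmmuv2_impl_init() follows the subclassing pattern used by the other impls: it allocates the larger qsmmuv2_archdata, copies the already-probed arm_smmu_device into the embedded field, installs the qsmmuv2 ops, and frees the original allocation. From then on, to_qsmmuv2_archdata() can recover the wrapper from any arm_smmu_device pointer, because container_of() is just pointer arithmetic on the member offset. As a sketch (illustration only):

	struct qsmmuv2_archdata *data = to_qsmmuv2_archdata(smmu);
	/* expands to roughly:
	 *   (struct qsmmuv2_archdata *)((char *)smmu -
	 *		offsetof(struct qsmmuv2_archdata, smmu))
	 */

The attach-impl-defs parser expects the devicetree property to hold an even number of u32 cells forming (offset, value) pairs, which qsmmuv2_device_reset() then writes into GR0 space under a TBU halt. A hypothetical node entry (offsets and values invented purely for illustration) would look like attach-impl-defs = <0x6000 0x270>, <0x6060 0x1055>;.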
drivers/iommu/arm-smmu.c +0 −54

@@ -68,9 +68,6 @@
 #define TLB_LOOP_TIMEOUT		500000	/* 500ms */
 #define TLB_LOOP_INC_MAX		1000	/* 1ms */
 
-#define ARM_SMMU_IMPL_DEF1(smmu) \
-	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
-
 #define MSI_IOVA_BASE			0x8000000
 #define MSI_IOVA_LENGTH			0x100000

@@ -4264,53 +4261,6 @@ static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
 	return 0;
 }
 
-static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
-{
-	struct device *dev = smmu->dev;
-	int i, ntuples, ret;
-	u32 *tuples;
-	struct arm_smmu_impl_def_reg *regs, *regit;
-
-	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
-		return 0;
-
-	ntuples /= sizeof(u32);
-	if (ntuples % 2) {
-		dev_err(dev,
-			"Invalid number of attach-impl-defs registers: %d\n",
-			ntuples);
-		return -EINVAL;
-	}
-
-	regs = devm_kmalloc(
-		dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
-		GFP_KERNEL);
-	if (!regs)
-		return -ENOMEM;
-
-	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
-	if (!tuples)
-		return -ENOMEM;
-
-	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
-					 tuples, ntuples);
-	if (ret)
-		return ret;
-
-	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
-		regit->offset = tuples[i];
-		regit->value = tuples[i + 1];
-	}
-
-	devm_kfree(dev, tuples);
-
-	smmu->impl_def_attach_registers = regs;
-	smmu->num_impl_def_attach_registers = ntuples / 2;
-
-	return 0;
-}
-
 static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
 {
 	const char *cname;

@@ -4908,10 +4858,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	if (err)
 		goto out_power_off;
 
-	err = arm_smmu_parse_impl_def_registers(smmu);
-	if (err)
-		goto out_power_off;
-
 	if (smmu->version == ARM_SMMU_V2) {
 		if (smmu->num_context_banks > smmu->num_context_irqs) {
 			dev_err(dev,
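With the parser moved into arm-smmu-qcom.c (where the moved copy also switches the regs allocation to devm_kzalloc() and frees the scratch tuples buffer on the error path), the open-coded ARM_SMMU_IMPL_DEF1(smmu) address macro can go too. Assuming the usual arm_smmu_page() helper in arm-smmu.h, which returns base + (n << pgshift), the new page-relative accesses compute the same address, since 6 * (1 << pgshift) == 6 << pgshift. A sketch of the equivalence (illustration only):

	/* old: */ void __iomem *p1 = smmu->base + (6 * (1 << smmu->pgshift));
	/* new: */ void __iomem *p2 = arm_smmu_page(smmu, ARM_SMMU_IMPL_DEF1);
	/* p1 == p2, assuming arm_smmu_page(smmu, n) is smmu->base + (n << smmu->pgshift) */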
drivers/iommu/arm-smmu.h +1 −9

@@ -279,11 +279,6 @@ enum arm_smmu_implementation {
 	QCOM_SMMUV500,
 };
 
-struct arm_smmu_impl_def_reg {
-	u32 offset;
-	u32 value;
-};
-
 /*
  * Describes resources required for on/off power operation.
  * Separate reference count is provided for atomic/nonatomic

@@ -400,10 +395,6 @@ struct arm_smmu_device {
 	/* IOMMU core code handle */
 	struct iommu_device iommu;
 
-	/* Specific to QCOM */
-	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
-	unsigned int num_impl_def_attach_registers;
-
 	struct arm_smmu_power_resources *pwr;
 
 	/* protects idr */

@@ -587,6 +578,7 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
 struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
 struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
 struct arm_smmu_device *qsmmuv500_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *qsmmuv2_impl_init(struct arm_smmu_device *smmu);
 
 int arm_mmu500_reset(struct arm_smmu_device *smmu);