drivers/iommu/arm-smmu-v3.c (+12 −19)

@@ -1042,13 +1042,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 		}
 	}
 
-	/* Nuke the existing Config, as we're going to rewrite it */
-	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
-
-	if (ste->valid)
-		val |= STRTAB_STE_0_V;
-	else
-		val &= ~STRTAB_STE_0_V;
+	/* Nuke the existing STE_0 value, as we're going to rewrite it */
+	val = ste->valid ? STRTAB_STE_0_V : 0;
 
 	if (ste->bypass) {
 		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1083,7 +1078,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
 		        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
 			STRTAB_STE_0_CFG_S1_TRANS;
-
 	}
 
 	if (ste->s2_cfg) {
@@ -1983,17 +1977,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 	u32 size, l1size;
 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
-	/*
-	 * If we can resolve everything with a single L2 table, then we
-	 * just need a single L1 descriptor. Otherwise, calculate the L1
-	 * size, capped to the SIDSIZE.
-	 */
-	if (smmu->sid_bits < STRTAB_SPLIT) {
-		size = 0;
-	} else {
-		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
-		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
-	}
+	/* Calculate the L1 size, capped to the SIDSIZE. */
+	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
 	cfg->num_l1_ents = 1 << size;
 	size += STRTAB_SPLIT;
@@ -2504,6 +2490,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
 	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
 
+	/*
+	 * If the SMMU supports fewer bits than would fill a single L2 stream
+	 * table, use a linear table instead.
+	 */
+	if (smmu->sid_bits <= STRTAB_SPLIT)
+		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+
 	/* IDR5 */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
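
Editor's note: the two strtab hunks above split one decision — hw_probe now clears ARM_SMMU_FEAT_2_LVL_STRTAB whenever sid_bits <= STRTAB_SPLIT, so arm_smmu_init_strtab_2lvl() no longer needs the small-SID special case. Below is a minimal userspace sketch of the resulting sizing arithmetic; the constant values (STRTAB_SPLIT = 8, STRTAB_L1_SZ_SHIFT = 20, STRTAB_L1_DESC_DWORDS = 1) are copied from the driver's defines and the ilog2() helper stands in for the kernel's, so treat it as illustrative rather than authoritative.

/* strtab_sizing.c: standalone model of the L1 sizing above. */
#include <stdio.h>

#define STRTAB_SPLIT		8	/* one L2 table covers 1 << 8 SIDs */
#define STRTAB_L1_SZ_SHIFT	20	/* cap the L1 table at 1MB */
#define STRTAB_L1_DESC_DWORDS	1	/* one 64-bit descriptor per L2 table */

static unsigned int ilog2(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int sid_bits;

	for (sid_bits = 4; sid_bits <= 32; sid_bits += 4) {
		if (sid_bits <= STRTAB_SPLIT) {
			/* hw_probe clears ARM_SMMU_FEAT_2_LVL_STRTAB */
			printf("sid_bits=%2u: linear table, %u STEs\n",
			       sid_bits, 1u << sid_bits);
			continue;
		}
		/* L1 size in bits, capped to SIDSIZE minus the split */
		unsigned int size = STRTAB_L1_SZ_SHIFT -
				    (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
		if (size > sid_bits - STRTAB_SPLIT)
			size = sid_bits - STRTAB_SPLIT;
		printf("sid_bits=%2u: 2-level table, %u L1 descriptors\n",
		       sid_bits, 1u << size);
	}
	return 0;
}

Running this also shows why the old `sid_bits < STRTAB_SPLIT` check (note: strictly less-than) was insufficient: at sid_bits == STRTAB_SPLIT it built a degenerate two-level table with a single L1 descriptor, where the new `<=` test in hw_probe steers such SMMUs to a plain linear table instead.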
drivers/iommu/arm-smmu.c (+51 −21)

@@ -24,6 +24,7 @@
  *	- v7/v8 long-descriptor format
  *	- Non-secure access to the SMMU
  *	- Context fault reporting
+ *	- Extended Stream ID (16 bit)
  */
 
 #define pr_fmt(fmt) "arm-smmu: " fmt
@@ -87,6 +88,7 @@
 #define sCR0_CLIENTPD			(1 << 0)
 #define sCR0_GFRE			(1 << 1)
 #define sCR0_GFIE			(1 << 2)
+#define sCR0_EXIDENABLE			(1 << 3)
 #define sCR0_GCFGFRE			(1 << 4)
 #define sCR0_GCFGFIE			(1 << 5)
 #define sCR0_USFCFG			(1 << 10)
@@ -126,6 +128,7 @@
 #define ID0_NUMIRPT_MASK		0xff
 #define ID0_NUMSIDB_SHIFT		9
 #define ID0_NUMSIDB_MASK		0xf
+#define ID0_EXIDS			(1 << 8)
 #define ID0_NUMSMRG_SHIFT		0
 #define ID0_NUMSMRG_MASK		0xff
@@ -169,6 +172,7 @@
 #define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
 #define S2CR_CBNDX_SHIFT		0
 #define S2CR_CBNDX_MASK			0xff
+#define S2CR_EXIDVALID			(1 << 10)
 #define S2CR_TYPE_SHIFT			16
 #define S2CR_TYPE_MASK			0x3
 enum arm_smmu_s2cr_type {
@@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg {
 #define TTBCR2_SEP_SHIFT		15
 #define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
+#define TTBCR2_AS			(1 << 4)
 
 #define TTBRn_ASID_SHIFT		48
@@ -351,6 +356,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
 #define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
 #define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
+#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
 	u32 features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -778,6 +784,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
 			reg2 |= TTBCR2_SEP_UPSTREAM;
+			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+				reg2 |= TTBCR2_AS;
 		}
 		if (smmu->version > ARM_SMMU_V1)
 			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
@@ -1048,7 +1056,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 	struct arm_smmu_smr *smr = smmu->smrs + idx;
 	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
 
-	if (smr->valid)
+	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
 		reg |= SMR_VALID;
 	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
 }
@@ -1060,6 +1068,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 		      (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
 		      (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
 
+	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
+	    smmu->smrs[idx].valid)
+		reg |= S2CR_EXIDVALID;
 	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
 }
@@ -1070,6 +1081,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
 	arm_smmu_write_smr(smmu, idx);
 }
 
+/*
+ * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
+ * should be called after sCR0 is written.
+ */
+static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
+{
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	u32 smr;
+
+	if (!smmu->smrs)
+		return;
+
+	/*
+	 * SMR.ID bits may not be preserved if the corresponding MASK
+	 * bits are set, so check each one separately. We can reject
+	 * masters later if they try to claim IDs outside these masks.
+	 */
+	smr = smmu->streamid_mask << SMR_ID_SHIFT;
+	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+	smmu->streamid_mask = smr >> SMR_ID_SHIFT;
+
+	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+}
+
 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
 {
 	struct arm_smmu_smr *smrs = smmu->smrs;
@@ -1648,6 +1687,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	if (smmu->features & ARM_SMMU_FEAT_VMID16)
 		reg |= sCR0_VMID16EN;
 
+	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
+		reg |= sCR0_EXIDENABLE;
+
 	/* Push the button */
 	__arm_smmu_tlb_sync(smmu);
 	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -1735,11 +1777,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 			   "\t(IDR0.CTTW overridden by FW configuration)\n");
 
 	/* Max. number of entries we have for stream matching/indexing */
-	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+		smmu->features |= ARM_SMMU_FEAT_EXIDS;
+		size = 1 << 16;
+	} else {
+		size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+	}
 	smmu->streamid_mask = size - 1;
 	if (id & ID0_SMS) {
-		u32 smr;
-
 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
 		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
 		if (size == 0) {
@@ -1748,21 +1793,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 			return -ENODEV;
 		}
 
-		/*
-		 * SMR.ID bits may not be preserved if the corresponding MASK
-		 * bits are set, so check each one separately. We can reject
-		 * masters later if they try to claim IDs outside these masks.
-		 */
-		smr = smmu->streamid_mask << SMR_ID_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-		smmu->streamid_mask = smr >> SMR_ID_SHIFT;
-
-		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
-
 		/* Zero-initialised to mark as invalid */
 		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
 					  GFP_KERNEL);
@@ -1770,8 +1800,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 			return -ENOMEM;
 
 		dev_notice(smmu->dev,
-			   "\tstream matching with %lu register groups, mask 0x%x",
-			   size, smmu->smr_mask_mask);
+			   "\tstream matching with %lu register groups", size);
 	}
 	/* s2cr->type == 0 means translation, so initialise explicitly */
 	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
@@ -2094,6 +2123,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
 	platform_set_drvdata(pdev, smmu);
 	arm_smmu_device_reset(smmu);
+	arm_smmu_test_smr_masks(smmu);
 
 	/* Oh, for a proper bus abstraction */
 	if (!iommu_present(&platform_bus_type))
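
Editor's note: to see what the arm_smmu_write_smr()/arm_smmu_write_s2cr() changes do to the register values, here is a hedged standalone sketch. SMR_VALID (bit 31), SMR_MASK_SHIFT (16) and SMR_ID_SHIFT (0) are the driver's classic-mode field positions; the point is that with 16-bit IDs and masks, mask bit 15 lands exactly on SMR_VALID, so validity has to move into S2CR.EXIDVALID once EXIDENABLE is set.

/* exids_bits.c: where the valid bit lives, with and without EXIDS. */
#include <stdint.h>
#include <stdio.h>

#define SMR_VALID	(1u << 31)	/* classic (non-EXIDS) mode only */
#define SMR_MASK_SHIFT	16
#define SMR_ID_SHIFT	0
#define S2CR_EXIDVALID	(1u << 10)

static void show(const char *label, uint16_t id, uint16_t mask, int exids)
{
	uint32_t smr = (uint32_t)id << SMR_ID_SHIFT |
		       (uint32_t)mask << SMR_MASK_SHIFT;
	uint32_t s2cr = 0;	/* TYPE/CBNDX/PRIVCFG fields omitted */

	if (!exids)
		smr |= SMR_VALID;	/* as arm_smmu_write_smr() */
	else
		s2cr |= S2CR_EXIDVALID;	/* as arm_smmu_write_s2cr() */

	printf("%s: SMR=0x%08x S2CR=0x%08x\n", label, smr, s2cr);
}

int main(void)
{
	/* 15-bit ID and mask: fits either way */
	show("classic", 0x0123, 0x0007, 0);
	/*
	 * 16-bit mask: bit 15 of the mask occupies bit 31 of the SMR,
	 * exactly where SMR_VALID used to be, so the valid bit moves.
	 */
	show("exids  ", 0x8123, 0x8007, 1);
	return 0;
}

This also explains the ordering change in the probe path: arm_smmu_test_smr_masks() must run after arm_smmu_device_reset() has written sCR0_EXIDENABLE, because the SMR fields it measures are only 16 bits wide once that bit is set.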
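
Editor's note: the write/read-back probe that moved into arm_smmu_test_smr_masks() is easy to model in isolation. The sketch below replaces the SMMU's SMR(0) register with a mock that implements only 10 ID bits and 10 mask bits (an invented width, purely for illustration); the probe discovers both widths without prior knowledge, just as the driver fills in streamid_mask and smr_mask_mask.

/* smr_probe.c: mock of the arm_smmu_test_smr_masks() write/read-back. */
#include <stdint.h>
#include <stdio.h>

#define SMR_ID_SHIFT	0
#define SMR_MASK_SHIFT	16

/* Pretend hardware: only 10 ID bits and 10 mask bits are wired up. */
#define HW_IMPLEMENTED	(0x3ffu << SMR_ID_SHIFT | 0x3ffu << SMR_MASK_SHIFT)

static uint32_t mock_smr0;

static void writel_mock(uint32_t val)
{
	mock_smr0 = val & HW_IMPLEMENTED;	/* unimplemented bits RAZ/WI */
}

static uint32_t readl_mock(void)
{
	return mock_smr0;
}

int main(void)
{
	uint16_t streamid_mask = 0xffff;	/* optimistic starting point */
	uint16_t smr_mask_mask;
	uint32_t smr;

	/* Which ID bits stick? */
	smr = (uint32_t)streamid_mask << SMR_ID_SHIFT;
	writel_mock(smr);
	streamid_mask = readl_mock() >> SMR_ID_SHIFT;

	/* Which mask bits stick? Probe with the surviving ID bits. */
	smr = (uint32_t)streamid_mask << SMR_MASK_SHIFT;
	writel_mock(smr);
	smr_mask_mask = readl_mock() >> SMR_MASK_SHIFT;

	/* Prints streamid_mask=0x03ff smr_mask_mask=0x03ff */
	printf("streamid_mask=0x%04x smr_mask_mask=0x%04x\n",
	       streamid_mask, smr_mask_mask);
	return 0;
}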
drivers/iommu/exynos-iommu.c (+1 −1)

@@ -628,7 +628,7 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 
 	pm_runtime_enable(dev);
 
-	of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
+	iommu_register_instance(dev->fwnode, &exynos_iommu_ops);
 
 	return 0;
 }
drivers/iommu/msm_iommu.c (+1 −1)

@@ -737,7 +737,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
 	}
 
 	list_add(&iommu->dev_node, &qcom_iommu_devices);
-	of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+	iommu_register_instance(pdev->dev.fwnode, &msm_iommu_ops);
 
 	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
 		iommu->base, iommu->irq, iommu->ncb);
drivers/iommu/mtk_iommu.c (+1 −1)

@@ -655,7 +655,7 @@ static int mtk_iommu_init_fn(struct device_node *np)
 		return ret;
 	}
 
-	of_iommu_set_ops(np, &mtk_iommu_ops);
+	iommu_register_instance(&np->fwnode, &mtk_iommu_ops);
 
 	return 0;
 }
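
Editor's note: the three one-line conversions above all follow the same pattern — registration is now keyed on the firmware-agnostic fwnode_handle (embedded in the device_node for DT) rather than on the device_node itself, so ACPI-described IOMMUs can share the same lookup path. A hypothetical driver would convert like this (sketch only: foo_iommu and its ops are invented names; for a DT platform device, dev->fwnode points at &dev->of_node->fwnode):

static int foo_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/* ... map registers, request IRQs, set up foo_iommu_ops ... */

	/* Before: DT-only registration, keyed on the device_node */
	/* of_iommu_set_ops(dev->of_node, &foo_iommu_ops); */

	/* After: keyed on the fwnode, shared between DT and ACPI */
	iommu_register_instance(dev->fwnode, &foo_iommu_ops);

	return 0;
}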