drivers/iommu/arm-smmu.c  (+80 −45)

@@ -730,6 +730,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 
+	if (smmu->version > ARM_SMMU_V1) {
+		/*
+		 * CBA2R.
+		 * *Must* be initialised before CBAR, since a VMID16
+		 * architectural oversight affected some implementations.
+		 */
+#ifdef CONFIG_64BIT
+		reg = CBA2R_RW64_64BIT;
+#else
+		reg = CBA2R_RW64_32BIT;
+#endif
+		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
+	}
+
 	/* CBAR */
 	reg = cfg->cbar;
 	if (smmu->version == ARM_SMMU_V1)

@@ -747,16 +761,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	}
 	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
-	if (smmu->version > ARM_SMMU_V1) {
-		/* CBA2R */
-#ifdef CONFIG_64BIT
-		reg = CBA2R_RW64_64BIT;
-#else
-		reg = CBA2R_RW64_32BIT;
-#endif
-		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
-	}
-
 	/* TTBRs */
 	if (stage1) {
 		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
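The two hunks above do nothing but reorder the CBA2R write ahead of the CBAR write. As a sketch of the net programming order after this change — the helper name is hypothetical and CBAR handling is reduced to the bare write, while the macros and register layout are the driver's own:

/*
 * Sketch only: the resulting context-bank init order. CBA2R selects the
 * AArch32/AArch64 translation scheme, so on implementations with the
 * VMID16 quirk it must be committed before CBAR is written.
 */
static void arm_smmu_write_cb_regs_sketch(struct arm_smmu_device *smmu,
					  struct arm_smmu_cfg *cfg,
					  void __iomem *gr1_base)
{
	u32 reg;

	if (smmu->version > ARM_SMMU_V1) {
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;	/* AArch64 translation scheme */
#else
		reg = CBA2R_RW64_32BIT;	/* AArch32 LPAE scheme */
#endif
		/* 1. CBA2R first: it selects the format CBAR depends on */
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* 2. CBAR second, safe even on affected implementations */
	writel_relaxed(cfg->cbar, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
}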
@@ -1326,59 +1330,81 @@ static void __arm_smmu_release_pci_iommudata(void *data)
 	kfree(data);
 }
 
-static int arm_smmu_add_device(struct device *dev)
+static int arm_smmu_add_pci_device(struct pci_dev *pdev)
 {
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master_cfg *cfg;
+	int i, ret;
+	u16 sid;
 	struct iommu_group *group;
-	void (*releasefn)(void *) = NULL;
-	int ret;
+	struct arm_smmu_master_cfg *cfg;
 
-	smmu = find_smmu_for_device(dev);
-	if (!smmu)
-		return -ENODEV;
-
-	group = iommu_group_alloc();
-	if (IS_ERR(group)) {
-		dev_err(dev, "Failed to allocate IOMMU group\n");
+	group = iommu_group_get_for_dev(&pdev->dev);
+	if (IS_ERR(group))
 		return PTR_ERR(group);
-	}
 
-	if (dev_is_pci(dev)) {
-		struct pci_dev *pdev = to_pci_dev(dev);
-
+	cfg = iommu_group_get_iommudata(group);
+	if (!cfg) {
 		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
 		if (!cfg) {
 			ret = -ENOMEM;
 			goto out_put_group;
 		}
 
-		cfg->num_streamids = 1;
-		/*
-		 * Assume Stream ID == Requester ID for now.
-		 * We need a way to describe the ID mappings in FDT.
-		 */
-		pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
-				       &cfg->streamids[0]);
-		releasefn = __arm_smmu_release_pci_iommudata;
-	} else {
-		struct arm_smmu_master *master;
+		iommu_group_set_iommudata(group, cfg,
+					  __arm_smmu_release_pci_iommudata);
+	}
 
-		master = find_smmu_master(smmu, dev->of_node);
-		if (!master) {
-			ret = -ENODEV;
-			goto out_put_group;
-		}
+	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
+		ret = -ENOSPC;
+		goto out_put_group;
+	}
 
-		cfg = &master->cfg;
+	/*
+	 * Assume Stream ID == Requester ID for now.
+	 * We need a way to describe the ID mappings in FDT.
+	 */
+	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+	for (i = 0; i < cfg->num_streamids; ++i)
+		if (cfg->streamids[i] == sid)
+			break;
+
+	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
+	if (i == cfg->num_streamids)
+		cfg->streamids[cfg->num_streamids++] = sid;
+
+	return 0;
+out_put_group:
+	iommu_group_put(group);
+	return ret;
+}
+
+static int arm_smmu_add_platform_device(struct device *dev)
+{
+	struct iommu_group *group;
+	struct arm_smmu_master *master;
+	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+
+	if (!smmu)
+		return -ENODEV;
+
+	master = find_smmu_master(smmu, dev->of_node);
+	if (!master)
+		return -ENODEV;
+
+	/* No automatic group creation for platform devices */
+	group = iommu_group_alloc();
+	if (IS_ERR(group))
+		return PTR_ERR(group);
+
+	iommu_group_set_iommudata(group, &master->cfg, NULL);
+	return iommu_group_add_device(group, dev);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+	if (dev_is_pci(dev))
+		return arm_smmu_add_pci_device(to_pci_dev(dev));
 
-	}
-
-	iommu_group_set_iommudata(group, cfg, releasefn);
-	ret = iommu_group_add_device(group, dev);
-
-out_put_group:
-	iommu_group_put(group);
-	return ret;
+	return arm_smmu_add_platform_device(dev);
 }
 
 static void arm_smmu_remove_device(struct device *dev)
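The PCI path now deduplicates Stream IDs because iommu_group_get_for_dev() can hand back an existing group shared by several DMA aliases, so the same SID may be offered to the same cfg more than once. A self-contained sketch of that append-if-absent pattern; the names here (struct sid_list, MAX_SIDS, sid_list_add) are illustrative, not part of the driver:

#include <stdbool.h>

#define MAX_SIDS 8	/* stands in for MAX_MASTER_STREAMIDS */

struct sid_list {
	unsigned short ids[MAX_SIDS];
	unsigned int count;
};

static bool sid_list_add(struct sid_list *list, unsigned short sid)
{
	unsigned int i;

	/* Linear scan: a duplicate would later clash as overlapping SMRs */
	for (i = 0; i < list->count; i++)
		if (list->ids[i] == sid)
			return true;	/* already present, nothing to do */

	if (list->count >= MAX_SIDS)
		return false;		/* mirrors the driver's -ENOSPC path */

	list->ids[list->count++] = sid;
	return true;
}

In the driver the same scan runs over cfg->streamids[], which persists in the group's iommudata, so each DMA alias contributes its Stream ID exactly once.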
@@ -1630,6 +1656,15 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
 	smmu->pa_size = size;
 
+	/*
+	 * What the page table walker can address actually depends on which
+	 * descriptor format is in use, but since a) we don't know that yet,
+	 * and b) it can vary per context bank, this will have to do...
+	 */
+	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
+		dev_warn(smmu->dev, "failed to set DMA mask for table walker\n");
+
 	if (smmu->version == ARM_SMMU_V1) {
 		smmu->va_size = smmu->ipa_size;
 		size = SZ_4K | SZ_2M | SZ_1G;
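The page tables the walker reads are ordinary kernel allocations, so the device's DMA mask should reflect the walker's output address size (ID2.OAS, probed just above) before any tables are allocated. A sketch of the pattern; walker_dma_setup() is a hypothetical helper, while dma_set_mask_and_coherent(), DMA_BIT_MASK() and dev_warn() are the kernel APIs used in the hunk:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: bound DMA addressability by the walker's OAS. */
static int walker_dma_setup(struct device *dev, unsigned int oas_bits)
{
	/*
	 * DMA_BIT_MASK(n) covers every address expressible in n bits.
	 * Failure is only warned about: translation still works as long
	 * as the allocator happens to return addressable tables.
	 */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(oas_bits)))
		dev_warn(dev, "failed to set DMA mask for table walker\n");

	return 0;
}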
drivers/iommu/io-pgtable-arm.c  (+5 −0)

@@ -116,6 +116,8 @@
 #define ARM_32_LPAE_TCR_EAE		(1 << 31)
 #define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)
 
+#define ARM_LPAE_TCR_EPD1		(1 << 23)
+
 #define ARM_LPAE_TCR_TG0_4K		(0 << 14)
 #define ARM_LPAE_TCR_TG0_64K		(1 << 14)
 #define ARM_LPAE_TCR_TG0_16K		(2 << 14)

@@ -621,6 +623,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	}
 
 	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+
+	/* Disable speculative walks through TTBR1 */
+	reg |= ARM_LPAE_TCR_EPD1;
 	cfg->arm_lpae_s1_cfg.tcr = reg;
 
 	/* MAIRs */
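This allocator only ever populates TTBR0, so anything the hardware might speculatively fetch through TTBR1 would be junk; setting TCR.EPD1 disables TTBR1 walks outright. A self-contained sketch of how the bit folds into the stage-1 TCR, using simplified copies of the driver's shift/bit definitions (make_s1_tcr() is illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define LPAE_TCR_T0SZ_SHIFT	0		/* cf. ARM_LPAE_TCR_T0SZ_SHIFT */
#define LPAE_TCR_EPD1		(1u << 23)	/* cf. ARM_LPAE_TCR_EPD1 */

/* Build a heavily simplified stage-1 TCR for an ias-bit input space. */
static uint64_t make_s1_tcr(unsigned int ias)
{
	uint64_t tcr = 0;

	tcr |= (uint64_t)(64 - ias) << LPAE_TCR_T0SZ_SHIFT;	/* T0SZ */
	tcr |= LPAE_TCR_EPD1;	/* no speculative walks via TTBR1 */
	return tcr;
}

int main(void)
{
	/* 48-bit IAS: T0SZ = 16 with EPD1 set, as the allocator would program */
	printf("tcr = 0x%llx\n", (unsigned long long)make_s1_tcr(48));
	return 0;
}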