arch/arm64/configs/vendor/holi_QGKI.config (+1 −0)

 CONFIG_LOCALVERSION="-qgki"
 # CONFIG_TRIM_UNUSED_KSYMS is not set
 CONFIG_QCOM_IOMMU_IO_PGTABLE_QUIRKS=y
+CONFIG_QCOM_IOMMU_TLBI_QUIRKS=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_DEBUG_FS=y
 CONFIG_IOMMU_DYNAMIC_DOMAINS=y

arch/arm64/configs/vendor/lahaina_QGKI.config (+1 −0)

 CONFIG_LOCALVERSION="-qgki"
 # CONFIG_TRIM_UNUSED_KSYMS is not set
 CONFIG_QCOM_IOMMU_IO_PGTABLE_QUIRKS=y
+CONFIG_QCOM_IOMMU_TLBI_QUIRKS=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_DMABUF_DESTRUCTOR_SUPPORT=y

drivers/iommu/Kconfig (+11 −0)

@@ -693,6 +693,17 @@ config IOMMU_TESTS
 
 endif # IOMMU_DEBUG
 
+config QCOM_IOMMU_TLBI_QUIRKS
+	bool "QCOM IOMMU Quirks for TLB invalidation"
+	depends on ARM_SMMU
+	depends on QGKI
+	help
+	  Enables some quirks that are used when performing TLBI for
+	  faster invalidation. The quirks that are supported deal with
+	  allowing defer tlbi and perform tlbiasid at the end of unmap.
+	  If unsure, say N here.
+
 config QCOM_IOMMU
 	# Note: iommu drivers cannot (yet?) be built as modules
 	bool "Qualcomm IOMMU Support"

drivers/iommu/arm-smmu.c (+35 −0)

@@ -970,6 +970,11 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 	struct arm_smmu_domain *smmu_domain = cookie;
 	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
+	if (!IS_ENABLED(CONFIG_QCOM_IOMMU_TLBI_QUIRKS)) {
+		smmu_domain->defer_flush = true;
+		return;
+	}
+
 	ops->tlb_inv_range(iova, size, granule, false, cookie);
 	ops->tlb_sync(cookie);
 }

@@ -980,6 +985,11 @@ static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
 	struct arm_smmu_domain *smmu_domain = cookie;
 	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
+	if (!IS_ENABLED(CONFIG_QCOM_IOMMU_TLBI_QUIRKS)) {
+		smmu_domain->defer_flush = true;
+		return;
+	}
+
 	ops->tlb_inv_range(iova, size, granule, true, cookie);
 	ops->tlb_sync(cookie);
 }

@@ -991,6 +1001,11 @@ static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
 	struct arm_smmu_domain *smmu_domain = cookie;
 	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
+	if (!IS_ENABLED(CONFIG_QCOM_IOMMU_TLBI_QUIRKS)) {
+		smmu_domain->defer_flush = true;
+		return;
+	}
+
 	ops->tlb_inv_range(iova, granule, granule, true, cookie);
 }

@@ -1161,6 +1176,19 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 	.tlb_sync = arm_smmu_tlb_sync_vmid,
 };
 
+static void arm_smmu_deferred_flush(struct arm_smmu_domain *smmu_domain)
+{
+	/*
+	 * This checks for deferred invalidations, and perform flush all.
+	 * Deferred invalidations helps replace multiple invalidations with
+	 * single flush
+	 */
+	if (smmu_domain->defer_flush) {
+		smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
+		smmu_domain->defer_flush = false;
+	}
+}
+
 static void print_ctx_regs(struct arm_smmu_device *smmu, struct arm_smmu_cfg *cfg,
 			   unsigned int fsr)
 {

@@ -3183,6 +3211,9 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	arm_smmu_rpm_get(smmu);
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	ret = ops->map(ops, iova, paddr, size, prot);
+
+	arm_smmu_deferred_flush(smmu_domain);
+
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 	arm_smmu_rpm_put(smmu);

@@ -3197,6 +3228,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 		list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
 		ret = ops->map(ops, iova, paddr, size, prot);
 		list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
+		arm_smmu_deferred_flush(smmu_domain);
 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 		arm_smmu_rpm_put(smmu);
 		arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);

@@ -3256,6 +3288,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	arm_smmu_rpm_get(smmu);
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	ret = ops->unmap(ops, iova, size, gather);
+	arm_smmu_deferred_flush(smmu_domain);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 	arm_smmu_rpm_put(smmu);

@@ -3358,6 +3391,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	ret = pgtbl_info->map_sg(ops, iova, sg_start, idx_end - idx_start,
 				 prot, &size);
+	arm_smmu_deferred_flush(smmu_domain);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
 	if (ret == -ENOMEM) {

@@ -3377,6 +3411,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 				   &size);
 		list_splice_init(&smmu_domain->nonsecure_pool,
 				 &nonsecure_pool);
+		arm_smmu_deferred_flush(smmu_domain);
 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 		arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);

drivers/iommu/arm-smmu.h (+1 −0)

@@ -492,6 +492,7 @@ struct arm_smmu_domain {
 	struct list_head nonsecure_pool;
 	struct iommu_debug_attachment *logger;
 	struct msm_iommu_domain domain;
+	bool defer_flush;
 };
arch/arm64/configs/vendor/holi_QGKI.config (+1 −0)

 CONFIG_LOCALVERSION="-qgki"
 # CONFIG_TRIM_UNUSED_KSYMS is not set
 CONFIG_QCOM_IOMMU_IO_PGTABLE_QUIRKS=y
+CONFIG_QCOM_IOMMU_TLBI_QUIRKS=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_DEBUG_FS=y
 CONFIG_IOMMU_DYNAMIC_DOMAINS=y
arch/arm64/configs/vendor/lahaina_QGKI.config (+1 −0)

 CONFIG_LOCALVERSION="-qgki"
 # CONFIG_TRIM_UNUSED_KSYMS is not set
 CONFIG_QCOM_IOMMU_IO_PGTABLE_QUIRKS=y
+CONFIG_QCOM_IOMMU_TLBI_QUIRKS=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_DMABUF_DESTRUCTOR_SUPPORT=y
drivers/iommu/Kconfig (+11 −0)

@@ -693,6 +693,17 @@ config IOMMU_TESTS
 
 endif # IOMMU_DEBUG
 
+config QCOM_IOMMU_TLBI_QUIRKS
+	bool "QCOM IOMMU Quirks for TLB invalidation"
+	depends on ARM_SMMU
+	depends on QGKI
+	help
+	  Enables some quirks that are used when performing TLBI for
+	  faster invalidation. The quirks that are supported deal with
+	  allowing defer tlbi and perform tlbiasid at the end of unmap.
+	  If unsure, say N here.
+
 config QCOM_IOMMU
 	# Note: iommu drivers cannot (yet?) be built as modules
 	bool "Qualcomm IOMMU Support"
drivers/iommu/arm-smmu.c (+35 −0)

@@ -970,6 +970,11 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 	struct arm_smmu_domain *smmu_domain = cookie;
 	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
+	if (!IS_ENABLED(CONFIG_QCOM_IOMMU_TLBI_QUIRKS)) {
+		smmu_domain->defer_flush = true;
+		return;
+	}
+
 	ops->tlb_inv_range(iova, size, granule, false, cookie);
 	ops->tlb_sync(cookie);
 }

@@ -980,6 +985,11 @@ static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
 	struct arm_smmu_domain *smmu_domain = cookie;
 	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
+	if (!IS_ENABLED(CONFIG_QCOM_IOMMU_TLBI_QUIRKS)) {
+		smmu_domain->defer_flush = true;
+		return;
+	}
+
 	ops->tlb_inv_range(iova, size, granule, true, cookie);
 	ops->tlb_sync(cookie);
 }

@@ -991,6 +1001,11 @@ static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
 	struct arm_smmu_domain *smmu_domain = cookie;
 	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
+	if (!IS_ENABLED(CONFIG_QCOM_IOMMU_TLBI_QUIRKS)) {
+		smmu_domain->defer_flush = true;
+		return;
+	}
+
 	ops->tlb_inv_range(iova, granule, granule, true, cookie);
 }

@@ -1161,6 +1176,19 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 	.tlb_sync = arm_smmu_tlb_sync_vmid,
 };
 
+static void arm_smmu_deferred_flush(struct arm_smmu_domain *smmu_domain)
+{
+	/*
+	 * This checks for deferred invalidations, and perform flush all.
+	 * Deferred invalidations helps replace multiple invalidations with
+	 * single flush
+	 */
+	if (smmu_domain->defer_flush) {
+		smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
+		smmu_domain->defer_flush = false;
+	}
+}
+
 static void print_ctx_regs(struct arm_smmu_device *smmu, struct arm_smmu_cfg *cfg,
 			   unsigned int fsr)
 {

@@ -3183,6 +3211,9 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	arm_smmu_rpm_get(smmu);
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	ret = ops->map(ops, iova, paddr, size, prot);
+
+	arm_smmu_deferred_flush(smmu_domain);
+
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 	arm_smmu_rpm_put(smmu);

@@ -3197,6 +3228,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 		list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
 		ret = ops->map(ops, iova, paddr, size, prot);
 		list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
+		arm_smmu_deferred_flush(smmu_domain);
 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 		arm_smmu_rpm_put(smmu);
 		arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);

@@ -3256,6 +3288,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	arm_smmu_rpm_get(smmu);
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	ret = ops->unmap(ops, iova, size, gather);
+	arm_smmu_deferred_flush(smmu_domain);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 	arm_smmu_rpm_put(smmu);

@@ -3358,6 +3391,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	ret = pgtbl_info->map_sg(ops, iova, sg_start, idx_end - idx_start,
 				 prot, &size);
+	arm_smmu_deferred_flush(smmu_domain);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
 	if (ret == -ENOMEM) {

@@ -3377,6 +3411,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 				   &size);
 		list_splice_init(&smmu_domain->nonsecure_pool,
 				 &nonsecure_pool);
+		arm_smmu_deferred_flush(smmu_domain);
 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 		arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
drivers/iommu/arm-smmu.h (+1 −0)

@@ -492,6 +492,7 @@ struct arm_smmu_domain {
 	struct list_head nonsecure_pool;
 	struct iommu_debug_attachment *logger;
 	struct msm_iommu_domain domain;
+	bool defer_flush;
 };