drivers/iommu/io-pgtable-arm.c  (+19 −10)

@@ -288,6 +288,16 @@ static dma_addr_t __arm_lpae_dma_addr(void *pages)
 	return (dma_addr_t)virt_to_phys(pages);
 }
 
+static inline void pgtable_dma_sync_single_for_device(
+					struct io_pgtable_cfg *cfg,
+					dma_addr_t addr, size_t size,
+					enum dma_data_direction dir)
+{
+	if (!(cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT))
+		dma_sync_single_for_device(cfg->iommu_dev, addr,
+					   size, dir);
+}
+
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 				    struct io_pgtable_cfg *cfg, void *cookie)
 {
@@ -337,7 +347,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
 	*ptep = pte;
 
 	if (!selftest_running)
-		dma_sync_single_for_device(cfg->iommu_dev,
+		pgtable_dma_sync_single_for_device(cfg,
 					   __arm_lpae_dma_addr(ptep),
 					   sizeof(pte), DMA_TO_DEVICE);
 }
@@ -411,8 +421,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 	if (lvl == MAP_STATE_LVL) {
 		if (ms->pgtable)
-			dma_sync_single_for_device(
-				cfg->iommu_dev,
+			pgtable_dma_sync_single_for_device(cfg,
 				__arm_lpae_dma_addr(ms->pte_start),
 				ms->num_pte * sizeof(*ptep),
 				DMA_TO_DEVICE);
@@ -430,8 +439,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 		 * mapping. Flush out the previous page mappings.
 		 */
 		if (ms->pgtable)
-			dma_sync_single_for_device(
-				cfg->iommu_dev,
+			pgtable_dma_sync_single_for_device(cfg,
 				__arm_lpae_dma_addr(ms->pte_start),
 				ms->num_pte * sizeof(*ptep),
 				DMA_TO_DEVICE);
@@ -602,9 +610,10 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 	}
 
 	if (ms.pgtable)
-		dma_sync_single_for_device(
-			cfg->iommu_dev, __arm_lpae_dma_addr(ms.pte_start),
-			ms.num_pte * sizeof(*ptep), DMA_TO_DEVICE);
+		pgtable_dma_sync_single_for_device(cfg,
+			__arm_lpae_dma_addr(ms.pte_start),
+			ms.num_pte * sizeof(*ms.pte_start),
+			DMA_TO_DEVICE);
 
 	return mapped;
@@ -741,7 +750,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 		table += tl_offset;
 
 		memset(table, 0, table_len);
-		dma_sync_single_for_device(iop->cfg.iommu_dev,
+		pgtable_dma_sync_single_for_device(&iop->cfg,
 					   __arm_lpae_dma_addr(table),
 					   table_len, DMA_TO_DEVICE);
@@ -990,7 +999,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 		return NULL;
 
 	/* TCR */
-	if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+	if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
 		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
 		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
 		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
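For context, here is a minimal sketch (not part of this patch) of how an IOMMU driver could opt in to the new quirk when it knows its page-table walker snoops the CPU caches. The function name, parameters, page sizes, and the omitted .tlb callbacks are illustrative assumptions; struct io_pgtable_cfg, alloc_io_pgtable_ops(), ARM_64_LPAE_S1, and IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT come from the io-pgtable code this diff builds on, with the quirk bit assumed to be defined alongside the other IO_PGTABLE_QUIRK_* flags (header path shown as the modern <linux/io-pgtable.h>; older trees keep it under drivers/iommu/).

/*
 * Sketch only: a hypothetical driver-side caller that sets the quirk
 * when its page-table walker is cache coherent, so the
 * pgtable_dma_sync_single_for_device() helper added above skips the
 * dma_sync_single_for_device() calls.
 */
#include <linux/device.h>
#include <linux/io-pgtable.h>
#include <linux/sizes.h>

static struct io_pgtable_ops *
my_smmu_alloc_pgtable(struct device *iommu_dev, bool coherent_walk,
		      void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.iommu_dev	= iommu_dev,
		/* .tlb flush callbacks omitted from this sketch */
	};

	/* Only skip page-table cache maintenance on coherent walkers. */
	if (coherent_walk)
		cfg.quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;

	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
}

With the quirk set, the same flag also selects outer-shareable, write-back cacheable TCR attributes in arm_64_lpae_alloc_pgtable_s1(), replacing the previous check of iommu_dev->archdata.dma_coherent.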