NOTE: this span was an exact duplicate paste of the three diff sections that follow
(arch/arm64/Kconfig, arch/arm64/include/asm/device.h, arch/arm64/include/asm/dma-iommu.h);
the duplicated paste artifact has been removed — see the canonical copy below.
# arch/arm64/Kconfig  (hunk @@ -219,6 +219,32 @@, after "config NEED_SG_DMA_LENGTH")

config SMP
	def_bool y

# Selected (not user-visible) by IOMMU drivers that use the arm-style
# dma-iommu mapping layer; pulls in the scatterlist features it requires.
config ARM64_DMA_USE_IOMMU
	bool
	select ARM_HAS_SG_CHAIN
	select NEED_SG_DMA_LENGTH

if ARM64_DMA_USE_IOMMU

config ARM64_DMA_IOMMU_ALIGNMENT
	int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
	range 4 9
	default 8
	help
	  DMA mapping framework by default aligns all buffers to the smallest
	  PAGE_SIZE order which is greater than or equal to the requested
	  buffer size. This works well for buffers up to a few hundred
	  kilobytes, but for larger buffers it is just a waste of address
	  space. Drivers which have a relatively small addressing window
	  (like 64MiB) might run out of virtual space with just a few
	  allocations.

	  With this parameter you can specify the maximum PAGE_SIZE order for
	  DMA IOMMU buffers. Larger buffers will be aligned only to this
	  specified order. The order is expressed as a power of two
	  multiplied by the PAGE_SIZE.

endif

config SWIOTLB
	def_bool y
/* arch/arm64/include/asm/device.h  (hunk @@ -22,10 +22,19 @@) */

struct dev_archdata {
	void *iommu;			/* private IOMMU data */
	/*
	 * NOTE(review): the #endif below closes an #ifdef that lies above
	 * the visible hunk context — confirm against the full file.
	 */
#endif
	bool dma_coherent;
#ifdef CONFIG_ARM64_DMA_USE_IOMMU
	/* per-device arm-style IOMMU mapping, set by arm_iommu_attach_device() */
	struct dma_iommu_mapping *mapping;
#endif
};

struct pdev_archdata {
	u64 dma_mask;
};

/*
 * Accessor for the per-device IOMMU mapping; compiles to NULL when the
 * arm64 dma-iommu layer is not configured, so callers need no #ifdefs.
 */
#ifdef CONFIG_ARM64_DMA_USE_IOMMU
#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
#else
#define to_dma_iommu_mapping(dev) NULL
#endif

#endif
/*
 * arch/arm64/include/asm/dma-iommu.h  (new file, +36 lines)
 *
 * Interface of the arm-style dma-iommu mapping layer for arm64: a
 * dma_iommu_mapping ties an iommu_domain to an IO virtual address
 * allocator shared by all devices attached to that mapping.
 */
#ifndef ASMARM_DMA_IOMMU_H
#define ASMARM_DMA_IOMMU_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/kmemcheck.h>
#include <linux/kref.h>

struct dma_iommu_mapping {
	/* iommu specific data */
	struct iommu_domain *domain;

	/*
	 * NOTE(review): presumably an IOVA allocation bitmap of `bits`
	 * entries, each covering PAGE_SIZE << order, with `base` the first
	 * IO virtual address — confirm against the .c implementation,
	 * which is not part of this header.
	 */
	void *bitmap;
	size_t bits;
	unsigned int order;
	dma_addr_t base;

	spinlock_t lock;	/* guards allocator state (bitmap) */
	struct kref kref;	/* shared by all attached devices; freed on last put */
};

/*
 * Create a mapping covering [base, base + size) for devices on `bus`;
 * `order` sets the allocation granularity. Released via
 * arm_iommu_release_mapping() (kref-based, see struct above).
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
			 int order);

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);

/* Attach/detach a device to a mapping; returns 0 or a negative errno. */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);

#endif /* __KERNEL__ */
#endif