arch/arm/mm/dma-mapping.c (+81 −30)

```diff
@@ -30,6 +30,8 @@
 #include <linux/cma.h>
 #include <linux/msm_dma_iommu_mapping.h>
 #include <linux/dma-mapping-fast.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -122,7 +124,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn);
 
-static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot);
+static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
+					bool coherent);
 
 static void *arm_dma_remap(struct device *dev, void *cpu_addr,
 			   dma_addr_t handle, size_t size,
@@ -131,6 +134,30 @@ static void *arm_dma_remap(struct device *dev, void *cpu_addr,
 static void arm_dma_unremap(struct device *dev, void *remapped_addr,
 			    size_t size);
 
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
+				 bool coherent)
+{
+	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
+		return pgprot_stronglyordered(prot);
+	else if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
+		return pgprot_writecombine(prot);
+
+	return prot;
+}
+
+static bool is_dma_coherent(struct device *dev, unsigned long attrs,
+			    bool is_coherent)
+{
+	if (attrs & DMA_ATTR_FORCE_COHERENT)
+		is_coherent = true;
+	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
+		is_coherent = false;
+	else if (is_device_dma_coherent(dev))
+		is_coherent = true;
+
+	return is_coherent;
+}
+
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -709,19 +736,6 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
-static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
-{
-	if (attrs & DMA_ATTR_WRITE_COMBINE)
-		prot = pgprot_writecombine(prot);
-	else if (attrs & DMA_ATTR_STRONGLY_ORDERED)
-		prot = pgprot_stronglyordered(prot);
-	/* if non-consistent just pass back what was given */
-	else if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
-		prot = pgprot_dmacoherent(prot);
-	return prot;
-}
-
 #define nommu() 0
 
 #else	/* !CONFIG_MMU */
@@ -915,7 +929,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		    gfp_t gfp, unsigned long attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 
 	return __dma_alloc(dev, size, handle, gfp, prot, false,
 			   attrs, __builtin_return_address(0));
@@ -959,7 +973,7 @@ static void *arm_dma_remap(struct device *dev, void *cpu_addr,
 {
 	void *ptr;
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 	unsigned long offset = handle & ~PAGE_MASK;
 
 	size = PAGE_ALIGN(size + offset);
@@ -1003,7 +1017,8 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		 unsigned long attrs)
 {
 #ifdef CONFIG_MMU
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     false);
 #endif	/* CONFIG_MMU */
 	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
@@ -1529,16 +1544,19 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
  * Create a mapping in device IO address space for specified pages
  */
 static dma_addr_t
-__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
+		       int coherent_flag)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t dma_addr, iova;
 	int i;
+	int prot = IOMMU_READ | IOMMU_WRITE;
 
 	dma_addr = __alloc_iova(mapping, size);
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
+	prot |= coherent_flag ? IOMMU_CACHE : 0;
+
 	iova = dma_addr;
 	for (i = 0; i < count; ) {
@@ -1553,8 +1571,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 			break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len,
-				IOMMU_READ|IOMMU_WRITE);
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1623,7 +1640,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
 	if (!addr)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, &page, size);
+	*handle = __iommu_create_mapping(dev, &page, size, coherent_flag);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_mapping;
@@ -1648,9 +1665,9 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
 	    int coherent_flag)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	struct page **pages;
 	void *addr = NULL;
+	pgprot_t prot;
 
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
@@ -1659,6 +1676,8 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 		return __iommu_alloc_simple(dev, size, gfp, handle,
 					    coherent_flag);
 
+	coherent_flag = is_dma_coherent(dev, attrs, coherent_flag);
+	prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent_flag);
 	/*
 	 * Following is a work-around (a.k.a. hack) to prevent pages
 	 * with __GFP_COMP being passed to split_page() which cannot
@@ -1672,7 +1691,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (!pages)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, pages, size);
+	*handle = __iommu_create_mapping(dev, pages, size, coherent_flag);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_buffer;
@@ -1739,7 +1758,8 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		    unsigned long attrs)
 {
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					is_dma_coherent(dev, attrs, NORMAL));
 
 	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
@@ -1784,7 +1804,8 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			  dma_addr_t handle, unsigned long attrs)
 {
-	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
+	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs,
+			       is_dma_coherent(dev, attrs, NORMAL));
 }
 
 void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
@@ -1959,6 +1980,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	unsigned int total_length = 0, current_offset = 0;
 	dma_addr_t iova;
+	bool coherent;
 	int prot = __dma_direction_to_prot(dir);
 
 	for_each_sg(sg, s, nents, i)
@@ -1967,6 +1989,10 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 	iova = __alloc_iova(mapping, total_length);
 	if (iova == DMA_ERROR_CODE)
 		return 0;
+
+	coherent = of_dma_is_coherent(dev->of_node);
+	prot |= is_dma_coherent(dev, attrs, coherent) ? IOMMU_CACHE : 0;
+
 	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
 	if (ret != total_length) {
 		__free_iova(mapping, iova, total_length);
@@ -2053,6 +2079,12 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 {
 	struct scatterlist *s;
 	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
 
 	for_each_sg(sg, s, nents, i)
 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
@@ -2072,6 +2104,13 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
 
 	for_each_sg(sg, s, nents, i)
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
@@ -2130,7 +2169,8 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     unsigned long attrs)
 {
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+	if (!is_dma_coherent(dev, attrs, false) &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
@@ -2178,7 +2218,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+	if (!(is_dma_coherent(dev, attrs, false) ||
+	      (attrs & DMA_ATTR_SKIP_CPU_SYNC)))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
@@ -2249,7 +2290,9 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!iova_coherent)
+		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
@@ -2260,10 +2303,16 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!iova_coherent)
+		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+static int arm_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == DMA_ERROR_CODE;
+}
+
 const struct dma_map_ops iommu_ops = {
 	.alloc		= arm_iommu_alloc_attrs,
 	.free		= arm_iommu_free_attrs,
@@ -2282,6 +2331,8 @@ const struct dma_map_ops iommu_ops = {
 	.map_resource	= arm_iommu_map_resource,
 	.unmap_resource	= arm_iommu_unmap_resource,
+
+	.mapping_error	= arm_iommu_mapping_error,
 };
 
 const struct dma_map_ops iommu_coherent_ops = {
```
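For context on the consumer side, here is a minimal sketch of how the coherency-override attributes added above reach `is_dma_coherent()` and `__get_dma_pgprot()` through the standard `dma_alloc_attrs()` path. The driver function `example_alloc()` is hypothetical and not part of this patch; `DMA_ATTR_FORCE_COHERENT`, `DMA_ATTR_FORCE_NON_COHERENT`, and `DMA_ATTR_STRONGLY_ORDERED` are the downstream attributes this tree defines.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver snippet, not part of this patch. */
static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *handle)
{
	unsigned long attrs = DMA_ATTR_FORCE_COHERENT;

	/*
	 * With this patch, is_dma_coherent() sees DMA_ATTR_FORCE_COHERENT
	 * and treats the buffer as coherent regardless of the device's
	 * default: the IOMMU page tables gain IOMMU_CACHE and the kernel
	 * mapping keeps a cacheable pgprot instead of falling back to
	 * write-combine. DMA_ATTR_FORCE_NON_COHERENT inverts that, and
	 * DMA_ATTR_STRONGLY_ORDERED still wins in __get_dma_pgprot().
	 */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
}
```

Absent either force attribute, the default now comes from `is_device_dma_coherent(dev)` (and, on the scatterlist path, `of_dma_is_coherent(dev->of_node)`), so a `dma-coherent` device tree property is enough to get cacheable IOMMU mappings without driver changes.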
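The streaming side can be sketched the same way. The hypothetical `example_map()` below is not part of this patch; it only illustrates two effects visible to callers: `arm_iommu_map_page()`/`arm_iommu_unmap_page()` now skip CPU cache maintenance when the device resolves as coherent (not only when `DMA_ATTR_SKIP_CPU_SYNC` is passed), and `dma_mapping_error()` on an IOMMU-attached device now reaches the new `arm_iommu_mapping_error()` callback instead of an open-coded `DMA_ERROR_CODE` comparison.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver snippet, not part of this patch. */
static int example_map(struct device *dev, void *buf, size_t size)
{
	dma_addr_t handle;

	/* For a coherent device, is_dma_coherent() makes the map path
	 * skip __dma_page_cpu_to_dev() even without SKIP_CPU_SYNC. */
	handle = dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE, 0);

	/* Resolves to arm_iommu_mapping_error(), i.e. a check against
	 * DMA_ERROR_CODE, via the new .mapping_error op. */
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* device performs DMA here ... */

	dma_unmap_single_attrs(dev, handle, size, DMA_TO_DEVICE, 0);
	return 0;
}
```

The same logic drives the sync hooks: `arm_iommu_sync_single_for_cpu()`/`_for_device()` and the scatterlist variants now consult `iommu_is_iova_coherent()` and return early for coherent IOVAs, so redundant cache operations are avoided on coherent mappings.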