arch/arm64/mm/dma-mapping.c  +14 −13

@@ -164,6 +164,8 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
 {
+	void *addr;
+
 	if (dev == NULL) {
 		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
 		return NULL;
@@ -174,7 +176,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		flags |= GFP_DMA;
 	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
 		struct page *page;
-		void *addr;
 
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
 							get_order(size));
@@ -184,20 +185,20 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		addr = page_address(page);
 		memset(addr, 0, size);
+	} else {
+		addr = swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+	}
 
-		if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
-		    dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs)) {
+	if (addr && (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
+		     dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))) {
 		/*
 		 * flush the caches here because we can't later
 		 */
 		__dma_flush_range(addr, addr + size);
-		__dma_remap(page, size, 0, true);
+		__dma_remap(virt_to_page(addr), size, 0, true);
 	}
 
-		return addr;
-	} else {
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	}
+	return addr;
 }
 
 static void __dma_free_coherent(struct device *dev, size_t size,
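What the change does: `addr` is hoisted to function scope so the DMA_ATTR_NO_KERNEL_MAPPING / DMA_ATTR_STRONGLY_ORDERED handling runs for both allocation paths (CMA and swiotlb) rather than only inside the CMA branch, now guarded by an `addr` NULL check. `virt_to_page(addr)` replaces `page`, which is no longer in scope at that point.

For reference, a minimal caller sketch against the pre-4.8 `struct dma_attrs` API this file uses. This is not part of the patch: the helper name is hypothetical, and DMA_ATTR_STRONGLY_ORDERED is a vendor (msm/CAF) attribute rather than an upstream one.

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	/* Hypothetical helper: allocate a buffer the kernel never maps. */
	static void *alloc_unmapped_buf(struct device *dev, size_t size,
					dma_addr_t *handle)
	{
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		/* Vendor-specific attribute (msm/CAF trees only). */
		dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);

		/*
		 * With NO_KERNEL_MAPPING the CPU must not dereference the
		 * returned cookie; only *handle is meant for device use.
		 */
		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
	}

Note the design point the patch relies on: such a buffer can now come from either allocator, and in both cases the cache flush and `__dma_remap()` must happen at allocation time, since once the kernel mapping is removed there is no virtual address left to operate on.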