Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e67095fd authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'dma-mapping-5.3-5' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "Two fixes for regressions in this merge window:

   - select the Kconfig symbols for the noncoherent dma arch helpers on
     arm if swiotlb is selected, not just for LPAE, to not break the Xen
     build, which uses swiotlb indirectly through swiotlb-xen

   - fix the page allocator fallback in dma_alloc_contiguous if the CMA
     allocation fails"

* tag 'dma-mapping-5.3-5' of git://git.infradead.org/users/hch/dma-mapping:
  dma-direct: fix zone selection after an unaddressable CMA allocation
  arm: select the dma-noncoherent symbols for all swiotlb builds
parents 083f0f2c 90ae409f
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -7,6 +7,8 @@ config ARM
	select ARCH_HAS_BINFMT_FLAT
	select ARCH_HAS_DEBUG_VIRTUAL if MMU
	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
	select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_FORTIFY_SOURCE
	select ARCH_HAS_KEEPINITRD
@@ -18,6 +20,8 @@ config ARM
	select ARCH_HAS_SET_MEMORY
	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
	select ARCH_HAS_STRICT_MODULE_RWX if MMU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
	select ARCH_HAVE_CUSTOM_GPIO_H
+0 −4
Original line number Diff line number Diff line
@@ -664,10 +664,6 @@ config ARM_LPAE
		!CPU_32v4 && !CPU_32v3
	select PHYS_ADDR_T_64BIT
	select SWIOTLB
	select ARCH_HAS_DMA_COHERENT_TO_PFN
	select ARCH_HAS_DMA_MMAP_PGPROT
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	help
	  Say Y if you have an ARMv7 processor supporting the LPAE page
	  table format and you would like to access memory beyond the
+3 −0
Original line number Diff line number Diff line
@@ -965,10 +965,13 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

+1 −4
Original line number Diff line number Diff line
@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
/*
 * !CONFIG_DMA_CMA stub of dma_alloc_contiguous().
 *
 * Post-image of this hunk (header says +1 −4): the old fallback that
 * called alloc_pages_node() here was removed, because falling back in
 * the stub bypassed the caller's zone selection (GFP_DMA/GFP_DMA32
 * fixup) in dma-direct.  The stub now always returns NULL so callers
 * perform their own page-allocator fallback with the correct GFP zone
 * flags (see "dma-direct: fix zone selection after an unaddressable
 * CMA allocation").
 */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_contiguous(struct device *dev, struct page *page,
+2 −6
Original line number Diff line number Diff line
@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t align = get_order(PAGE_ALIGN(size));
	size_t count = size >> PAGE_SHIFT;
	struct page *page = NULL;
	struct cma *cma = NULL;

@@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)

	/* CMA can be used only in the context which permits sleeping */
	if (cma && gfpflags_allow_blocking(gfp)) {
		size_t align = get_order(size);
		size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);

		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
	}

	/* Fallback allocation of normal pages */
	if (!page)
		page = alloc_pages_node(node, gfp, align);
	return page;
}

Loading