Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 712c604d authored by Lucas Stach, committed by Linus Torvalds
Browse files

mm: wire up GFP flag passing in dma_alloc_from_contiguous

The callers of the DMA alloc functions already provide the proper
context GFP flags.  Make sure to pass them through to the CMA allocator,
to make the CMA compaction context aware.

Link: http://lkml.kernel.org/r/20170127172328.18574-3-l.stach@pengutronix.de


Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Alexander Graf <agraf@suse.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e2f466e3
Loading
Loading
Loading
Loading
+9 −7
Original line number Original line Diff line number Diff line
@@ -349,7 +349,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     const void *caller, bool want_vaddr,
				     int coherent_flag);
				     int coherent_flag, gfp_t gfp);


static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 pgprot_t prot, struct page **ret_page,
@@ -420,7 +420,8 @@ static int __init atomic_pool_init(void)
	 */
	 */
	if (dev_get_cma_area(NULL))
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL);
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
					   &page, atomic_pool_init, true);
@@ -594,14 +595,14 @@ static int __free_from_pool(void *start, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     const void *caller, bool want_vaddr,
				     int coherent_flag)
				     int coherent_flag, gfp_t gfp)
{
{
	unsigned long order = get_order(size);
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	struct page *page;
	void *ptr = NULL;
	void *ptr = NULL;


	page = dma_alloc_from_contiguous(dev, count, order);
	page = dma_alloc_from_contiguous(dev, count, order, gfp);
	if (!page)
	if (!page)
		return NULL;
		return NULL;


@@ -655,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag)	NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag, gfp)	NULL
#define __free_from_pool(cpu_addr, size)			do { } while (0)
#define __free_from_pool(cpu_addr, size)			do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)
@@ -697,7 +698,8 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
{
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag);
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}
}


static void cma_allocator_free(struct arm_dma_free_args *args)
static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -1312,7 +1314,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
		unsigned long order = get_order(size);
		unsigned long order = get_order(size);
		struct page *page;
		struct page *page;


		page = dma_alloc_from_contiguous(dev, count, order);
		page = dma_alloc_from_contiguous(dev, count, order, gfp);
		if (!page)
		if (!page)
			goto error;
			goto error;


+2 −2
Original line number Original line Diff line number Diff line
@@ -107,7 +107,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
		void *addr;
		void *addr;


		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
						 get_order(size), flags);
		if (!page)
		if (!page)
			return NULL;
			return NULL;


@@ -390,7 +390,7 @@ static int __init atomic_pool_init(void)


	if (dev_get_cma_area(NULL))
	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
		page = dma_alloc_from_contiguous(NULL, nr_pages,
							pool_size_order);
						 pool_size_order, GFP_KERNEL);
	else
	else
		page = alloc_pages(GFP_DMA, pool_size_order);
		page = alloc_pages(GFP_DMA, pool_size_order);


+2 −2
Original line number Original line Diff line number Diff line
@@ -148,8 +148,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	gfp = massage_gfp_flags(dev, gfp);
	gfp = massage_gfp_flags(dev, gfp);


	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev,
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
					count, get_order(size));
						 gfp);
	if (!page)
	if (!page)
		page = alloc_pages(gfp, get_order(size));
		page = alloc_pages(gfp, get_order(size));


+2 −1
Original line number Original line Diff line number Diff line
@@ -91,7 +91,8 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
	page = NULL;
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(flag)) {
	if (gfpflags_allow_blocking(flag)) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag);
		if (page && page_to_phys(page) + size > dma_mask) {
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
			page = NULL;
+2 −1
Original line number Original line Diff line number Diff line
@@ -158,7 +158,8 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
		flag |= GFP_DMA;
		flag |= GFP_DMA;


	if (gfpflags_allow_blocking(flag))
	if (gfpflags_allow_blocking(flag))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag);


	if (!page)
	if (!page)
		page = alloc_pages(flag, get_order(size));
		page = alloc_pages(flag, get_order(size));
Loading