Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8b046724 authored by Liam Mark
Browse files

mm: direct previous __GFP_CMA allocations to offlinable memory



Ensure we continue to direct __GFP_CMA allocations to offlinable memory now
that __GFP_OFFLINABLE has been introduced.

Change-Id: Ibdd71e45f4e47110f3ec4d308d26ffae58beb300
Signed-off-by: Liam Mark <lmark@codeaurora.org>
parent 0bbd8092
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -1360,13 +1360,15 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE |
				__GFP_CMA);
				__GFP_CMA |
				__GFP_OFFLINABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE | __GFP_CMA);
				__GFP_MOVABLE | __GFP_CMA |
				__GFP_OFFLINABLE);
		if (handle)
			goto compress_again;
		return -ENOMEM;
+3 −6
Original line number Diff line number Diff line
@@ -205,12 +205,9 @@ static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
/*
 * Allocate a zeroed highmem page for userspace at @vaddr in @vma.
 *
 * Without CMA direct utilization, plain __GFP_MOVABLE is sufficient.
 * With it, also pass __GFP_CMA | __GFP_OFFLINABLE so these movable
 * user allocations keep being directed to offlinable (CMA-backed)
 * memory now that __GFP_OFFLINABLE exists.
 */
#ifndef CONFIG_CMA_DIRECT_UTILIZATION
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
#else
	return __alloc_zeroed_user_highpage(
			__GFP_MOVABLE|__GFP_CMA|__GFP_OFFLINABLE, vma,
			vaddr);
#endif
}

static inline void clear_highpage(struct page *page)
+4 −4
Original line number Diff line number Diff line
@@ -350,8 +350,8 @@ static void destroy_cache(struct zs_pool *pool)

static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
	/*
	 * Handles come from a slab cache, so strip the page-allocator
	 * placement flags (highmem/movable/CMA/offlinable) that only make
	 * sense for the zspage allocation itself, not for slab objects.
	 */
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep, gfp &
		~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA|__GFP_OFFLINABLE));
}

static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
@@ -361,8 +361,8 @@ static void cache_free_handle(struct zs_pool *pool, unsigned long handle)

static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
	/*
	 * zspage metadata is a slab allocation; mask off the page-allocator
	 * placement flags (highmem/movable/CMA/offlinable) before passing
	 * the caller's gfp mask to the slab cache.
	 */
	return kmem_cache_alloc(pool->zspage_cachep, flags &
		~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA|__GFP_OFFLINABLE));
}

static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)