Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d883c6cf authored by Joonsoo Kim, committed by Linus Torvalds
Browse files

Revert "mm/cma: manage the memory of the CMA area by using the ZONE_MOVABLE"



This reverts the following commits that change CMA design in MM.

 3d2054ad ("ARM: CMA: avoid double mapping to the CMA area if CONFIG_HIGHMEM=y")

 1d47a3ec ("mm/cma: remove ALLOC_CMA")

 bad8c6c0 ("mm/cma: manage the memory of the CMA area by using the ZONE_MOVABLE")

Ville reported the following error on i386.

  Inode-cache hash table entries: 65536 (order: 6, 262144 bytes)
  microcode: microcode updated early to revision 0x4, date = 2013-06-28
  Initializing CPU#0
  Initializing HighMem for node 0 (000377fe:00118000)
  Initializing Movable for node 0 (00000001:00118000)
  BUG: Bad page state in process swapper  pfn:377fe
  page:f53effc0 count:0 mapcount:-127 mapping:00000000 index:0x0
  flags: 0x80000000()
  raw: 80000000 00000000 00000000 ffffff80 00000000 00000100 00000200 00000001
  page dumped because: nonzero mapcount
  Modules linked in:
  CPU: 0 PID: 0 Comm: swapper Not tainted 4.17.0-rc5-elk+ #145
  Hardware name: Dell Inc. Latitude E5410/03VXMC, BIOS A15 07/11/2013
  Call Trace:
   dump_stack+0x60/0x96
   bad_page+0x9a/0x100
   free_pages_check_bad+0x3f/0x60
   free_pcppages_bulk+0x29d/0x5b0
   free_unref_page_commit+0x84/0xb0
   free_unref_page+0x3e/0x70
   __free_pages+0x1d/0x20
   free_highmem_page+0x19/0x40
   add_highpages_with_active_regions+0xab/0xeb
   set_highmem_pages_init+0x66/0x73
   mem_init+0x1b/0x1d7
   start_kernel+0x17a/0x363
   i386_start_kernel+0x95/0x99
   startup_32_smp+0x164/0x168

The reason for this error is that the span of ZONE_MOVABLE is extended
to the whole node span in preparation for future CMA initialization,
and normal memory is wrongly freed here.  I submitted a fix and it
seemed to work, but another problem then appeared.

It is too late in the release cycle to fix this latter problem, so I
have decided to revert the series.

Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Acked-by: Laura Abbott <labbott@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 577e75e0
Loading
Loading
Loading
Loading
+1 −15
Original line number Diff line number Diff line
@@ -466,12 +466,6 @@ void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
void __init dma_contiguous_remap(void)
{
	int i;

	if (!dma_mmu_remap_num)
		return;

	/* call flush_cache_all() since CMA area would be large enough */
	flush_cache_all();
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
@@ -504,14 +498,6 @@ void __init dma_contiguous_remap(void)
		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		/*
		 * All the memory in CMA region will be on ZONE_MOVABLE.
		 * If that zone is considered as highmem, the memory in CMA
		 * region is also considered as highmem even if it's
		 * physical address belong to lowmem. In this case,
		 * re-mapping isn't required.
		 */
		if (!is_highmem_idx(ZONE_MOVABLE))
		iotable_init(&map, 1);
	}
}
+3 −0
Original line number Diff line number Diff line
@@ -216,6 +216,9 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
+0 −1
Original line number Diff line number Diff line
@@ -2109,7 +2109,6 @@ extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);
extern void setup_zone_pageset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;
+11 −72
Original line number Diff line number Diff line
@@ -39,7 +39,6 @@
#include <trace/events/cma.h>

#include "cma.h"
#include "internal.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
@@ -110,25 +109,23 @@ static int __init cma_activate_area(struct cma *cma)
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		if (!pfn_valid(base_pfn))
			goto err;

		zone = page_zone(pfn_to_page(base_pfn));
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			if (!pfn_valid(pfn))
				goto err;

			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * In init_cma_reserved_pageblock(), present_pages
			 * is adjusted with assumption that all pages in
			 * the pageblock come from a single zone.
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
@@ -142,7 +139,7 @@ static int __init cma_activate_area(struct cma *cma)

	return 0;

err:
not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
@@ -152,41 +149,6 @@ static int __init cma_activate_area(struct cma *cma)
static int __init cma_init_reserved_areas(void)
{
	int i;
	struct zone *zone;
	pg_data_t *pgdat;

	if (!cma_area_count)
		return 0;

	for_each_online_pgdat(pgdat) {
		unsigned long start_pfn = UINT_MAX, end_pfn = 0;

		zone = &pgdat->node_zones[ZONE_MOVABLE];

		/*
		 * In this case, we cannot adjust the zone range
		 * since it is now maximum node span and we don't
		 * know original zone range.
		 */
		if (populated_zone(zone))
			continue;

		for (i = 0; i < cma_area_count; i++) {
			if (pfn_to_nid(cma_areas[i].base_pfn) !=
				pgdat->node_id)
				continue;

			start_pfn = min(start_pfn, cma_areas[i].base_pfn);
			end_pfn = max(end_pfn, cma_areas[i].base_pfn +
						cma_areas[i].count);
		}

		if (!end_pfn)
			continue;

		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	}

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
@@ -195,32 +157,9 @@ static int __init cma_init_reserved_areas(void)
			return ret;
	}

	/*
	 * Reserved pages for ZONE_MOVABLE are now activated and
	 * this would change ZONE_MOVABLE's managed page counter and
	 * the other zones' present counter. We need to re-calculate
	 * various zone information that depends on this initialization.
	 */
	build_all_zonelists(NULL);
	for_each_populated_zone(zone) {
		if (zone_idx(zone) == ZONE_MOVABLE) {
			zone_pcp_reset(zone);
			setup_zone_pageset(zone);
		} else
			zone_pcp_update(zone);

		set_zone_contiguous(zone);
	}

	/*
	 * We need to re-init per zone wmark by calling
	 * init_per_zone_wmark_min() but doesn't call here because it is
	 * registered on core_initcall and it will be called later than us.
	 */

	return 0;
}
pure_initcall(cma_init_reserved_areas);
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
+3 −1
Original line number Diff line number Diff line
@@ -1450,12 +1450,14 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
	 * if compaction succeeds.
	 * For costly orders, we require low watermark instead of min for
	 * compaction to proceed to increase its chances.
	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
	 * suitable migration targets
	 */
	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
				low_wmark_pages(zone) : min_wmark_pages(zone);
	watermark += compact_gap(order);
	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
						0, wmark_target))
						ALLOC_CMA, wmark_target))
		return COMPACT_SKIPPED;

	return COMPACT_CONTINUE;
Loading