Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 42e668f4 authored by Laura Abbott
Browse files

cma: Delay non-placed memblocks until after all allocations



CMA is now responsible for almost all memory reservation/removal.
Some regions are at fixed locations, some are placed dynamically.
We need to place all fixed regions first before trying to place
dynamic regions to avoid overlap. Additionally, allow an
architecture-specific callback after all removals and fixed-location
placements have happened, to potentially update any relevant limits.

Change-Id: Iaaffe60445ef44d432f0d87875ce2b292b717cc7
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
parent 07eb9396
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -8,6 +8,7 @@
#include <asm-generic/dma-contiguous.h>

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
void __init dma_contiguous_early_removal_fixup(void);

#endif
#endif
+5 −0
Original line number Diff line number Diff line
@@ -1225,6 +1225,11 @@ void __init sanity_check_meminfo(void)
	memblock_set_current_limit(arm_lowmem_limit);
}

/*
 * Arch hook invoked by dma_contiguous_reserve() after all fixed-location
 * CMA removals have been applied to memblock.  Re-runs
 * sanity_check_meminfo(), whose tail (visible just above) re-issues
 * memblock_set_current_limit(arm_lowmem_limit), so the lowmem limit is
 * recomputed with the removed regions taken into account.
 */
void __init dma_contiguous_early_removal_fixup(void)
{
	sanity_check_meminfo();
}

static inline void prepare_page_table(void)
{
	unsigned long addr;
+2 −1
Original line number Diff line number Diff line
/*
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@

/* Stub fixup hooks: nothing to do when this header path is selected. */
static inline void
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
/* NOTE(review): presumably the no-op counterpart of the ARM removal
 * fixup — confirm which Kconfig option guards this header variant. */
static inline void __init dma_contiguous_early_removal_fixup(void) { }

#endif
#endif
+65 −18
Original line number Diff line number Diff line
@@ -60,6 +60,8 @@ static struct cma_area {
	struct cma *cma;
	const char *name;
	bool to_system;
	unsigned long alignment;
	unsigned long limit;
} cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

@@ -69,6 +71,7 @@ static struct cma_map {
	struct device *dev;
} cma_maps[MAX_CMA_AREAS] __initdata;
static unsigned cma_map_count __initdata;
static bool allow_memblock_alloc __initdata;

static struct cma *cma_get_area(phys_addr_t base)
{
@@ -275,6 +278,24 @@ int __init cma_fdt_scan(unsigned long node, const char *uname,
}
#endif

/*
 * Carve @size bytes out of memblock (aligned to @alignment, below
 * @limit) and report the chosen address through @base.
 *
 * Until dma_contiguous_reserve() enables dynamic placement by setting
 * allow_memblock_alloc, the request is deferred: *base is set to 0 and
 * 0 is returned, so the caller keeps the region marked as unplaced.
 *
 * Returns 0 on success or deferral, -ENOMEM when memblock has no
 * suitable range.
 */
int __init __dma_contiguous_reserve_memory(size_t size, size_t alignment,
					size_t limit, phys_addr_t *base)
{
	phys_addr_t paddr;

	if (!allow_memblock_alloc) {
		/* Too early — fixed-location regions still being placed. */
		*base = 0;
		return 0;
	}

	paddr = __memblock_alloc_base(size, alignment, limit);
	if (!paddr)
		return -ENOMEM;

	*base = paddr;
	return 0;
}

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
@@ -288,7 +309,11 @@ int __init cma_fdt_scan(unsigned long node, const char *uname,
/*
 * Place all CMA regions: fixed/DT-described regions are registered
 * first (of_scan_flat_dt), the arch is given a chance to refresh its
 * limits, and only then are dynamically placed regions allocated from
 * memblock — ensuring dynamic regions cannot overlap fixed ones.
 *
 * NOTE(review): this span is a scraped diff; an elided hunk gap (the
 * "@@" line below) hides the size_cmdline / sel_size selection logic,
 * so that part of the body is not visible here.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t sel_size = 0;
	int i;

#ifdef CONFIG_OF
	/* Register fixed/DT-described regions before any dynamic
	 * placement, so fixed regions cannot be overlapped. */
	of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
	pr_debug("%s(limit %pa)\n", __func__, &limit);

	if (size_cmdline != -1) {
@@ -305,18 +330,47 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
#endif
	}

	/* All fixed-location removals are done; let the architecture
	 * update its limits (on ARM this re-runs sanity_check_meminfo). */
	dma_contiguous_early_removal_fixup();
	/* From here on __dma_contiguous_reserve_memory() really
	 * allocates instead of deferring. */
	allow_memblock_alloc = true;

	for (i = 0; i < cma_area_count; i++) {
		/* base == 0 marks a region whose placement was deferred. */
		if (cma_areas[i].base == 0) {
			int ret;

			ret = __dma_contiguous_reserve_memory(
						cma_areas[i].size,
						cma_areas[i].alignment,
						cma_areas[i].limit,
						&cma_areas[i].base);
			if (ret) {
				pr_err("CMA: failed to reserve %ld MiB for %s\n",
				       (unsigned long)cma_areas[i].size / SZ_1M,
				       cma_areas[i].name);
				/* NOTE(review): this moves (cma_area_count - i)
				 * elements but only (cma_area_count - i - 1)
				 * follow index i, so one stale trailing entry
				 * is read/copied; in-bounds while count is
				 * below MAX_CMA_AREAS — worth confirming. */
				memmove(&cma_areas[i], &cma_areas[i+1],
				   (cma_area_count - i)*sizeof(cma_areas[i]));
				cma_area_count--;
				i--;
				continue;
			}
		}

		pr_info("CMA: reserved %ld MiB at %pa for %s\n",
			(unsigned long)cma_areas[i].size / SZ_1M,
			&cma_areas[i].base, cma_areas[i].name);
	}

	if (sel_size) {
		phys_addr_t base = 0;
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)sel_size / SZ_1M);

		/* NOTE(review): the scrape lost the diff's +/- markers —
		 * the next two "CMA_RESERVE_AREA" lines are the old and
		 * new versions of the same source line (the commit adds
		 * the trailing '{'). */
		if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL,
		    CMA_RESERVE_AREA ? 0 : 1, false) == 0)
		    CMA_RESERVE_AREA ? 0 : 1, false) == 0) {
			pr_info("CMA: reserved %ld MiB at %pa for default region\n",
				(unsigned long)sel_size / SZ_1M, &base);
			dma_contiguous_def_base = base;
		}
/* NOTE(review): the of_scan_flat_dt() block below is the hunk's removed
 * pre-image — the commit moves that call to the top of this function. */
#ifdef CONFIG_OF
	of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
	}
};

/**
@@ -369,20 +423,13 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			ret = -ENOMEM;
		ret = __dma_contiguous_reserve_memory(size, alignment, limit,
							&base);
		if (ret)
			goto err;
		} else {
			base = addr;
		}
	}

	if (remove) {
	if (base && remove) {
		if (!to_system) {
			memblock_free(base, size);
			memblock_remove(base, size);
@@ -398,15 +445,15 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
	cma_areas[cma_area_count].base = base;
	cma_areas[cma_area_count].size = size;
	cma_areas[cma_area_count].name = name;
	cma_areas[cma_area_count].alignment = alignment;
	cma_areas[cma_area_count].limit = limit;
	cma_areas[cma_area_count].to_system = to_system;
	cma_area_count++;
	*res_base = base;

	pr_info("CMA: reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		 &base);

	/* Architecture specific contiguous memory fixup. */
	if (!remove)
	if (!remove && base)
		dma_contiguous_early_fixup(base, size);
	return 0;
err: