Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ed7b56a7 authored by Tejun Heo, committed by H. Peter Anvin
Browse files

memblock: Remove memblock_memory_can_coalesce()



Arch could implement memblock_memory_can_coalesce() to veto merging of
adjacent or overlapping memblock regions; however, no arch did and any
vetoing would trigger WARN_ON().  Memblock regions are supposed to
deal with proper memory anyway.  Remove the unused hook.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310462166-31469-2-git-send-email-tj@kernel.org


Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent eb40c4c2
Loading
Loading
Loading
Loading
+0 −4
Original line number Original line Diff line number Diff line
@@ -92,10 +92,6 @@ extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);


extern void memblock_dump_all(void);
extern void memblock_dump_all(void);


/* Provided by the architecture */
extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
				   phys_addr_t addr2, phys_addr_t size2);

/**
/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         limiting allocations to what is currently
+0 −29
Original line number Original line Diff line number Diff line
@@ -251,12 +251,6 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
	return 0;
	return 0;
}
}


extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					  phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long __init_memblock memblock_add_region(struct memblock_type *type,
static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
						phys_addr_t base, phys_addr_t size)
{
{
@@ -282,17 +276,6 @@ static long __init_memblock memblock_add_region(struct memblock_type *type,
		 * of a block.
		 * of a block.
		 */
		 */
		if (base < rgn->base && end >= rgn->base) {
		if (base < rgn->base && end >= rgn->base) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(base, size,
							  rgn->base,
							  rgn->size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(end != rgn->base);
				goto new_block;
			}
			/* We extend the bottom of the block down to our
			/* We extend the bottom of the block down to our
			 * base
			 * base
			 */
			 */
@@ -316,17 +299,6 @@ static long __init_memblock memblock_add_region(struct memblock_type *type,
		 * top of a block
		 * top of a block
		 */
		 */
		if (base <= rend && end >= rend) {
		if (base <= rend && end >= rend) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(rgn->base,
							  rgn->size,
							  base, size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(rend != base);
				goto new_block;
			}
			/* We adjust our base down to enclose the
			/* We adjust our base down to enclose the
			 * original block and destroy it. It will be
			 * original block and destroy it. It will be
			 * part of our new allocation. Since we've
			 * part of our new allocation. Since we've
@@ -349,7 +321,6 @@ static long __init_memblock memblock_add_region(struct memblock_type *type,
		return 0;
		return 0;
	}
	}


 new_block:
	/* If we are out of space, we fail. It's too late to resize the array
	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 * but then this shouldn't have happened in the first place.
	 */
	 */