Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 50be6345 authored by Philipp Hachtmann, committed by Martin Schwidefsky
Browse files

s390/mm: Convert bootmem to memblock



The original bootmem allocator is being replaced by memblock. To
cover the needs of the s390 kdump implementation, the physical memory
list is used.
With this patch the bootmem allocator and its bitmaps are completely
removed from s390.

Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 70210ed9
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -60,7 +60,6 @@ config PCI_QUIRKS

config S390
	def_bool y
	select ARCH_DISCARD_MEMBLOCK
	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
	select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -130,6 +129,7 @@ config S390
	select HAVE_KVM if 64BIT
	select HAVE_MEMBLOCK
	select HAVE_MEMBLOCK_NODE_MAP
	select HAVE_MEMBLOCK_PHYS_MAP
	select HAVE_MOD_ARCH_SPECIFIC
	select HAVE_OPROFILE
	select HAVE_PERF_EVENTS
@@ -139,6 +139,7 @@ config S390
	select HAVE_VIRT_CPU_ACCOUNTING
	select KTIME_SCALAR if 32BIT
	select MODULES_USE_ELF_RELA
	select NO_BOOTMEM
	select OLD_SIGACTION
	select OLD_SIGSUSPEND3
	select SYSCTL_EXCEPTION_TRACE
+2 −14
Original line number Diff line number Diff line
@@ -9,7 +9,6 @@


#define PARMAREA		0x10400
#define MEMORY_CHUNKS		256

#ifndef __ASSEMBLY__

@@ -31,22 +30,11 @@
#endif /* CONFIG_64BIT */
#define COMMAND_LINE      ((char *)            (0x10480))

#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY  1

/* Descriptor for one contiguous region of physical memory (pre-memblock
 * memory layout; removed by this patch in favor of memblock). */
struct mem_chunk {
	unsigned long addr;	/* start address of the region */
	unsigned long size;	/* length of the region in bytes */
	int type;		/* CHUNK_READ_WRITE or CHUNK_READ_ONLY */
};

extern struct mem_chunk memory_chunk[];
extern int memory_end_set;
extern unsigned long memory_end;
extern unsigned long max_physmem_end;

void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
		     unsigned long size);
extern void detect_memory_memblock(void);

/*
 * Machine features detected in head.S
+39 −44
Original line number Diff line number Diff line
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
@@ -22,6 +23,24 @@
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))

/* Single region describing the previous (crashed) kernel's memory;
 * filled in from OLDMEM_BASE/OLDMEM_SIZE in elfcorehdr_alloc(). */
static struct memblock_region oldmem_region;

/* One-entry memblock type wrapping oldmem_region, used to clip the
 * crashkernel area out of the dump iteration below. total_size is
 * set from OLDMEM_SIZE when a kdump kernel is running. */
static struct memblock_type oldmem_type = {
	.cnt = 1,
	.max = 1,
	.total_size = 0,
	.regions = &oldmem_region,
};

/*
 * Iterate over the physical memory ranges to be dumped. Walks
 * memblock.physmem via __next_mem_range(), with oldmem_type as the
 * second type — presumably excluding the old crashkernel region from
 * the walk (NOTE(review): confirm against __next_mem_range semantics
 * for this kernel version). Iteration ends when the cursor reaches
 * ULLONG_MAX.
 */
#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)		\
	for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,	\
				     &oldmem_type, p_start,		\
				     p_end, p_nid);			\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, &memblock.physmem,		\
			      &oldmem_type,				\
			      p_start, p_end, p_nid))

struct dump_save_areas dump_save_areas;

/*
@@ -263,19 +282,6 @@ static void *kzalloc_panic(int len)
	return rc;
}

/*
 * Get memory layout and create hole for oldmem
 *
 * Returns a MEMORY_CHUNKS-sized array describing physical memory with
 * the crashkernel region (OLDMEM_BASE..OLDMEM_BASE+OLDMEM_SIZE) punched
 * out. The caller owns the array and must kfree() it.
 */
static struct mem_chunk *get_memory_layout(void)
{
	struct mem_chunk *chunk_array;

	/* kzalloc_panic() never returns NULL — it panics on failure. */
	chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
	detect_memory_layout(chunk_array, 0);
	create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
	return chunk_array;
}

/*
 * Initialize ELF note
 */
@@ -490,52 +496,33 @@ static int get_cpu_cnt(void)
 */
/*
 * NOTE(review): this span is a rendered diff whose removed (old
 * mem_chunk-array walk) and added (memblock iteration) lines have been
 * fused together without +/- markers; it is not compilable as shown.
 * The post-patch function simply counts the ranges reported by
 * for_each_dump_mem_range(). Left byte-identical below.
 */
static int get_mem_chunk_cnt(void)
{
	struct mem_chunk *chunk_array, *mem_chunk;
	int i, cnt = 0;
	int cnt = 0;
	u64 idx;

	chunk_array = get_memory_layout();
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		mem_chunk = &chunk_array[i];
		if (chunk_array[i].type != CHUNK_READ_WRITE &&
		    chunk_array[i].type != CHUNK_READ_ONLY)
			continue;
		if (mem_chunk->size == 0)
			continue;
	for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL)
		cnt++;
	}
	kfree(chunk_array);
	return cnt;
}

/*
 * Initialize ELF loads (new kernel)
 *
 * NOTE(review): rendered diff with old and new lines fused (both the
 * old int-returning and new void-returning signatures appear); not
 * compilable as shown. The post-patch function fills one PT_LOAD
 * program header per dumpable memory range. Left byte-identical below.
 */
static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
	struct mem_chunk *chunk_array, *mem_chunk;
	int i;
	phys_addr_t start, end;
	u64 idx;

	chunk_array = get_memory_layout();
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		mem_chunk = &chunk_array[i];
		if (mem_chunk->size == 0)
			continue;
		if (chunk_array[i].type != CHUNK_READ_WRITE &&
		    chunk_array[i].type != CHUNK_READ_ONLY)
			continue;
		else
			phdr->p_filesz = mem_chunk->size;
	for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) {
		phdr->p_filesz = end - start;
		phdr->p_type = PT_LOAD;
		phdr->p_offset = mem_chunk->addr;
		phdr->p_vaddr = mem_chunk->addr;
		phdr->p_paddr = mem_chunk->addr;
		phdr->p_memsz = mem_chunk->size;
		phdr->p_offset = start;
		phdr->p_vaddr = start;
		phdr->p_paddr = start;
		phdr->p_memsz = end - start;
		phdr->p_flags = PF_R | PF_W | PF_X;
		phdr->p_align = PAGE_SIZE;
		phdr++;
	}
	kfree(chunk_array);
	return i;
}

/*
@@ -584,6 +571,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
	/* If we cannot get HSA size for zfcpdump return error */
	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
		return -ENODEV;

	/* For kdump, exclude previous crashkernel memory */
	if (OLDMEM_BASE) {
		oldmem_region.base = OLDMEM_BASE;
		oldmem_region.size = OLDMEM_SIZE;
		oldmem_type.total_size = OLDMEM_SIZE;
	}

	mem_chunk_cnt = get_mem_chunk_cnt();

	alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
+6 −0
Original line number Diff line number Diff line
@@ -258,13 +258,19 @@ static __init void setup_topology(void)
/*
 * Early program-check handler: look up the faulting address in the
 * exception tables and rewrite the old program PSW so execution resumes
 * at the fixup; enters a disabled wait if no fixup entry exists.
 */
static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
	if (!fixup)
		disabled_wait(0);
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);	/* clear the low-address-protection control bit */
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
	__ctl_load(cr0, 0, 0);	/* restore original control register 0 */
}

static noinline __init void setup_lowcore_early(void)
+0 −1
Original line number Diff line number Diff line
@@ -59,7 +59,6 @@ ENTRY(startup_continue)
	.long	0			# cr13: home space segment table
	.long	0xc0000000		# cr14: machine check handling off
	.long	0			# cr15: linkage stack operations
.Lmchunk:.long	memory_chunk
.Lbss_bgn:  .long __bss_start
.Lbss_end:  .long _end
.Lparmaddr: .long PARMAREA
Loading