
Commit 6966d604 authored by Vasily Gorbik, committed by Martin Schwidefsky

s390/mem_detect: move tprot loop to early boot phase



Move memory detection to the early boot phase. To store online memory
regions, "struct mem_detect_info" has been introduced together with the
for_each_mem_detect_block iterator. mem_detect_info is later converted
to memblock.

Also introduces the sclp_early_get_meminfo function to get the maximum
physical memory and the maximum increment number.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 17aacfbf
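The commit message notes that mem_detect_info is later converted to memblock. A minimal sketch of what that conversion could look like, using the for_each_mem_detect_block iterator introduced below; the helper name memblock_add_mem_detect_info and its placement are illustrative assumptions, not part of this commit:

#include <linux/memblock.h>
#include <asm/mem_detect.h>

static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	/* register every detected online range with memblock */
	for_each_mem_detect_block(i, &start, &end)
		memblock_add(start, end - start);
}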
arch/s390/boot/Makefile +1 −1
@@ -27,7 +27,7 @@ endif

CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char

-obj-y	:= head.o als.o startup.o ebcdic.o sclp_early_core.o mem.o
+obj-y	:= head.o als.o startup.o mem_detect.o ebcdic.o sclp_early_core.o mem.o
targets	:= bzImage startup.a $(obj-y)
subdir-	:= compressed

arch/s390/boot/boot.h +1 −0
@@ -3,5 +3,6 @@
#define BOOT_BOOT_H

void startup_kernel(void);
+void detect_memory(void);

#endif /* BOOT_BOOT_H */
arch/s390/boot/mem_detect.c (new file) +133 −0
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include "compressed/decompressor.h"
#include "boot.h"

#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY  1

unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX						       \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))

/*
 * To avoid corrupting old kernel memory during dump, find the lowest memory
 * chunk possible, either right after the kernel end (decompressed kernel) or
 * after the initrd (if it is present and there is no hole between the kernel
 * end and the initrd).
 */
static void *mem_detect_alloc_extended(void)
{
	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
	    INITRD_START < offset + ENTRIES_EXTENDED_MAX)
		offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));

	return (void *)offset;
}

static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &mem_detect.entries[n];
	if (unlikely(!mem_detect.entries_extended))
		mem_detect.entries_extended = mem_detect_alloc_extended();
	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_mem_detect_block with adjacent memory areas
 * are merged together into a single memory block.
 */
void add_mem_detect_block(u64 start, u64 end)
{
	struct mem_detect_block *block;

	if (mem_detect.count) {
		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
		if (block->end == start) {
			block->end = end;
			return;
		}
	}

	block = __get_mem_detect_block_ptr(mem_detect.count);
	block->start = start;
	block->end = end;
	mem_detect.count++;
}

static unsigned long get_mem_detect_end(void)
{
	if (mem_detect.count)
		return __get_mem_detect_block_ptr(mem_detect.count - 1)->end;
	return 0;
}

/*
 * Probe the storage at @addr with the TEST PROTECTION (tprot) instruction
 * and return its condition code: 0 if the location is readable and writable,
 * 1 if it is read-only. If the address is not backed by memory, the access
 * raises a program check, which the temporary program-check PSW installed
 * below absorbs, leaving rc at -EFAULT.
 */
static int tprot(unsigned long addr)
{
	unsigned long pgm_addr;
	int rc = -EFAULT;
	psw_t old = S390_lowcore.program_new_psw;

	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[pgm_addr],1f\n"
		"	stg	%[pgm_addr],%[psw_pgm_addr]\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: [pgm_addr] "=&d"(pgm_addr),
		  [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
		  [rc] "+&d"(rc)
		: [addr] "a"(addr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc;
}

/*
 * Walk physical memory in steps of the storage increment size rzm (in bytes,
 * defaulting to 1 MiB if SCLP did not report it) and record contiguous
 * read-write or read-only ranges as online memory blocks.
 */
static void scan_memory(unsigned long rzm)
{
	unsigned long addr, size;
	int type;

	if (!rzm)
		rzm = 1UL << 20;

	addr = 0;
	do {
		size = 0;
		/* assume lowcore is writable */
		type = addr ? tprot(addr) : CHUNK_READ_WRITE;
		do {
			size += rzm;
			if (max_physmem_end && addr + size >= max_physmem_end)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			if (max_physmem_end && (addr + size > max_physmem_end))
				size = max_physmem_end - addr;
			add_mem_detect_block(addr, addr + size);
		}
		addr += size;
	} while (addr < max_physmem_end);
}

void detect_memory(void)
{
	unsigned long rzm;

	sclp_early_get_meminfo(&max_physmem_end, &rzm);
	scan_memory(rzm);
	mem_detect.info_source = MEM_DETECT_TPROT_LOOP;
	if (!max_physmem_end)
		max_physmem_end = get_mem_detect_end();
}
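detect_memory() relies on sclp_early_get_meminfo, which this commit introduces in the SCLP early code (not shown in this excerpt). Conceptually it reports the storage increment size (rzm) and the maximum physical memory address, the latter being the increment size multiplied by the maximum increment number from the SCLP read-info data. A minimal illustration of that relationship, with hypothetical names, assuming the increment size is reported in megabytes:

/* Illustrative only: the real helper reads the increment count and size
 * from the SCLP read-info response instead of taking them as parameters. */
static void meminfo_from_increments(unsigned long rnmax, unsigned long rnsize_mb,
				    unsigned long *mem, unsigned long *rzm)
{
	unsigned long rnsize = rnsize_mb << 20;	/* increment size in bytes */

	*rzm = rnsize;			/* storage increment size */
	*mem = rnsize * rnmax;		/* end of the last storage increment */
}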
arch/s390/boot/startup.c +1 −0
@@ -51,6 +51,7 @@ void startup_kernel(void)

	rescue_initrd();
	sclp_early_read_info();
+	detect_memory();
	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
arch/s390/include/asm/mem_detect.h (new file) +77 −0
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H

#include <linux/types.h>

enum mem_info_source {
	MEM_DETECT_NONE = 0,
	MEM_DETECT_TPROT_LOOP
};

struct mem_detect_block {
	u64 start;
	u64 end;
};

/*
 * Storage element id is defined as 1 byte (up to 256 storage elements).
 * In practice only storage element ids 0 and 1 are used.
 * According to the architecture one storage element could have as many as
 * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
 * If more mem_detect_blocks are required, a block of memory from an already
 * known mem_detect_block is taken (entries_extended points to it).
 */
#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */

struct mem_detect_info {
	u32 count;
	u8 info_source;
	struct mem_detect_block entries[MEM_INLINED_ENTRIES];
	struct mem_detect_block *entries_extended;
};
extern struct mem_detect_info mem_detect;

static inline int __get_mem_detect_block(u32 n, unsigned long *start,
					 unsigned long *end)
{
	if (n >= mem_detect.count) {
		*start = 0;
		*end = 0;
		return -1;
	}

	if (n < MEM_INLINED_ENTRIES) {
		*start = (unsigned long)mem_detect.entries[n].start;
		*end = (unsigned long)mem_detect.entries[n].end;
	} else {
		*start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
		*end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
	}
	return 0;
}

/**
 * for_each_mem_detect_block - early online memory range iterator
 * @i: an integer used as loop variable
 * @p_start: ptr to unsigned long for start address of the range
 * @p_end: ptr to unsigned long for end address of the range
 *
 * Walks over detected online memory ranges.
 */
#define for_each_mem_detect_block(i, p_start, p_end)			\
	for (i = 0, __get_mem_detect_block(i, p_start, p_end);		\
	     i < mem_detect.count;					\
	     i++, __get_mem_detect_block(i, p_start, p_end))

static inline void get_mem_detect_reserved(unsigned long *start,
					   unsigned long *size)
{
	*start = (unsigned long)mem_detect.entries_extended;
	if (mem_detect.count > MEM_INLINED_ENTRIES)
		*size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
	else
		*size = 0;
}

#endif
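A short usage sketch (hypothetical, not part of this commit) of how the kernel proper could later use get_mem_detect_reserved to keep the memory holding the extended entries reserved until the information has been consumed:

	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);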