Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 32ce55a6 authored by Vasily Gorbik, committed by Martin Schwidefsky
Browse files

s390: unify stack size definitions



Remove STACK_ORDER and STACK_SIZE in favour of identical THREAD_SIZE_ORDER
and THREAD_SIZE definitions. THREAD_SIZE and THREAD_SIZE_ORDER naming is
misleading since it is used as general kernel stack size information. But
both those definitions are used in the common code and throughout
architectures specific code, so changing the naming is problematic.

Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent ce3dc447
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -162,14 +162,6 @@ struct thread_struct {

typedef struct thread_struct thread_struct;

/*
 * General size of a stack
 */
#define STACK_ORDER 2
#define STACK_SIZE (PAGE_SIZE << STACK_ORDER)
#define STACK_INIT_OFFSET \
	(STACK_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))

/*
 * Stack layout of a C stack frame.
 */
+4 −1
Original line number Diff line number Diff line
@@ -11,7 +11,7 @@
#include <linux/const.h>

/*
 * Size of kernel stack for each process
 * General size of kernel stacks
 */
#define THREAD_SIZE_ORDER 2
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
@@ -21,6 +21,9 @@
#include <asm/page.h>
#include <asm/processor.h>

#define STACK_INIT_OFFSET \
	(THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
+2 −2
Original line number Diff line number Diff line
@@ -77,11 +77,11 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.nodat_stack + frame_size - STACK_SIZE,
			  S390_lowcore.nodat_stack + frame_size - THREAD_SIZE,
			  S390_lowcore.nodat_stack + frame_size);
#endif
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.async_stack + frame_size - STACK_SIZE,
			  S390_lowcore.async_stack + frame_size - THREAD_SIZE,
			  S390_lowcore.async_stack + frame_size);
	task = task ?: current;
	__dump_trace(func, data, sp,
+1 −1
Original line number Diff line number Diff line
@@ -171,7 +171,7 @@ void do_softirq_own_stack(void)
	old = current_stack_pointer();
	/* Check against async. stack address range. */
	new = S390_lowcore.async_stack;
	if (((new - old) >> (PAGE_SHIFT + STACK_ORDER)) != 0) {
	if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
		CALL_ON_STACK(__do_softirq, new, 0);
	} else {
		/* We are already on the async stack. */
+6 −6
Original line number Diff line number Diff line
@@ -308,13 +308,13 @@ unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	return (unsigned long)
		__vmalloc_node_range(STACK_SIZE, STACK_SIZE,
		__vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP,
				     PAGE_KERNEL, 0, NUMA_NO_NODE,
				     __builtin_return_address(0));
#else
	return __get_free_pages(GFP_KERNEL, STACK_ORDER);
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

@@ -323,7 +323,7 @@ void stack_free(unsigned long stack)
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, STACK_ORDER);
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}

@@ -331,7 +331,7 @@ int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, STACK_ORDER);
	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
@@ -347,7 +347,7 @@ static int __init async_stack_realloc(void)
	if (!new)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
	free_pages(old, STACK_ORDER);
	free_pages(old, THREAD_SIZE_ORDER);
	return 0;
}
early_initcall(async_stack_realloc);
@@ -428,7 +428,7 @@ static void __init setup_lowcore(void)
	 * Allocate the global restart stack which is the same for
	 * all CPUs in cast *one* of them does a PSW restart.
	 */
	restart_stack = memblock_virt_alloc(STACK_SIZE, STACK_SIZE);
	restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
Loading