Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit beef560b authored by Martin Schwidefsky
Browse files

s390/uaccess: simplify control register updates



Always switch to the kernel ASCE in switch_mm. Load the secondary
space ASCE in finish_arch_post_lock_switch after checking that
any pending page table operations have completed. The primary
ASCE is loaded in entry[64].S. With this the update_primary_asce
call can be removed from the switch_to macro and from the start
of switch_mm function. Remove the load_primary argument from
update_user_asce/clear_user_asce, rename update_user_asce to
set_user_asce and rename update_primary_asce to load_kernel_asce.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent f4192bf2
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -29,7 +29,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, newval, ret;

	update_primary_asce(current);
	load_kernel_asce();
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

@@ -79,7 +79,7 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
{
	int ret;

	update_primary_asce(current);
	load_kernel_asce();
	asm volatile(
		"   sacf 256\n"
		"0: cs   %1,%4,0(%5)\n"
+17 −26
Original line number Diff line number Diff line
@@ -30,33 +30,31 @@ static inline int init_new_context(struct task_struct *tsk,

#define destroy_context(mm)             do { } while (0)

static inline void update_user_asce(struct mm_struct *mm, int load_primary)
static inline void set_user_asce(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;

	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
	if (load_primary)
		__ctl_load(S390_lowcore.user_asce, 1, 1);
	set_fs(current->thread.mm_segment);
	set_thread_flag(TIF_ASCE);
}

static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
static inline void clear_user_asce(void)
{
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	if (load_primary)
	__ctl_load(S390_lowcore.user_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}

static inline void update_primary_asce(struct task_struct *tsk)
static inline void load_kernel_asce(void)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_tsk_thread_flag(tsk, TIF_ASCE);
	set_thread_flag(TIF_ASCE);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -64,25 +62,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
	int cpu = smp_processor_id();

	update_primary_asce(tsk);
	if (prev == next)
		return;
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	if (atomic_inc_return(&next->context.attach_count) >> 16) {
		/* Delay update_user_asce until all TLB flushes are done. */
		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
	/* Clear old ASCE by loading the kernel ASCE. */
		clear_user_asce(next, 0);
	} else {
		cpumask_set_cpu(cpu, mm_cpumask(next));
		update_user_asce(next, 0);
		if (next->context.flush_mm)
			/* Flush pending TLBs */
			__tlb_flush_mm(next);
	}
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	/* Delay loading of the new ASCE to control registers CR1 & CR7 */
	set_thread_flag(TIF_ASCE);
	atomic_inc(&next->context.attach_count);
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
@@ -93,15 +83,14 @@ static inline void finish_arch_post_lock_switch(void)
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
	if (!mm)
		return;
	preempt_disable();
	clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
	while (atomic_read(&mm->context.attach_count) >> 16)
		cpu_relax();

	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	update_user_asce(mm, 0);
	set_user_asce(mm);
	if (mm->context.flush_mm)
		__tlb_flush_mm(mm);
	preempt_enable();
@@ -114,6 +103,8 @@ static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
	switch_mm(prev, next, current);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	set_user_asce(next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
+0 −1
Original line number Diff line number Diff line
@@ -132,7 +132,6 @@ static inline void restore_access_regs(unsigned int *acrs)
		update_cr_regs(next);					\
	}								\
	prev = __switch_to(prev,next);					\
	update_primary_asce(current);					\
} while (0)

#define finish_arch_switch(prev) do {					     \
+1 −3
Original line number Diff line number Diff line
@@ -81,8 +81,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
#define TIF_ASCE		5	/* primary asce needs fixup / uaccess */
#define TIF_ASCE		5	/* user asce needs fixup / uaccess */
#define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
#define TIF_MCCK_PENDING	7	/* machine check handling is pending */
#define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
#define _TIF_ASCE		(1<<TIF_ASCE)
#define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
#define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
+1 −1
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING | _TIF_ASCE)
_TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		 _TIF_SYSCALL_TRACEPOINT)
_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_ASCE)

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
Loading