
Commit f8b13505 authored by Martin Schwidefsky

s390/uaccess: always load the kernel ASCE after task switch



This patch fixes a problem introduced with git commit beef560b
"s390/uaccess: simplify control register updates".

The switch_mm function is not called if the next process is a kernel
thread without an attached mm, and it is a nop if the mm does not
change. But CR1 still needs to be loaded with the kernel ASCE in case
the code returns to a uaccess function that uses the secondary space
mode.
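
For context: the fix below makes finish_arch_post_lock_switch() call
load_kernel_asce() unconditionally, so CR1 holds the kernel ASCE again
even when switch_mm() was skipped or was a nop. The helper's body is
not part of this diff; the following is only a minimal sketch of what
it plausibly does, assuming the __ctl_store/__ctl_load primitives and
the S390_lowcore fields used elsewhere in the patch:

static inline void load_kernel_asce(void)
{
	unsigned long asce;

	/* Reload CR1 only if it no longer points at the kernel ASCE. */
	__ctl_store(asce, 1, 1);
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	/* Have the return-to-user path restore the user ASCE later. */
	set_cpu_flag(CIF_ASCE);
}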

In addition, move the set_fs call from finish_arch_switch to
finish_arch_post_lock_switch, and then remove finish_arch_switch.
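
The relocated set_fs() call pairs with the mm_segment.ar4 test added to
set_user_asce() in the first hunk: the address-space override decides
whether CR7, used for secondary space mode, holds the user or the
kernel ASCE. The set_fs macro itself is not touched by this patch; a
rough, assumed sketch of its shape, for illustration only:

#define set_fs(x)							\
({									\
	unsigned long __asce;						\
									\
	current->thread.mm_segment = (x);				\
	/* USER_DS (ar4 set): uaccess runs in secondary space */	\
	/* on the user ASCE; KERNEL_DS keeps the kernel ASCE.  */	\
	__asce = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__asce, 7, 7);					\
})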

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent c1a42f49
arch/s390/include/asm/mmu_context.h  +16 −17
@@ -33,10 +33,9 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-	pgd_t *pgd = mm->pgd;
-
-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	set_fs(current->thread.mm_segment);
+	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+	if (current->thread.mm_segment.ar4)
+		__ctl_load(S390_lowcore.user_asce, 7, 7);
 	set_cpu_flag(CIF_ASCE);
 }
 
@@ -70,12 +69,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	/* Clear old ASCE by loading the kernel ASCE. */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
-	/* Delay loading of the new ASCE to control registers CR1 & CR7 */
-	set_cpu_flag(CIF_ASCE);
 	atomic_inc(&next->context.attach_count);
 	atomic_dec(&prev->context.attach_count);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -84,18 +82,19 @@ static inline void finish_arch_post_lock_switch(void)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	if (!mm)
-		return;
-	preempt_disable();
-	while (atomic_read(&mm->context.attach_count) >> 16)
-		cpu_relax();
-
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	set_user_asce(mm);
-	if (mm->context.flush_mm)
-		__tlb_flush_mm(mm);
-	preempt_enable();
+	load_kernel_asce();
+	if (mm) {
+		preempt_disable();
+		while (atomic_read(&mm->context.attach_count) >> 16)
+			cpu_relax();
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		set_user_asce(mm);
+		if (mm->context.flush_mm)
+			__tlb_flush_mm(mm);
+		preempt_enable();
+	}
+	set_fs(current->thread.mm_segment);
 }
 
 #define enter_lazy_tlb(mm,tsk)	do { } while (0)
 #define deactivate_mm(tsk,mm)	do { } while (0)
arch/s390/include/asm/switch_to.h  +0 −4
@@ -134,8 +134,4 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next);					\
 } while (0)
 
-#define finish_arch_switch(prev) do {					     \
-	set_fs(current->thread.mm_segment);				     \
-} while (0)
-
 #endif /* __ASM_SWITCH_TO_H */