
Commit e423c0c3 authored by Russell King

Merge branch 'intr-ctxsw' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux into devel-stable
parents 69964ea4 b9d4d42a
arch/arm/include/asm/mmu.h +0 −7
@@ -34,11 +34,4 @@ typedef struct {
 
 #endif
 
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-
 #endif
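
The macro removed above told the generic scheduler to re-enable interrupts across context_switch(), because switch_mm() may be slow (a full VIVT cache flush, or an IPI-based ASID rollover). The branch merged here drops that and instead keeps interrupts disabled during the switch, deferring any slow work to the new finish_arch_post_lock_switch() hook, which the scheduler invokes once the runqueue lock has been dropped. Below is a minimal standalone sketch of that deferral pattern; it is illustration only, and the names merely mirror the kernel's:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the deferral this series introduces; not kernel code. */
static bool irqs_off;           /* stands in for irqs_disabled() */
static bool tif_switch_mm;      /* stands in for the TIF_SWITCH_MM flag */

static void cpu_switch_mm(int pgd)      /* the potentially slow MMU switch */
{
	printf("switched to pgd %d\n", pgd);
}

static void check_and_switch_context(int pgd)
{
	if (irqs_off)
		tif_switch_mm = true;   /* defer: too slow to run with IRQs off */
	else
		cpu_switch_mm(pgd);
}

static void finish_arch_post_lock_switch(int pgd)
{
	if (tif_switch_mm) {            /* IRQs are enabled again here */
		tif_switch_mm = false;
		cpu_switch_mm(pgd);
	}
}

int main(void)
{
	irqs_off = true;                /* context_switch() runs with IRQs off */
	check_and_switch_context(42);   /* only sets the flag */
	irqs_off = false;               /* runqueue lock dropped */
	finish_arch_post_lock_switch(42);
	return 0;
}
```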
arch/arm/include/asm/mmu_context.h +79 −25
@@ -43,45 +43,104 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
-#ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-	/*
-	 * This code is executed with interrupts enabled. Therefore,
-	 * mm->context.id cannot be updated to the latest ASID version
-	 * on a different CPU (and condition below not triggered)
-	 * without first getting an IPI to reset the context. The
-	 * alternative is to take a read_lock on mm->context.id_lock
-	 * (after changing its type to rwlock_t).
-	 */
-	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		__new_context(mm);
+	unsigned long flags;
+
+	__new_context(mm);
+
+	local_irq_save(flags);
+	cpu_switch_mm(mm->pgd, mm);
+	local_irq_restore(flags);
+}
 
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		/*
+		 * The ASID is from the current generation, just switch to the
+		 * new pgd. This condition is only true for calls from
+		 * context_switch() and interrupts are already disabled.
+		 */
+		cpu_switch_mm(mm->pgd, mm);
+	else if (irqs_disabled())
+		/*
+		 * Defer the new ASID allocation until after the context
+		 * switch critical region since __new_context() cannot be
+		 * called with interrupts disabled (it sends IPIs).
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		/*
+		 * That is a direct call to switch_mm() or activate_mm() with
+		 * interrupts enabled and a new context.
+		 */
+		switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
-#else
-
-static inline void check_context(struct mm_struct *mm)
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
 {
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+		switch_new_context(current->mm);
+}
+
+#else	/* !CONFIG_CPU_HAS_ASID */
+
 #ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
-#endif
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		cpu_switch_mm(mm->pgd, mm);
 }
 
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+		cpu_switch_mm(mm->pgd, mm);
+	}
+}
+
+#endif	/* CONFIG_MMU */
+
 #define init_new_context(tsk,mm)	0
 
-#endif
+#endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)

@@ -119,12 +178,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-#ifdef CONFIG_SMP
-		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
-		*crt_mm = next;
-#endif
-		check_context(next);
-		cpu_switch_mm(next->pgd, next);
+		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
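
For reference, the (mm->context.id ^ cpu_last_asid) >> ASID_BITS test above compares only the generation bits that sit above the hardware ASID: XOR clears the bits that agree, and the shift discards the ASID itself, so any non-zero result means the mm last ran in an older ASID generation and needs a fresh one from __new_context(). A self-contained arithmetic example, assuming the 8-bit hardware ASID these CPUs use:

```c
#include <stdio.h>

#define ASID_BITS 8	/* hardware ASID width assumed here, as on ARMv6+ */

int main(void)
{
	unsigned int cpu_last_asid = (2u << ASID_BITS) | 5;	/* gen 2, ASID 5 */
	unsigned int same_gen  = (2u << ASID_BITS) | 9;		/* gen 2, ASID 9 */
	unsigned int stale_gen = (1u << ASID_BITS) | 9;		/* gen 1: stale */

	/* 0 => current generation; non-zero => allocate a new ASID */
	printf("same gen:  %u\n", (same_gen ^ cpu_last_asid) >> ASID_BITS);
	printf("stale gen: %u\n", (stale_gen ^ cpu_last_asid) >> ASID_BITS);
	return 0;
}
```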
arch/arm/include/asm/thread_info.h +1 −0
@@ -146,6 +146,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
+#define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
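
The new flag records, per task, that the mm switch was deferred. The pairing relies on test_and_clear_thread_flag() being an atomic read-and-clear, so the deferred switch runs at most once however the task leaves the scheduler. A toy model of those semantics with C11 atomics (illustration only; the kernel's helpers operate on thread_info::flags):

```c
#include <stdatomic.h>
#include <stdio.h>

#define TIF_SWITCH_MM 22

static atomic_uint thread_flags;	/* stands in for thread_info::flags */

static int test_and_clear_flag(int flag)
{
	unsigned int old = atomic_fetch_and(&thread_flags, ~(1u << flag));
	return (old >> flag) & 1;	/* old value: was the flag set? */
}

int main(void)
{
	atomic_fetch_or(&thread_flags, 1u << TIF_SWITCH_MM);
	printf("%d\n", test_and_clear_flag(TIF_SWITCH_MM));	/* 1: do the switch */
	printf("%d\n", test_and_clear_flag(TIF_SWITCH_MM));	/* 0: nothing pending */
	return 0;
}
```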
arch/arm/mm/context.c +28 −29
@@ -18,30 +18,39 @@
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 #ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) {						\
-	unsigned long ttbl, ttbh;					\
-	asm volatile(							\
-	"	mrrc	p15, 0, %0, %1, c2		@ read TTBR0\n"	\
-	"	mov	%1, %2, lsl #(48 - 32)		@ set ASID\n"	\
-	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"	\
-	: "=&r" (ttbl), "=&r" (ttbh)					\
-	: "r" (asid & ~ASID_MASK));					\
+void cpu_set_reserved_ttbr0(void)
+{
+	unsigned long ttbl = __pa(swapper_pg_dir);
+	unsigned long ttbh = 0;
+
+	/*
+	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+	 * ASID is set to 0.
+	 */
+	asm volatile(
+	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
+	:
+	: "r" (ttbl), "r" (ttbh));
+	isb();
 }
 #else
-#define cpu_set_asid(asid) \
-	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+void cpu_set_reserved_ttbr0(void)
+{
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile(
+	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
+	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
+	: "=r" (ttb));
+	isb();
+}
 #endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in.  We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -51,9 +60,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	cpu_set_asid(0);
-	isb();
+	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
 		__flush_icache_all();
@@ -98,14 +105,7 @@ static void reset_context(void *info)
 {
 	unsigned int asid;
 	unsigned int cpu = smp_processor_id();
-	struct mm_struct *mm = per_cpu(current_mm, cpu);
-
-	/*
-	 * Check if a current_mm was set on this CPU as it might still
-	 * be in the early booting stages and using the reserved ASID.
-	 */
-	if (!mm)
-		return;
+	struct mm_struct *mm = current->active_mm;
 
 	smp_rmb();
 	asid = cpu_last_asid + cpu + 1;
@@ -114,8 +114,7 @@ static void reset_context(void *info)
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	cpu_set_asid(mm->context.id);
-	isb();
+	cpu_switch_mm(mm->pgd, mm);
 }
 
 #else
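
The removed LPAE cpu_set_asid() macro encoded the 64-bit TTBR0 layout: the 8-bit ASID occupies bits [55:48], i.e. bit 16 of the high 32-bit word, which is the "lsl #(48 - 32)" above. cpu_set_reserved_ttbr0() makes that juggling unnecessary by parking TTBR0 on swapper_pg_dir, which contains only global mappings and uses ASID 0 (on the classic MMU it simply copies TTBR1). A small standalone illustration of the LPAE bit layout, with a made-up table base address:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pgd_phys = 0x80004000ull;	/* hypothetical pgd base */
	uint32_t asid = 0x2a;

	/* LPAE TTBR0: translation table base in the low bits,
	 * ASID at bits [55:48] of the 64-bit register. */
	uint64_t ttbr0 = pgd_phys | ((uint64_t)asid << 48);

	printf("TTBR0     = %#018llx\n", (unsigned long long)ttbr0);
	printf("high word = %#010llx (ASID at bit 16)\n",
	       (unsigned long long)(ttbr0 >> 32));
	return 0;
}
```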
arch/arm/mm/proc-v7.S +2 −7
@@ -46,18 +46,13 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
-	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
-	isb
-1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
-	isb
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 	isb
+	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
 #endif
 	mov	pc, lr
ENDPROC(cpu_v7_switch_mm)
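
The deleted instructions were the reserved-ASID dance cpu_v7_switch_mm() needed while it could be entered with interrupts enabled: switch to ASID 0, write the new TTB, then write the real ASID. After this series the caller has already parked TTBR0 on the reserved, global-only tables via cpu_set_reserved_ttbr0(), so writing the new context ID before the new TTB leaves no window in which speculative table walks could tag old-pgd translations with the new ASID. A toy model of that reasoning (illustration only, not kernel code):

```c
#include <stdio.h>

static int ttbr0 = 1;	/* old task's page tables */
static int asid = 1;	/* old task's ASID */

/* A speculative walk tags whatever TTBR0 points at with the current ASID. */
static void speculative_walk(void)
{
	printf("walk: pgd %d tagged with ASID %d\n", ttbr0, asid);
}

int main(void)
{
	ttbr0 = 0;		/* cpu_set_reserved_ttbr0(): global-only tables */
	speculative_walk();	/* harmless: nothing process-specific cached */
	asid = 2;		/* "set context ID" */
	speculative_walk();	/* still walks only the reserved tables */
	ttbr0 = 2;		/* "set TTB 0": new pgd under the new ASID */
	speculative_walk();
	return 0;
}
```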