
Commit 2079f30e authored by Russell King
Merge branch 'asid-allocation' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
parents f27d9b71 bf51bb82
arch/arm/include/asm/mmu.h +4 −7
@@ -5,18 +5,15 @@

typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
-	unsigned int id;
-	raw_spinlock_t id_lock;
+	u64 id;
#endif
	unsigned int kvm_seq;
} mm_context_t;

#ifdef CONFIG_CPU_HAS_ASID
-#define ASID(mm)	((mm)->context.id & 255)
-
-/* init_mm.context.id_lock should be initialized. */
-#define INIT_MM_CONTEXT(name)                                                 \
-	.context.id_lock    = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+#define ASID_BITS	8
+#define ASID_MASK	((~0ULL) << ASID_BITS)
+#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
#else
#define ASID(mm)	(0)
#endif
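
Note on the new layout: context.id is now a 64-bit value whose low ASID_BITS carry the hardware ASID and whose upper bits carry the allocator's generation counter; the old per-mm id_lock disappears because the field is only rewritten under the allocator's own lock (see the context.c changes below). A minimal standalone sketch of the packing, reusing the macro values above; GENERATION(), the raw-id form of ASID() and the main() driver are illustrative only, not part of the change:

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	((~0ULL) << ASID_BITS)		/* generation bits live above bit 7 */
#define ASID(id)	((id) & ~ASID_MASK)		/* hardware ASID: low 8 bits */
#define GENERATION(id)	((id) & ASID_MASK)		/* illustrative helper, not in the patch */

int main(void)
{
	/* e.g. third generation, hardware ASID 0x2a */
	uint64_t id = (3ULL << ASID_BITS) | 0x2a;

	printf("context.id=%#llx generation=%llu asid=%#llx\n",
	       (unsigned long long)id,
	       (unsigned long long)(GENERATION(id) >> ASID_BITS),
	       (unsigned long long)ASID(id));
	return 0;
}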
arch/arm/include/asm/mmu_context.h +3 −79
@@ -24,84 +24,8 @@ void __check_kvm_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * |      process ID         |   ASID    |
- * +-------------------------+-----------+
- * |              context ID             |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS		8
-#define ASID_MASK		((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION	(1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-	unsigned long flags;
-
-	__new_context(mm);
-
-	local_irq_save(flags);
-	cpu_switch_mm(mm->pgd, mm);
-	local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-					    struct task_struct *tsk)
-{
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
-
-	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
-	 */
-	cpu_set_reserved_ttbr0();
-
-	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		/*
-		 * The ASID is from the current generation, just switch to the
-		 * new pgd. This condition is only true for calls from
-		 * context_switch() and interrupts are already disabled.
-		 */
-		cpu_switch_mm(mm->pgd, mm);
-	else if (irqs_disabled())
-		/*
-		 * Defer the new ASID allocation until after the context
-		 * switch critical region since __new_context() cannot be
-		 * called with interrupts disabled (it sends IPIs).
-		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-	else
-		/*
-		 * That is a direct call to switch_mm() or activate_mm() with
-		 * interrupts enabled and a new context.
-		 */
-		switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-	finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-		switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)	({ mm->context.id = 0; })

#else	/* !CONFIG_CPU_HAS_ASID */

@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
#endif	/* CONFIG_CPU_HAS_ASID */

#define destroy_context(mm)		do { } while(0)
+#define activate_mm(prev,next)		switch_mm(prev, next, NULL)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
}

#define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

#endif
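
With check_and_switch_context() moved out of line, the header keeps only its declaration plus an init_new_context() that starts every mm with context.id = 0 (no ASID, no generation), so the first switch always takes the allocation path in context.c. A standalone sketch of the generation test used on that path; same_generation() and its parameters are illustrative names, not the kernel's:

#include <stdbool.h>
#include <stdint.h>

#define ASID_BITS 8

/*
 * An mm's ASID is still usable when the generation bits of its
 * context.id match the global generation counter: XOR cancels the
 * equal bits, and shifting out the low ASID_BITS leaves only the
 * generation difference, which must be zero.
 */
static bool same_generation(uint64_t context_id, uint64_t generation)
{
	return ((context_id ^ generation) >> ASID_BITS) == 0;
}

Because the counter starts at ASID_FIRST_VERSION (1ULL << ASID_BITS), same_generation(0, generation) is always false, which is what forces freshly created contexts through new_context().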
arch/arm/mm/context.c +108 −99
@@ -2,6 +2,9 @@
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ *  Copyright (C) 2012 ARM Limited
+ *
+ *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -14,14 +17,40 @@
#include <linux/percpu.h>

#include <asm/mmu_context.h>
+#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>

+/*
+ * On ARMv6, we have the following structure in the Context ID:
+ *
+ * 31                         7          0
+ * +-------------------------+-----------+
+ * |      process ID         |   ASID    |
+ * +-------------------------+-----------+
+ * |              context ID             |
+ * +-------------------------------------+
+ *
+ * The ASID is used to tag entries in the CPU caches and TLBs.
+ * The context ID is used by debuggers and trace logic, and
+ * should be unique within all running processes.
+ */
+#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
+
+#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
+#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
+
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_LPAE
-void cpu_set_reserved_ttbr0(void)
+static void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;
@@ -37,7 +66,7 @@ void cpu_set_reserved_ttbr0(void)
	isb();
}
#else
-void cpu_set_reserved_ttbr0(void)
+static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
@@ -84,124 +113,104 @@ static int __init contextidr_notifier_init(void)
arch_initcall(contextidr_notifier_init);
#endif

-/*
- * We fork()ed a process, and we need a new context for the child
- * to run in.
- */
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static void flush_context(unsigned int cpu)
{
-	mm->context.id = 0;
-	raw_spin_lock_init(&mm->context.id_lock);
+	int i;
+	u64 asid;

+	/* Update the list of reserved ASIDs and the ASID bitmap. */
+	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+	for_each_possible_cpu(i) {
+		if (i == cpu) {
+			asid = 0;
+		} else {
+			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+			__set_bit(ASID_TO_IDX(asid), asid_map);
+		}
+		per_cpu(reserved_asids, i) = asid;
+	}

-static void flush_context(void)
-{
-	cpu_set_reserved_ttbr0();
-	local_flush_tlb_all();
-	if (icache_is_vivt_asid_tagged()) {
+	/* Queue a TLB invalidate and flush the I-cache if necessary. */
+	if (!tlb_ops_need_broadcast())
+		cpumask_set_cpu(cpu, &tlb_flush_pending);
+	else
+		cpumask_setall(&tlb_flush_pending);

+	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
-		dsb();
-	}
}

-#ifdef CONFIG_SMP
+static int is_reserved_asid(u64 asid)
{
+	int cpu;
+	for_each_possible_cpu(cpu)
+		if (per_cpu(reserved_asids, cpu) == asid)
+			return 1;
+	return 0;
}

-static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+static void new_context(struct mm_struct *mm, unsigned int cpu)
{
-	unsigned long flags;
+	u64 asid = mm->context.id;
+	u64 generation = atomic64_read(&asid_generation);

+	if (asid != 0 && is_reserved_asid(asid)) {
		/*
-	 * Locking needed for multi-threaded applications where the
-	 * same mm->context.id could be set from different CPUs during
-	 * the broadcast. This function is also called via IPI so the
-	 * mm->context.id_lock has to be IRQ-safe.
+		 * Our current ASID was active during a rollover, we can
+		 * continue to use it and this was just a false alarm.
		 */
-	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
-	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+		asid = generation | (asid & ~ASID_MASK);
+	} else {
		/*
-		 * Old version of ASID found. Set the new one and
-		 * reset mm_cpumask(mm).
+		 * Allocate a free ASID. If we can't find one, take a
+		 * note of the currently active ASIDs and mark the TLBs
+		 * as requiring flushes.
		 */
-		mm->context.id = asid;
+		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		if (asid == NUM_USER_ASIDS) {
+			generation = atomic64_add_return(ASID_FIRST_VERSION,
+							 &asid_generation);
+			flush_context(cpu);
+			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		}
+		__set_bit(asid, asid_map);
+		asid = generation | IDX_TO_ASID(asid);
		cpumask_clear(mm_cpumask(mm));
	}
-	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

-	/*
-	 * Set the mm_cpumask(mm) bit for the current CPU.
-	 */
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+	mm->context.id = asid;
}

-/*
- * Reset the ASID on the current CPU. This function call is broadcast
- * from the CPU handling the ASID rollover and holding cpu_asid_lock.
- */
-static void reset_context(void *info)
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
-	unsigned int asid;
+	unsigned long flags;
	unsigned int cpu = smp_processor_id();
-	struct mm_struct *mm = current->active_mm;

-	smp_rmb();
-	asid = cpu_last_asid + cpu + 1;
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);

-	flush_context();
-	set_mm_context(mm, asid);

-	/* set the new ASID */
-	cpu_switch_mm(mm->pgd, mm);
}

-#else
-
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-	mm->context.id = asid;
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();

-#endif
+	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+		goto switch_mm_fastpath;

-void __new_context(struct mm_struct *mm)
-{
-	unsigned int asid;
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	/* Check that our ASID belongs to the current generation. */
+	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+		new_context(mm, cpu);

-	raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
-	/*
-	 * Check the ASID again, in case the change was broadcast from
-	 * another CPU before we acquired the lock.
-	 */
-	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
-		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-		raw_spin_unlock(&cpu_asid_lock);
-		return;
-	}
-#endif
-	/*
-	 * At this point, it is guaranteed that the current mm (with
-	 * an old ASID) isn't active on any other CPU since the ASIDs
-	 * are changed simultaneously via IPI.
-	 */
-	asid = ++cpu_last_asid;
-	if (asid == 0)
-		asid = cpu_last_asid = ASID_FIRST_VERSION;
+	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));

-	/*
-	 * If we've used up all our ASIDs, we need
-	 * to start a new version and flush the TLB.
-	 */
-	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = cpu_last_asid + smp_processor_id() + 1;
-		flush_context();
-#ifdef CONFIG_SMP
-		smp_wmb();
-		smp_call_function(reset_context, NULL, 1);
-#endif
-		cpu_last_asid += NR_CPUS;
-	}
+	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+		local_flush_tlb_all();
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

-	set_mm_context(mm, asid);
-	raw_spin_unlock(&cpu_asid_lock);
+switch_mm_fastpath:
+	cpu_switch_mm(mm->pgd, mm);
}
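
To make the slow path easier to follow, here is a deliberately simplified userspace sketch of what new_context() does: scan a bitmap for a free slot and, when none is left, bump the generation, clear the bitmap and retry (a rollover). It is single-threaded and drops the reserved/active per-CPU tracking and the tlb_flush_pending bookkeeping, so it only models the bitmap-plus-generation idea; alloc_asid() and the byte-array asid_map are illustrative stand-ins, not kernel code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)

static uint64_t asid_generation = ASID_FIRST_VERSION;
static unsigned char asid_map[NUM_USER_ASIDS];	/* one byte per ASID, stands in for the kernel's bitmap */

static uint64_t alloc_asid(void)
{
	uint64_t idx;

	for (idx = 0; idx < NUM_USER_ASIDS; idx++)
		if (!asid_map[idx])
			break;

	if (idx == NUM_USER_ASIDS) {
		/* Rollover: a new generation invalidates every ASID handed out so far. */
		asid_generation += ASID_FIRST_VERSION;
		memset(asid_map, 0, sizeof(asid_map));
		idx = 0;
	}

	asid_map[idx] = 1;
	/* Mirrors IDX_TO_ASID(): index 0 maps to hardware ASID 1, keeping ASID 0 reserved. */
	return asid_generation | (idx + 1);
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("context.id = %#llx\n", (unsigned long long)alloc_asid());
	return 0;
}

In the kernel, the rollover additionally snapshots each CPU's currently running ASID into reserved_asids and queues local TLB flushes via tlb_flush_pending, which this sketch omits.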