Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6e7e7f4d authored by Linus Torvalds
Browse files
Pull arm64 fixes from Will Deacon:
 "Fix some more FP register fallout from the SVE patches and also some
  problems with the PGD tracking in our software PAN emulation code,
  after we received a crash report from a 3.18 kernel running a
  backport.

  Summary:

   - fix SW PAN pgd shadowing for kernel threads, EFI and exiting user
     tasks

   - fix FP register leak when a task_struct is re-allocated

   - fix potential use-after-free in FP state tracking used by KVM"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/sve: Avoid dereference of dead task_struct in KVM guest entry
  arm64: SW PAN: Update saved ttbr0 value on enter_lazy_tlb
  arm64: SW PAN: Point saved ttbr0 at the zero page when switching to init_mm
  arm64: fpsimd: Abstract out binding of task's fpsimd context to the cpu.
  arm64: fpsimd: Prevent registers leaking from dead tasks
parents 3625de4b cb968afc
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
@@ -132,10 +132,8 @@ static inline void efi_set_pgd(struct mm_struct *mm)
			 * Defer the switch to the current thread's TTBR0_EL1
			 * until uaccess_enable(). Restore the current
			 * thread's saved ttbr0 corresponding to its active_mm
			 * (if different from init_mm).
			 */
			cpu_set_reserved_ttbr0();
			if (current->active_mm != &init_mm)
			update_saved_ttbr0(current, current->active_mm);
		}
	}
+23 −23
Original line number Diff line number Diff line
@@ -156,29 +156,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * Intentionally empty: no arm64-specific work on lazy-TLB entry in
	 * this (pre-patch) version.  NOTE(review): the diff below adds a
	 * non-empty replacement that repoints the saved ttbr0 at init_mm.
	 */
}

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	/*
	 * NOTE(review): this span is a diff rendering with the removed and
	 * added bodies interleaved.  The block up to the first closing brace
	 * is the pre-patch version; everything from "u64 ttbr;" onward is
	 * the replacement.
	 */
	if (system_uses_ttbr0_pan()) {
		BUG_ON(mm->pgd == swapper_pg_dir);
		task_thread_info(tsk)->ttbr0 =
			virt_to_phys(mm->pgd) | ASID(mm) << 48;
	}
	/* Post-patch version: bail out early when SW PAN is not in use. */
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	/*
	 * For init_mm, record the zero page rather than swapper_pg_dir so
	 * the saved ttbr0 never points at the kernel page tables.
	 */
	if (mm == &init_mm)
		ttbr = __pa_symbol(empty_zero_page);
	else
		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

	task_thread_info(tsk)->ttbr0 = ttbr;
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -187,6 +179,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
}
#endif

/*
 * Entered when tsk goes into lazy TLB mode (tsk->mm is NULL); repoint the
 * task's saved ttbr0 at init_mm, which update_saved_ttbr0() maps to the
 * zero page.
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * zero page.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	unsigned int cpu = smp_processor_id();
@@ -214,10 +216,8 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may have not been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process). Avoid setting the reserved
	 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
	 * of another thread of the same process).
	 */
	if (next != &init_mm)
	update_saved_ttbr0(tsk, next);
}

+30 −21
Original line number Diff line number Diff line
@@ -114,7 +114,12 @@
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
/* NOTE(review): pre-patch per-cpu definition (bare pointer), removed below. */
static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
/*
 * Per-cpu record of whose FPSIMD state the cpu's registers last held,
 * plus whether that task had TIF_SVE set when it was bound
 * (see fpsimd_bind_to_cpu()).
 */
struct fpsimd_last_state_struct {
	struct fpsimd_state *st;
	bool sve_in_use;
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);

/* Default VL for tasks that don't set it explicitly: */
static int sve_default_vl = -1;
@@ -905,7 +910,7 @@ void fpsimd_thread_switch(struct task_struct *next)
		 */
		struct fpsimd_state *st = &next->thread.fpsimd_state;

		if (__this_cpu_read(fpsimd_last_state) == st
		if (__this_cpu_read(fpsimd_last_state.st) == st
		    && st->cpu == smp_processor_id())
			clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
		else
@@ -991,6 +996,21 @@ void fpsimd_signal_preserve_current_state(void)
		sve_to_fpsimd(current);
}

/*
 * Associate current's FPSIMD context with this cpu
 * Preemption must be disabled when calling this function.
 */
static void fpsimd_bind_to_cpu(void)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);
	struct fpsimd_state *st = &current->thread.fpsimd_state;

	last->st = st;
	last->sve_in_use = test_thread_flag(TIF_SVE);
	st->cpu = smp_processor_id();
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
@@ -1004,11 +1024,8 @@ void fpsimd_restore_current_state(void)
	local_bh_disable();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		struct fpsimd_state *st = &current->thread.fpsimd_state;

		task_fpsimd_load();
		__this_cpu_write(fpsimd_last_state, st);
		st->cpu = smp_processor_id();
		fpsimd_bind_to_cpu();
	}

	local_bh_enable();
@@ -1032,12 +1049,8 @@ void fpsimd_update_current_state(struct fpsimd_state *state)

	task_fpsimd_load();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		struct fpsimd_state *st = &current->thread.fpsimd_state;

		__this_cpu_write(fpsimd_last_state, st);
		st->cpu = smp_processor_id();
	}
	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE))
		fpsimd_bind_to_cpu();

	local_bh_enable();
}
@@ -1052,7 +1065,7 @@ void fpsimd_flush_task_state(struct task_struct *t)

/* Invalidate this cpu's cached "last FPSIMD state" pointer. */
static inline void fpsimd_flush_cpu_state(void)
{
	/*
	 * NOTE(review): the next two lines are the removed and added forms
	 * of the same diff line — the patch adds the ".st" member access.
	 */
	__this_cpu_write(fpsimd_last_state, NULL);
	__this_cpu_write(fpsimd_last_state.st, NULL);
}

/*
@@ -1065,14 +1078,10 @@ static inline void fpsimd_flush_cpu_state(void)
#ifdef CONFIG_ARM64_SVE
/*
 * Invalidate this cpu's cached FPSIMD state if it was bound with SVE live.
 * NOTE(review): diff rendering — the fpstate/tsk/container_of path is the
 * removed version (it dereferenced a possibly-dead task_struct); the
 * "last->sve_in_use" path is the replacement, which avoids touching the
 * task at all.
 */
void sve_flush_cpu_state(void)
{
	struct fpsimd_state *const fpstate = __this_cpu_read(fpsimd_last_state);
	struct task_struct *tsk;

	if (!fpstate)
		return;
	struct fpsimd_last_state_struct const *last =
		this_cpu_ptr(&fpsimd_last_state);

	tsk = container_of(fpstate, struct task_struct, thread.fpsimd_state);
	if (test_tsk_thread_flag(tsk, TIF_SVE))
	if (last->st && last->sve_in_use)
		fpsimd_flush_cpu_state();
}
#endif /* CONFIG_ARM64_SVE */
@@ -1267,7 +1276,7 @@ static inline void fpsimd_pm_init(void) { }
#ifdef CONFIG_HOTPLUG_CPU
/* CPU-hotplug teardown: clear the dead cpu's cached FPSIMD state pointer. */
static int fpsimd_cpu_dead(unsigned int cpu)
{
	/*
	 * NOTE(review): removed and added forms of the same diff line —
	 * the patch adds the ".st" member access.
	 */
	per_cpu(fpsimd_last_state, cpu) = NULL;
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

+9 −0
Original line number Diff line number Diff line
@@ -314,6 +314,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
	clear_tsk_thread_flag(p, TIF_SVE);
	p->thread.sve_state = NULL;

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;