
Commit 37868fe1 authored by Andy Lutomirski, committed by Ingo Molnar

x86/ldt: Make modify_ldt synchronous



modify_ldt() has questionable locking and does not synchronize
threads.  Improve it: redesign the locking and synchronize all
threads' LDTs using an IPI on all modifications.
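(The syscall side of this change is not shown in the hunks captured below, so the following is only a sketch of the IPI-driven install path, written to be consistent with the load_mm_ldt() helper this patch adds; the helper names install_ldt() and flush_ldt() are assumptions, not quotations from the patch.)

static void flush_ldt(void *current_mm)
{
	/* Runs in IPI context; reload only if this CPU is using the mm. */
	if (current->active_mm == current_mm)
		load_mm_ldt(current->active_mm);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	/* Publish the new table; pairs with lockless_dereference() in load_mm_ldt(). */
	smp_store_release(&mm->context.ldt, ldt);

	/* Synchronously IPI every CPU currently running this mm so it reloads its LDT. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
}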

This will dramatically slow down modify_ldt in multithreaded
programs, but there shouldn't be any multithreaded programs that
care about modify_ldt's performance in the first place.
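(For reference, glibc has no wrapper for modify_ldt(), so such programs call it through syscall(). A minimal sketch of a caller, with purely illustrative descriptor values, looks like this:)

#include <asm/ldt.h>        /* struct user_desc */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = 0;        /* first LDT slot */
	desc.base_addr      = 0;
	desc.limit          = 0xfffff;  /* flat 4 GiB data segment (illustrative) */
	desc.seg_32bit      = 1;
	desc.limit_in_pages = 1;

	/* func 1 = write an LDT entry; this is the call made synchronous by this patch. */
	if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0)
		perror("modify_ldt");
	return 0;
}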

This fixes some fallout from the CVE-2015-5157 fixes.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: security@kernel.org <security@kernel.org>
Cc: <stable@vger.kernel.org>
Cc: xen-devel <xen-devel@lists.xen.org>
Link: http://lkml.kernel.org/r/4c6978476782160600471bd865b318db34c7b628.1438291540.git.luto@kernel.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent aa1acff3
+0 −15
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
 	set_ldt(NULL, 0);
 }
 
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
-	set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
-	preempt_disable();
-	load_LDT_nolock(pc);
-	preempt_enable();
-}
-
 static inline unsigned long get_desc_base(const struct desc_struct *desc)
 {
 	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+1 −2
@@ -9,8 +9,7 @@
  * we put the segment information here.
  */
 typedef struct {
-	void *ldt;
-	int size;
+	struct ldt_struct *ldt;
 
 #ifdef CONFIG_X86_64
 	/* True if mm supports a task running in 32 bit compatibility mode. */
+49 −5
@@ -33,6 +33,50 @@ static inline void load_mm_cr4(struct mm_struct *mm)
 static inline void load_mm_cr4(struct mm_struct *mm) {}
 #endif
 
+/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+	/*
+	 * Xen requires page-aligned LDTs with special permissions.  This is
+	 * needed to prevent us from installing evil descriptors such as
+	 * call gates.  On native, we could merge the ldt_struct and LDT
+	 * allocations, but it's not worth trying to optimize.
+	 */
+	struct desc_struct *entries;
+	int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+	struct ldt_struct *ldt;
+
+	/* lockless_dereference synchronizes with smp_store_release */
+	ldt = lockless_dereference(mm->context.ldt);
+
+	/*
+	 * Any change to mm->context.ldt is followed by an IPI to all
+	 * CPUs with the mm active.  The LDT will not be freed until
+	 * after the IPI is handled by all such CPUs.  This means that,
+	 * if the ldt_struct changes before we return, the values we see
+	 * will be safe, and the new values will be loaded before we run
+	 * any user code.
+	 *
+	 * NB: don't try to convert this to use RCU without extreme care.
+	 * We would still need IRQs off, because we don't want to change
+	 * the local LDT after an IPI loaded a newer value than the one
+	 * that we can see.
+	 */
+
+	if (unlikely(ldt))
+		set_ldt(ldt->entries, ldt->size);
+	else
+		clear_LDT();
+
+	DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
 /*
  * Used for LDT copy/destruction.
  */
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * was called and then modify_ldt changed
 		 * prev->context.ldt but suppressed an IPI to this CPU.
 		 * In this case, prev->context.ldt != NULL, because we
-		 * never free an LDT while the mm still exists.  That
-		 * means that next->context.ldt != prev->context.ldt,
-		 * because mms never share an LDT.
+		 * never set context.ldt to NULL while the mm still
+		 * exists.  That means that next->context.ldt !=
+		 * prev->context.ldt, because mms never share an LDT.
 		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 	}
 #ifdef CONFIG_SMP
 	  else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			load_cr3(next->pgd);
 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 			load_mm_cr4(next);
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 		}
 	}
 #endif
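(The Xen comment in struct ldt_struct above is the reason the descriptor table lives in its own page-aligned allocation. The allocator itself is not part of the hunks shown on this page, so the following is only a sketch of how such a page-aligned allocator can look; the name alloc_ldt_struct() and its details are assumptions.)

/* Sketch only: allocate an LDT whose entry array is page-aligned. */
static struct ldt_struct *alloc_ldt_struct(int size)
{
	struct ldt_struct *new_ldt;
	int alloc_size;

	if (size > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	alloc_size = size * LDT_ENTRY_SIZE;

	/*
	 * A single zeroed page is page-aligned by construction; larger
	 * tables go through vzalloc(), which also returns page-aligned
	 * memory.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	new_ldt->size = size;
	return new_ldt;
}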
+2 −2
@@ -1410,7 +1410,7 @@ void cpu_init(void)
 	load_sp0(t, &current->thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);
 
 	clear_all_debug_regs();
 	dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
 	load_sp0(t, thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
 
+8 −4
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
 	int idx = segment >> 3;
 
 	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+		struct ldt_struct *ldt;
+
 		if (idx > LDT_ENTRIES)
 			return 0;
 
-		if (idx > current->active_mm->context.size)
+		/* IRQs are off, so this synchronizes with smp_store_release */
+		ldt = lockless_dereference(current->active_mm->context.ldt);
+		if (!ldt || idx > ldt->size)
 			return 0;
 
-		desc = current->active_mm->context.ldt;
+		desc = &ldt->entries[idx];
 	} else {
 		if (idx > GDT_ENTRIES)
 			return 0;
 
-		desc = raw_cpu_ptr(gdt_page.gdt);
+		desc = raw_cpu_ptr(gdt_page.gdt) + idx;
 	}
 
-	return get_desc_base(desc + idx);
+	return get_desc_base(desc);
 }
 
 #ifdef CONFIG_COMPAT