
Commit ada85708 authored by Jeremy Fitzhardinge, committed by Ingo Molnar

x86: remove open-coded save/load segment operations



This removes a pile of buggy open-coded implementations of savesegment
and loadsegment.

(They are buggy because they don't have memory barriers to prevent
them from being reordered with respect to memory accesses.)
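
For reference, here is a minimal sketch of the two macros the patch switches to, as they were defined in the x86 headers of this era (simplified: the real loadsegment also carries an exception-table fixup that reloads a zero selector if the given one faults). The key detail is the "memory" clobber, which the open-coded versions lacked; it acts as a compiler barrier, so the compiler cannot reorder loads and stores across the segment operation.

#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

#define loadsegment(seg, value)				\
	asm volatile("movl %k0,%%" #seg : : "r" (value) : "memory")

This matters in places like pda_init() below, where %gs must be fully set up before any %gs-relative PDA access that the compiler might otherwise hoist above the segment load.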

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent af2b1c60
+2 −1
@@ -480,7 +480,8 @@ void pda_init(int cpu)
 	struct x8664_pda *pda = cpu_pda(cpu);
 
 	/* Setup up data that may be needed in __get_free_pages early */
-	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+	loadsegment(fs, 0);
+	loadsegment(gs, 0);
 	/* Memory clobbers used to order PDA accessed */
 	mb();
 	wrmsrl(MSR_GS_BASE, pda);
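
The hunk above is where the ordering is easiest to see: once wrmsrl(MSR_GS_BASE, pda) runs, the PDA is reached through %gs-relative addressing, so no PDA access may drift above the segment setup. A toy, self-contained illustration of the compiler-barrier effect (load_fs_sel() and flag are made-up names for illustration, not kernel code):

static int flag;

static inline void load_fs_sel(unsigned sel)
{
	/* As in loadsegment(): the "memory" clobber forbids the
	 * compiler from moving accesses to 'flag' (or any other
	 * memory) across the segment load. */
	asm volatile("movl %k0,%%fs" : : "r" (sel) : "memory");
}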
+15 −13
@@ -335,10 +335,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("mov %%es,%0" : "=m" (p->thread.es));
-	asm("mov %%ds,%0" : "=m" (p->thread.ds));
+	savesegment(gs, p->thread.gsindex);
+	savesegment(fs, p->thread.fsindex);
+	savesegment(es, p->thread.es);
+	savesegment(ds, p->thread.ds);
 
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -377,7 +377,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-	asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+	loadsegment(fs, 0);
+	loadsegment(es, 0);
+	loadsegment(ds, 0);
 	load_gs_index(0);
 	regs->ip		= new_ip;
 	regs->sp		= new_sp;
@@ -550,11 +552,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("mov %%es,%0" : "=m" (prev->es));
+	savesegment(es, prev->es);
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
+	savesegment(ds, prev->ds);
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
@@ -565,7 +567,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	{
 		unsigned fsindex;
-		asm volatile("movl %%fs,%0" : "=r" (fsindex));
+		savesegment(fs, fsindex);
 		/* segment register != 0 always requires a reload. 
 		   also reload when it has changed. 
 		   when prev process used 64bit base always reload
@@ -586,7 +588,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	}
 	{
 		unsigned gsindex;
-		asm volatile("movl %%gs,%0" : "=r" (gsindex));
+		savesegment(gs, gsindex);
 		if (unlikely(gsindex | next->gsindex | prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)
@@ -767,7 +769,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			set_32bit_tls(task, FS_TLS, addr);
 			if (doit) {
 				load_TLS(&task->thread, cpu);
-				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+				loadsegment(fs, FS_TLS_SEL);
 			}
 			task->thread.fsindex = FS_TLS_SEL;
 			task->thread.fs = 0;
@@ -777,7 +779,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			if (doit) {
 				/* set the selector to 0 to not confuse
 				   __switch_to */
-				asm volatile("movl %0,%%fs" :: "r" (0));
+				loadsegment(fs, 0);
 				ret = checking_wrmsrl(MSR_FS_BASE, addr);
 			}
 		}
@@ -800,7 +802,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (task->thread.gsindex == GS_TLS_SEL)
 			base = read_32bit_tls(task, GS_TLS);
 		else if (doit) {
-			asm("movl %%gs,%0" : "=r" (gsindex));
+			savesegment(gs, gsindex);
 			if (gsindex)
 				rdmsrl(MSR_KERNEL_GS_BASE, base);
 			else