Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 296f781a authored by Andy Lutomirski, committed by Ingo Molnar
Browse files

x86/asm/64: Rename thread_struct's fs and gs to fsbase and gsbase



Unlike ds and es, these are base addresses, not selectors.  Rename
them so their meaning is more obvious.

On x86_32, the field is still called fs.  Fixing that could make sense
as a future cleanup.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/69a18a51c4cba0ce29a241e570fc618ad721d908.1461698311.git.luto@kernel.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 731e33e3
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -176,7 +176,7 @@ static inline void elf_common_init(struct thread_struct *t,
	regs->si = regs->di = regs->bp = 0;
	regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
	regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
	t->fs = t->gs = 0;
	t->fsbase = t->gsbase = 0;
	t->fsindex = t->gsindex = 0;
	t->ds = t->es = ds;
}
@@ -226,8 +226,8 @@ do { \
	(pr_reg)[18] = (regs)->flags;				\
	(pr_reg)[19] = (regs)->sp;				\
	(pr_reg)[20] = (regs)->ss;				\
	(pr_reg)[21] = current->thread.fs;			\
	(pr_reg)[22] = current->thread.gs;			\
	(pr_reg)[21] = current->thread.fsbase;			\
	(pr_reg)[22] = current->thread.gsbase;			\
	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
+9 −2
Original line number Diff line number Diff line
@@ -388,9 +388,16 @@ struct thread_struct {
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long fs;
#endif
	unsigned long gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
+15 −15
Original line number Diff line number Diff line
@@ -150,9 +150,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
@@ -329,18 +329,18 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 * stronger guarantees.)
	 *
	 * As an invariant,
	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) is
	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
	 * impossible.
	 */
	if (next->fsindex) {
		/* Loading a nonzero value into FS sets the index and base. */
		loadsegment(fs, next->fsindex);
	} else {
		if (next->fs) {
		if (next->fsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_fsindex)
				loadsegment(fs, 0);
			wrmsrl(MSR_FS_BASE, next->fs);
			wrmsrl(MSR_FS_BASE, next->fsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
@@ -356,7 +356,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->fs || prev_fsindex)
				if (prev->fsbase || prev_fsindex)
					loadsegment(fs, 0);
			}
		}
@@ -369,18 +369,18 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 * us.
	 */
	if (prev_fsindex)
		prev->fs = 0;
		prev->fsbase = 0;
	prev->fsindex = prev_fsindex;

	if (next->gsindex) {
		/* Loading a nonzero value into GS sets the index and base. */
		load_gs_index(next->gsindex);
	} else {
		if (next->gs) {
		if (next->gsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_gsindex)
				load_gs_index(0);
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
@@ -400,7 +400,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->gs || prev_gsindex)
				if (prev->gsbase || prev_gsindex)
					load_gs_index(0);
			}
		}
@@ -413,7 +413,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 * us.
	 */
	if (prev_gsindex)
		prev->gs = 0;
		prev->gsbase = 0;
	prev->gsindex = prev_gsindex;

	switch_fpu_finish(next_fpu, fpu_switch);
@@ -536,7 +536,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gs = addr;
		task->thread.gsbase = addr;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
@@ -549,7 +549,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fs = addr;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
@@ -562,7 +562,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
@@ -571,7 +571,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gs;
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
+4 −4
Original line number Diff line number Diff line
@@ -399,7 +399,7 @@ static int putreg(struct task_struct *child,
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
		if (child->thread.fsbase != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
@@ -408,7 +408,7 @@ static int putreg(struct task_struct *child,
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
		if (child->thread.gsbase != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
@@ -438,14 +438,14 @@ static unsigned long getreg(struct task_struct *task, unsigned long offset)
		 * XXX: This will not behave as expected if called on
		 * current or if fsindex != 0.
		 */
		return task->thread.fs;
		return task->thread.fsbase;
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * XXX: This will not behave as expected if called on
		 * current or if fsindex != 0.
		 */
		return task->thread.gs;
		return task->thread.gsbase;
	}
#endif
	}
+1 −1
Original line number Diff line number Diff line
@@ -1254,7 +1254,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
	load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS