
Commit 0d7b8547 authored by Andy Lutomirski, committed by Ingo Molnar

x86-64: Remove kernel.vsyscall64 sysctl



It's unnecessary overhead in code that's supposed to be highly
optimized.  Removing it allows us to remove one of the two
syscall instructions in the vsyscall page.

The only sensible use for it is by UML users, and even there it doesn't
fully address UML's inconsistent vsyscall results.  The real fix for
UML is to stop using vsyscalls entirely.
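
For context: the knob being removed was exposed to userspace as
/proc/sys/kernel/vsyscall64 (the ctl_table entry deleted below).  A
minimal probe, illustrative only, shows the user-visible effect: on
kernels predating this commit it prints the current value (1 by
default, matching the .sysctl_enabled = 1 initializer also removed
here); afterwards the open simply fails with ENOENT.

/* Illustrative probe for the sysctl this commit removes. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/vsyscall64", "r");
	int enabled;

	if (!f) {
		/* Expected on kernels with this commit applied. */
		printf("kernel.vsyscall64: %s\n", strerror(errno));
		return 0;
	}
	if (fscanf(f, "%d", &enabled) == 1)
		printf("kernel.vsyscall64 = %d\n", enabled);
	fclose(f);
	return 0;
}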

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Cc: Jesper Juhl <jj@chaosbits.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: richard -rw- weinberger <richard.weinberger@gmail.com>
Cc: Mikael Pettersson <mikpe@it.uu.se>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Louis Rilling <Louis.Rilling@kerlabs.com>
Cc: Valdis.Kletnieks@vt.edu
Cc: pageexec@freemail.hu
Link: http://lkml.kernel.org/r/973ae803fe76f712da4b2740e66dccf452d3b1e4.1307292171.git.luto@mit.edu


Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9fd67b4e
arch/x86/include/asm/vgtod.h  +0 −1
@@ -11,7 +11,6 @@ struct vsyscall_gtod_data {
 	time_t		wall_time_sec;
 	u32		wall_time_nsec;
 
-	int		sysctl_enabled;
 	struct timezone sys_tz;
 	struct { /* extract of a clocksource struct */
 		cycle_t (*vread)(void);
arch/x86/kernel/vsyscall_64.c  +1 −33
@@ -53,7 +53,6 @@ DEFINE_VVAR(int, vgetcpu_mode);
 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 {
 	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-	.sysctl_enabled = 1,
 };
 
 void update_vsyscall_tz(void)
@@ -103,15 +102,6 @@ static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
 	return ret;
 }
 
-static __always_inline long time_syscall(long *t)
-{
-	long secs;
-	asm volatile("syscall"
-		: "=a" (secs)
-		: "0" (__NR_time),"D" (t) : __syscall_clobber);
-	return secs;
-}
-
 static __always_inline void do_vgettimeofday(struct timeval * tv)
 {
 	cycle_t now, base, mask, cycle_delta;
@@ -122,8 +112,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
 		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
 
 		vread = VVAR(vsyscall_gtod_data).clock.vread;
-		if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
-			     !vread)) {
+		if (unlikely(!vread)) {
 			gettimeofday(tv,NULL);
 			return;
 		}
@@ -165,8 +154,6 @@ time_t __vsyscall(1) vtime(time_t *t)
 {
 	unsigned seq;
 	time_t result;
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-		return time_syscall(t);
 
 	do {
 		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
@@ -227,22 +214,6 @@ static long __vsyscall(3) venosys_1(void)
 	return -ENOSYS;
 }
 
-#ifdef CONFIG_SYSCTL
-static ctl_table kernel_table2[] = {
-	{ .procname = "vsyscall64",
-	  .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
-	  .mode = 0644,
-	  .proc_handler = proc_dointvec },
-	{}
-};
-
-static ctl_table kernel_root_table2[] = {
-	{ .procname = "kernel", .mode = 0555,
-	  .child = kernel_table2 },
-	{}
-};
-#endif
-
 /* Assume __initcall executes before all user space. Hopefully kmod
    doesn't violate that. We'll find out if it does. */
 static void __cpuinit vsyscall_set_cpu(int cpu)
@@ -301,9 +272,6 @@ static int __init vsyscall_init(void)
 	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
 	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
 	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-#ifdef CONFIG_SYSCTL
-	register_sysctl_table(kernel_root_table2);
-#endif
 	on_each_cpu(cpu_vsyscall_init, NULL, 1);
 	/* notifier priority > KVM */
 	hotcpu_notifier(cpu_vsyscall_notifier, 30);
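
Note that the surviving fast path above still brackets its reads with
the gtod seqlock (the read_seqbegin() retry loops left intact in the
hunks).  A rough userspace sketch of that pattern, using C11 atomics
and illustrative names rather than the kernel's implementation (a
strictly conforming version would make the payload fields atomic too):

/* Sketch of the seqlock read/write pattern, simplified. */
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

struct gtod_shadow {
	atomic_uint	seq;		/* even: stable, odd: update in progress */
	time_t		wall_time_sec;
	unsigned	wall_time_nsec;
};

/* Reader: retry until an even, unchanged sequence brackets the load. */
static time_t read_wall_sec(struct gtod_shadow *g)
{
	unsigned start;
	time_t sec;

	do {
		start = atomic_load(&g->seq);
		sec = g->wall_time_sec;
	} while ((start & 1) || atomic_load(&g->seq) != start);

	return sec;
}

/* Writer: make the counter odd, update, make it even again. */
static void update_wall_time(struct gtod_shadow *g, time_t sec, unsigned nsec)
{
	atomic_fetch_add(&g->seq, 1);	/* odd: readers retry */
	g->wall_time_sec = sec;
	g->wall_time_nsec = nsec;
	atomic_fetch_add(&g->seq, 1);	/* even: snapshot valid */
}

int main(void)
{
	struct gtod_shadow g = { .seq = 0 };

	update_wall_time(&g, time(NULL), 0);
	printf("wall_time_sec = %ld\n", (long)read_wall_sec(&g));
	return 0;
}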
arch/x86/vdso/vclock_gettime.c  +21 −34
@@ -116,7 +116,6 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
-	if (likely(gtod->sysctl_enabled))
 	switch (clock) {
 	case CLOCK_REALTIME:
 		if (likely(gtod->clock.vread))
@@ -131,6 +130,7 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 	case CLOCK_MONOTONIC_COARSE:
 		return do_monotonic_coarse(ts);
 	}
+
 	return vdso_fallback_gettime(clock, ts);
 }
 int clock_gettime(clockid_t, struct timespec *)
@@ -139,7 +139,7 @@ int clock_gettime(clockid_t, struct timespec *)
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
-	if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
+	if (likely(gtod->clock.vread)) {
 		if (likely(tv != NULL)) {
 			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
 				     offsetof(struct timespec, tv_nsec) ||
@@ -161,27 +161,14 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 int gettimeofday(struct timeval *, struct timezone *)
 	__attribute__((weak, alias("__vdso_gettimeofday")));
 
-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-
-static __always_inline long time_syscall(long *t)
-{
-	long secs;
-	asm volatile("syscall"
-		     : "=a" (secs)
-		     : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
-	return secs;
-}
-
+/*
+ * This will break when the xtime seconds get inaccurate, but that is
+ * unlikely
+ */
 notrace time_t __vdso_time(time_t *t)
 {
-	time_t result;
-
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-		return time_syscall(t);
-
 	/* This is atomic on x86_64 so we don't need any locks. */
-	result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
+	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
 
 	if (t)
 		*t = result;
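
The user-visible contract is unchanged: whether these calls are served
by the vDSO fast path or by a real syscall is now decided solely by
whether the current clocksource provides ->vread, not by a sysctl.  A
plain demonstration, nothing beyond standard POSIX calls (link with
-lrt on older glibc for clock_gettime):

/* Ordinary timekeeping calls; the vDSO fast path is transparent. */
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int main(void)
{
	struct timeval tv;
	struct timespec ts;

	if (gettimeofday(&tv, NULL) == 0)
		printf("gettimeofday: %ld.%06ld\n",
		       (long)tv.tv_sec, (long)tv.tv_usec);

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("clock_gettime(CLOCK_MONOTONIC): %ld.%09ld\n",
		       (long)ts.tv_sec, ts.tv_nsec);

	printf("time: %ld\n", (long)time(NULL));
	return 0;
}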