
Commit a3a85a76 authored by Richard Weinberger

um: Disintegrate asm/system.h



Signed-off-by: Richard Weinberger <richard@nod.at>
Reported-by: Toralf Förster <toralf.foerster@gmx.de>
CC: dhowells@redhat.com
parent d824d063
arch/um/drivers/mconsole_kern.c +1 −0
@@ -22,6 +22,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
+#include <asm/switch_to.h>
 
 #include "init.h"
 #include "irq_kern.h"
arch/um/include/asm/Kbuild +1 −1
 generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
 generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h
+generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
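
Background note (not part of the commit's own text): a header listed in generic-y is satisfied by its asm-generic counterpart; Kbuild emits a one-line generated wrapper at build time, roughly the following (path and exact contents vary by kernel version):

    /* arch/um/include/generated/asm/exec.h, build-time wrapper (illustrative) */
    #include <asm-generic/exec.h>

exec.h is added here because the system.h disintegration moves arch_align_stack(), whose declaration is deleted from the UML system.h below, into asm/exec.h, and UML takes the generic version.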
arch/x86/um/asm/system.h → arch/x86/um/asm/barrier.h +75 −0
-#ifndef _ASM_X86_SYSTEM_H_
-#define _ASM_X86_SYSTEM_H_
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
 
 #include <asm/asm.h>
 #include <asm/segment.h>
@@ -10,111 +10,54 @@
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
 
-/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
-# define AT_VECTOR_SIZE_ARCH 2
-#else
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-void default_idle(void);
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
  * to devices.
  */
 #ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
+
 #define mb()	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
 #define rmb()	alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
 #define wmb()	alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
+
+#else /* CONFIG_X86_32 */
+
 #define mb()	asm volatile("mfence" : : : "memory")
 #define rmb()	asm volatile("lfence" : : : "memory")
 #define wmb()	asm volatile("sfence" : : : "memory")
-#endif
 
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
+#endif /* CONFIG_X86_32 */
+
 #define read_barrier_depends()	do { } while (0)
 
 #ifdef CONFIG_SMP
+
 #define smp_mb()	mb()
 #ifdef CONFIG_X86_PPRO_FENCE
 #define smp_rmb()	rmb()
-#else
+#else /* CONFIG_X86_PPRO_FENCE */
 #define smp_rmb()	barrier()
-#endif
+#endif /* CONFIG_X86_PPRO_FENCE */
 
 #ifdef CONFIG_X86_OOSTORE
 #define smp_wmb()	wmb()
-#else
+#else /* CONFIG_X86_OOSTORE */
 #define smp_wmb()	barrier()
-#endif
+#endif /* CONFIG_X86_OOSTORE */
 
 #define smp_read_barrier_depends()	read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
+
+#else /* CONFIG_SMP */
+
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #define smp_read_barrier_depends()	do { } while (0)
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+
+#endif /* CONFIG_SMP */
 
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
@@ -129,7 +72,4 @@ static inline void rdtsc_barrier(void)
 	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
 }
 
-extern void *_switch_to(void *prev, void *next, void *last);
-#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
-
 #endif
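
Aside (not part of the commit): the read_barrier_depends() documentation deleted above is maintained in Documentation/memory-barriers.txt. A compact rendering of the first example from that comment, using the macros this very file defines (illustrative kernel-style C; wmb() stands in for the comment's memory_barrier()):

    /* Shared state: initially a == 0, b == 1, p == &a. */
    static int a, b = 1;
    static int *p = &a;

    /* CPU 0: publish a new value, then the pointer to it. */
    b = 2;
    wmb();                  /* order the store to b before the store to p */
    p = &b;

    /* CPU 1: consume the pointer, then dereference it. */
    int *q = p;
    read_barrier_depends(); /* compiles to nothing on x86; a real barrier on e.g. Alpha */
    int d = *q;             /* if q == &b, the barrier guarantees d == 2 */

Without the barrier (or a stronger rmb()), an Alpha-class CPU may observe the new pointer yet read a stale value through it, exactly the failure mode the removed comment describes.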
arch/um/include/asm/switch_to.h (new file) +7 −0
+#ifndef _ASM_UM_SWITCH_TO_H_
+#define _ASM_UM_SWITCH_TO_H_
+
+extern void *_switch_to(void *prev, void *next, void *last);
+#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
+
+#endif
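
Usage note (illustrative, not part of the commit): code that formerly picked up switch_to() via asm/system.h, such as mconsole_kern.c above, now includes asm/switch_to.h directly. A hypothetical caller sketch showing the macro's contract (outgoing_task and incoming_task are made-up names):

    void *prev = outgoing_task, *next = incoming_task, *last;
    switch_to(prev, next, last);
    /* switch_to() rewrote "prev" with _switch_to()'s return value; by the
     * usual kernel convention that is the "last" task, i.e. whichever task
     * this CPU switched away from before resuming here. */

Note that the macro assigns through its first argument, so callers must pass an lvalue for prev.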