Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b5aa97e8 authored by Ingo Molnar's avatar Ingo Molnar
Browse files

Merge branches 'x86/signal' and 'x86/irq' into perfcounters/core

Merge these pending x86 tree changes into the perfcounters tree
to avoid conflicts.
Loading
Loading
Loading
Loading
+2 −10
Original line number Diff line number Diff line
@@ -242,21 +242,13 @@ config X86_FIND_SMP_CONFIG
	def_bool y
	depends on X86_MPPARSE || X86_VOYAGER

if ACPI
config X86_MPPARSE
	def_bool y
	bool "Enable MPS table"
	bool "Enable MPS table" if ACPI
	default y
	depends on X86_LOCAL_APIC
	help
	  For old SMP systems that do not have proper ACPI support. Newer systems
	  (esp. with 64-bit CPUs) with ACPI support, MADT and DSDT will override it.
endif

if !ACPI
config X86_MPPARSE
	def_bool y
	depends on X86_LOCAL_APIC
endif

choice
	prompt "Subarchitecture Type"
+29 −39
Original line number Diff line number Diff line
@@ -198,22 +198,27 @@ struct rt_sigframe
};

#define COPY(x)			{		\
	unsigned int reg;		\
	err |= __get_user(reg, &sc->x);	\
	regs->x = reg;			\
	err |= __get_user(regs->x, &sc->x);	\
}

#define RELOAD_SEG(seg,mask)						\
	{ unsigned int cur;						\
	  unsigned short pre;						\
#define COPY_SEG_CPL3(seg)	{			\
		unsigned short tmp;			\
		err |= __get_user(tmp, &sc->seg);	\
		regs->seg = tmp | 3;			\
}

#define RELOAD_SEG(seg)		{		\
	unsigned int cur, pre;			\
	err |= __get_user(pre, &sc->seg);	\
	savesegment(seg, cur);			\
	  pre |= mask;							\
	  if (pre != cur) loadsegment(seg, pre); }
	pre |= 3;				\
	if (pre != cur)				\
		loadsegment(seg, pre);		\
}

static int ia32_restore_sigcontext(struct pt_regs *regs,
				   struct sigcontext_ia32 __user *sc,
				   unsigned int *peax)
				   unsigned int *pax)
{
	unsigned int tmpflags, gs, oldgs, err = 0;
	void __user *buf;
@@ -240,18 +245,16 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
	if (gs != oldgs)
		load_gs_index(gs);

	RELOAD_SEG(fs, 3);
	RELOAD_SEG(ds, 3);
	RELOAD_SEG(es, 3);
	RELOAD_SEG(fs);
	RELOAD_SEG(ds);
	RELOAD_SEG(es);

	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
	COPY(dx); COPY(cx); COPY(ip);
	/* Don't touch extended registers */

	err |= __get_user(regs->cs, &sc->cs);
	regs->cs |= 3;
	err |= __get_user(regs->ss, &sc->ss);
	regs->ss |= 3;
	COPY_SEG_CPL3(cs);
	COPY_SEG_CPL3(ss);

	err |= __get_user(tmpflags, &sc->flags);
	regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -262,9 +265,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
	buf = compat_ptr(tmp);
	err |= restore_i387_xstate_ia32(buf);

	err |= __get_user(tmp, &sc->ax);
	*peax = tmp;

	err |= __get_user(*pax, &sc->ax);
	return err;
}

@@ -359,20 +360,15 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
	err |= __put_user(regs->dx, &sc->dx);
	err |= __put_user(regs->cx, &sc->cx);
	err |= __put_user(regs->ax, &sc->ax);
	err |= __put_user(regs->cs, &sc->cs);
	err |= __put_user(regs->ss, &sc->ss);
	err |= __put_user(current->thread.trap_no, &sc->trapno);
	err |= __put_user(current->thread.error_code, &sc->err);
	err |= __put_user(regs->ip, &sc->ip);
	err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
	err |= __put_user(regs->flags, &sc->flags);
	err |= __put_user(regs->sp, &sc->sp_at_signal);
	err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);

	tmp = save_i387_xstate_ia32(fpstate);
	if (tmp < 0)
		err = -EFAULT;
	else
		err |= __put_user(ptr_to_compat(tmp ? fpstate : NULL),
					&sc->fpstate);
	err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);

	/* non-iBCS2 extensions.. */
	err |= __put_user(mask, &sc->oldmask);
@@ -408,6 +404,8 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
	if (used_math()) {
		sp = sp - sig_xstate_ia32_size;
		*fpstate = (struct _fpstate_ia32 *) sp;
		if (save_i387_xstate_ia32(*fpstate) < 0)
			return (void __user *) -1L;
	}

	sp -= frame_size;
@@ -430,12 +428,10 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
		u16 poplmovl;
		u32 val;
		u16 int80;
		u16 pad;
	} __attribute__((packed)) code = {
		0xb858,		 /* popl %eax ; movl $...,%eax */
		__NR_ia32_sigreturn,
		0x80cd,		/* int $0x80 */
		0,
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
@@ -511,8 +507,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
		u8 movl;
		u32 val;
		u16 int80;
		u16 pad;
		u8  pad2;
		u8  pad;
	} __attribute__((packed)) code = {
		0xb8,
		__NR_ia32_rt_sigreturn,
@@ -572,11 +567,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

+9 −1
Original line number Diff line number Diff line
@@ -168,7 +168,15 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
+29 −45
Original line number Diff line number Diff line
@@ -4,26 +4,33 @@
#include <asm/types.h>
#include <linux/compiler.h>

#ifdef __GNUC__
#define __LITTLE_ENDIAN

#ifdef __i386__

static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
{
#ifdef __i386__
# ifdef CONFIG_X86_BSWAP
	asm("bswap %0" : "=r" (x) : "0" (x));
	asm("bswap %0" : "=r" (val) : "0" (val));
# else
	asm("xchgb %b0,%h0\n\t"	/* swap lower bytes	*/
	    "rorl $16,%0\n\t"	/* swap words		*/
	    "xchgb %b0,%h0"	/* swap higher bytes	*/
	    : "=q" (x)
	    : "0" (x));
	    : "=q" (val)
	    : "0" (val));
# endif

#else /* __i386__ */
	asm("bswapl %0"
	    : "=r" (val)
	    : "0" (val));
#endif
	return x;
	return val;
}
#define __arch_swab32 __arch_swab32

static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
{
#ifdef __i386__
	union {
		struct {
			__u32 a;
@@ -37,45 +44,22 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
# else
	v.s.a = ___arch__swab32(v.s.a);
	v.s.b = ___arch__swab32(v.s.b);
	v.s.a = __arch_swab32(v.s.a);
	v.s.b = __arch_swab32(v.s.b);
	asm("xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
# endif
	return v.u;
}

#else /* __i386__ */

static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
	asm("bswapq %0"
	    : "=r" (x)
	    : "0" (x));
	return x;
}

static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
	asm("bswapl %0"
	    : "=r" (x)
	    : "0" (x));
	return x;
}

	    : "=r" (val)
	    : "0" (val));
	return val;
#endif
}
#define __arch_swab64 __arch_swab64

/* Do not define swab16.  Gcc is smart enough to recognize "C" version and
   convert it into rotation or exchange.  */

#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab32(x) ___arch__swab32(x)

#define __BYTEORDER_HAS_U64__

#endif /* __GNUC__ */

#include <linux/byteorder/little_endian.h>
#include <linux/byteorder.h>

#endif /* _ASM_X86_BYTEORDER_H */
+66 −31
Original line number Diff line number Diff line
@@ -6,10 +6,10 @@
#endif

/*
   Macros for dwarf2 CFI unwind table entries.
   See "as.info" for details on these pseudo ops. Unfortunately
   they are only supported in very new binutils, so define them
   away for older versions.
 * Macros for dwarf2 CFI unwind table entries.
 * See "as.info" for details on these pseudo ops. Unfortunately
 * they are only supported in very new binutils, so define them
 * away for older versions.
 */

#ifdef CONFIG_AS_CFI
@@ -36,8 +36,10 @@

#else

/* Due to the structure of pre-existing code, don't use assembler line
   comment character # to ignore the arguments. Instead, use a dummy macro. */
/*
 * Due to the structure of pre-existing code, don't use assembler line
 * comment character # to ignore the arguments. Instead, use a dummy macro.
 */
.macro cfi_ignore a=0, b=0, c=0, d=0
.endm

@@ -58,4 +60,37 @@

#endif

/*
 * An attempt to make CFI annotations more or less
 * correct and shorter. It is implied that you know
 * what you're doing if you use them.
 */
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_64
	.macro pushq_cfi reg
	pushq \reg
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro popq_cfi reg
	popq \reg
	CFI_ADJUST_CFA_OFFSET -8
	.endm

	.macro movq_cfi reg offset=0
	movq %\reg, \offset(%rsp)
	CFI_REL_OFFSET \reg, \offset
	.endm

	.macro movq_cfi_restore offset reg
	movq \offset(%rsp), %\reg
	CFI_RESTORE \reg
	.endm
#else /*!CONFIG_X86_64*/

	/* 32-bit definitions are not implemented yet */

#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/

#endif /* _ASM_X86_DWARF2_H */
Loading