
Commit 6abcd98f authored by Glauber de Oliveira Costa, committed by Ingo Molnar

x86: irqflags consolidation



This patch consolidates the irqflags include files containing common
paravirt definitions. The native definitions for interrupt handling, halt,
and the like are the same for 32-bit and 64-bit, so they are kept in
irqflags.h; the differences are split out into the arch-specific files.
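For context, a rough sketch of the kind of word-size-independent native
helpers such a shared irqflags.h can carry (illustrative only, the exact
bodies are an approximation and are not quoted from this diff):

/* Sketch: these helpers behave the same on 32-bit and 64-bit x86 */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/* pushf/pop adapt to the current operand size */
	asm volatile("pushf ; pop %0" : "=g" (flags) : /* no input */ : "memory");
	return flags;
}

static inline void native_irq_disable(void)
{
	asm volatile("cli" : : : "memory");
}

static inline void native_irq_enable(void)
{
	asm volatile("sti" : : : "memory");
}

static inline void native_safe_halt(void)
{
	/* "sti; hlt" enables interrupts and halts without a race window */
	asm volatile("sti; hlt" : : : "memory");
}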

The syscall return function, irq_enable_sysexit, has a very i386-specific
name, so it is renamed to a more general one.
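The renamed hook lives in struct pv_cpu_ops; roughly (a sketch, not part of
this diff), only the member name and its native backend change, while the
calling convention stays the same:

/* Sketch of the affected pv_cpu_ops members (approximation) */
struct pv_cpu_ops {
	/* ... */
	void (*irq_enable_syscall_ret)(void);	/* formerly irq_enable_sysexit */
	void (*iret)(void);
	/* ... */
};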

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 416b7218
+1 −1
@@ -123,7 +123,7 @@ void foo(void)
	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+	OFFSET(PV_CPU_irq_enable_syscall_ret, pv_cpu_ops, irq_enable_syscall_ret);
	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
#endif

+4 −4
@@ -58,7 +58,7 @@
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
- *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *   ENABLE_INTERRUPTS_SYSCALL_RET (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
@@ -351,7 +351,7 @@ sysenter_past_esp:
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_FS(%esp), %fs
-	ENABLE_INTERRUPTS_SYSEXIT
+	ENABLE_INTERRUPTS_SYSCALL_RET
	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
@@ -882,10 +882,10 @@ ENTRY(native_iret)
.previous
END(native_iret)

-ENTRY(native_irq_enable_sysexit)
+ENTRY(native_irq_enable_syscall_ret)
	sti
	sysexit
-END(native_irq_enable_sysexit)
+END(native_irq_enable_syscall_ret)
#endif

KPROBE_ENTRY(int3)
+5 −5
@@ -60,7 +60,7 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
+DEF_NATIVE(pv_cpu_ops, irq_enable_syscall_ret, "sti; sysexit");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
@@ -88,7 +88,7 @@ static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
	SITE(pv_irq_ops, restore_fl);
	SITE(pv_irq_ops, save_fl);
	SITE(pv_cpu_ops, iret);
-	SITE(pv_cpu_ops, irq_enable_sysexit);
+	SITE(pv_cpu_ops, irq_enable_syscall_ret);
	SITE(pv_mmu_ops, read_cr2);
	SITE(pv_mmu_ops, read_cr3);
	SITE(pv_mmu_ops, write_cr3);
@@ -186,7 +186,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
		/* If the operation is a nop, then nop the callsite */
		ret = paravirt_patch_nop();
	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit))
+		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret))
		/* If operation requires a jmp, then jmp */
		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
	else
@@ -237,7 +237,7 @@ static void native_flush_tlb_single(unsigned long addr)

/* These are in entry.S */
extern void native_iret(void);
-extern void native_irq_enable_sysexit(void);
+extern void native_irq_enable_syscall_ret(void);

static int __init print_banner(void)
{
@@ -384,7 +384,7 @@ struct pv_cpu_ops pv_cpu_ops = {
	.write_idt_entry = write_dt_entry,
	.load_esp0 = native_load_esp0,

-	.irq_enable_sysexit = native_irq_enable_sysexit,
+	.irq_enable_syscall_ret = native_irq_enable_syscall_ret,
	.iret = native_iret,

	.set_iopl_mask = native_set_iopl_mask,
+2 −2
@@ -148,7 +148,7 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
					      insns, eip);
		case PARAVIRT_PATCH(pv_cpu_ops.iret):
			return patch_internal(VMI_CALL_IRET, len, insns, eip);
-		case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
+		case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret):
			return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
		default:
			break;
@@ -870,7 +870,7 @@ static inline int __init activate_vmi(void)
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
-	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
+	pv_cpu_ops.irq_enable_syscall_ret = (void *)0xfeedbab0;
	pv_cpu_ops.iret = (void *)0xbadbab0;

#ifdef CONFIG_SMP
+1 −1
@@ -953,7 +953,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
	.read_pmc = native_read_pmc,

	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
-	.irq_enable_sysexit = NULL,  /* never called */
+	.irq_enable_syscall_ret = NULL,  /* never called */

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,