arch/x86/lguest/boot.c  +13 −13

@@ -113,17 +113,6 @@ static void lguest_leave_lazy_mode(void)
 	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
 }
 
-static void lazy_hcall(unsigned long call,
-		       unsigned long arg1,
-		       unsigned long arg2,
-		       unsigned long arg3)
-{
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-		hcall(call, arg1, arg2, arg3);
-	else
-		async_hcall(call, arg1, arg2, arg3);
-}
-
 /* async_hcall() is pretty simple: I'm quite proud of it really.  We have a
  * ring buffer of stored hypercalls which the Host will run though next time we
  * do a normal hypercall.  Each entry in the ring has 4 slots for the hypercall
@@ -134,8 +123,8 @@ static void lazy_hcall(unsigned long call,
  * full and we just make the hypercall directly.  This has the nice side
  * effect of causing the Host to run all the stored calls in the ring buffer
  * which empties it for next time! */
-void async_hcall(unsigned long call,
-		 unsigned long arg1, unsigned long arg2, unsigned long arg3)
+static void async_hcall(unsigned long call, unsigned long arg1,
+			unsigned long arg2, unsigned long arg3)
 {
 	/* Note: This code assumes we're uniprocessor. */
 	static unsigned int next_call;
@@ -161,6 +150,17 @@ void async_hcall(unsigned long call,
 	}
 	local_irq_restore(flags);
 }
+
+static void lazy_hcall(unsigned long call,
+		       unsigned long arg1,
+		       unsigned long arg2,
+		       unsigned long arg3)
+{
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
+		hcall(call, arg1, arg2, arg3);
+	else
+		async_hcall(call, arg1, arg2, arg3);
+}
 /*:*/
 
 /*G:033
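
The net effect of the boot.c hunks is a reordering: async_hcall() becomes static, and lazy_hcall() moves below it so it can call async_hcall() without a forward declaration (which is also why the extern prototype disappears from lguest_hcall.h below). Since the body of async_hcall() is mostly elided by the hunk context, here is a minimal, self-contained sketch of the enqueue-or-fall-back pattern the ring-buffer comment describes. It is an illustration only, not the kernel code: RING_SIZE, ring_args[], ring_pending[], direct_hcall() and queue_hcall() are hypothetical stand-ins for the lguest_data ring, hcall() and async_hcall() used in boot.c.

	/* Sketch only: hypothetical names, not the real lguest structures. */
	#define RING_SIZE 64		/* queued calls the Host drains per real trap */

	struct ring_entry {
		unsigned long call, arg1, arg2, arg3;	/* the "4 slots" per entry */
	};

	static struct ring_entry ring_args[RING_SIZE];
	static unsigned char ring_pending[RING_SIZE];	/* 0 = free, 1 = waiting for the Host */

	/* Stand-in for the real hcall() trap into the Host (stubbed for the sketch). */
	static void direct_hcall(unsigned long call, unsigned long arg1,
				 unsigned long arg2, unsigned long arg3)
	{
		(void)call; (void)arg1; (void)arg2; (void)arg3;	/* would trap here */
	}

	/* Queue a hypercall for the Host to run on the next real trap.  If the
	 * next slot is still pending, the ring is full, so trap immediately:
	 * as the comment in boot.c notes, that also drains the ring. */
	static void queue_hcall(unsigned long call, unsigned long arg1,
				unsigned long arg2, unsigned long arg3)
	{
		static unsigned int next;	/* single writer: the uniprocessor assumption */

		if (ring_pending[next]) {
			direct_hcall(call, arg1, arg2, arg3);
			return;
		}
		ring_args[next].call = call;
		ring_args[next].arg1 = arg1;
		ring_args[next].arg2 = arg2;
		ring_args[next].arg3 = arg3;
		/* The real code also disables interrupts around this section and
		 * orders the stores before publishing the slot; omitted here. */
		ring_pending[next] = 1;
		if (++next == RING_SIZE)
			next = 0;
	}

A caller in the spirit of lazy_hcall() would then pick queue_hcall() while batching in a lazy paravirt mode and direct_hcall() otherwise, which is exactly the dispatch the moved function performs with paravirt_get_lazy_mode().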
include/asm-x86/lguest_hcall.h  +0 −3

@@ -54,9 +54,6 @@ hcall(unsigned long call,
 }
 /*:*/
 
-void async_hcall(unsigned long call,
-		 unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
 