Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9a2d78e2 authored by K. Y. Srinivasan, committed by Thomas Gleixner
Browse files

X86/Hyper-V: Consolidate the allocation of the hypercall input page



Consolidate the allocation of the hypercall input page.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Cc: olaf@aepfle.de
Cc: sthemmin@microsoft.com
Cc: gregkh@linuxfoundation.org
Cc: jasowang@redhat.com
Cc: Michael.H.Kelley@microsoft.com
Cc: hpa@zytor.com
Cc: apw@canonical.com
Cc: devel@linuxdriverproject.org
Cc: vkuznets@redhat.com
Link: https://lkml.kernel.org/r/20180516215334.6547-5-kys@linuxonhyperv.com
parent 800b8f03
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -324,8 +324,6 @@ void __init hyperv_init(void)
	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	hyper_alloc_mmu();

	hv_apic_init();

	/*
+6 −24
Original line number Diff line number Diff line
@@ -32,9 +32,6 @@ struct hv_flush_pcpu_ex {
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)

static struct hv_flush_pcpu __percpu **pcpu_flush;

static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;

/*
 * Fills in gva_list starting from offset. Returns the number of items added.
@@ -77,7 +74,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,

	trace_hyperv_mmu_flush_tlb_others(cpus, info);

	if (!pcpu_flush || !hv_hypercall_pg)
	if (!hv_hypercall_pg)
		goto do_native;

	if (cpumask_empty(cpus))
@@ -85,10 +82,8 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,

	local_irq_save(flags);

	flush_pcpu = this_cpu_ptr(pcpu_flush);

	if (unlikely(!*flush_pcpu))
		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
	flush_pcpu = (struct hv_flush_pcpu **)
		     this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

@@ -164,7 +159,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,

	trace_hyperv_mmu_flush_tlb_others(cpus, info);

	if (!pcpu_flush_ex || !hv_hypercall_pg)
	if (!hv_hypercall_pg)
		goto do_native;

	if (cpumask_empty(cpus))
@@ -172,10 +167,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,

	local_irq_save(flags);

	flush_pcpu = this_cpu_ptr(pcpu_flush_ex);

	if (unlikely(!*flush_pcpu))
		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
	flush_pcpu = (struct hv_flush_pcpu_ex **)
		     this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

@@ -257,14 +250,3 @@ void hyperv_setup_mmu_ops(void)
		pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
	}
}

void hyper_alloc_mmu(void)
{
	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
		return;

	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
		pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
	else
		pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
}
+0 −1
Original line number Diff line number Diff line
@@ -294,7 +294,6 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyper_alloc_mmu(void);
void hyperv_report_panic(struct pt_regs *regs, long err);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);