Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 16da2f93 authored by Thomas Gleixner, committed by Ingo Molnar
Browse files

x86: smp_64.c: remove unused exports and cleanup while at it



The exports are not used anywhere, and there was never a reason to
introduce them in the first place.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 081e10b9
Loading
Loading
Loading
Loading
+35 −39
Original line number Diff line number Diff line
@@ -176,9 +176,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/* Could avoid this lock when
	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	   probably not worth checking this for a cache-hot lock. */
	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
@@ -202,12 +204,12 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
/*
 * Initialise the TLB-flush IPI machinery: give every possible CPU's
 * per-CPU flush_state a usable (unlocked) tlbstate_lock.
 *
 * Always returns 0 (initcall convention); registered via core_initcall.
 */
int __cpuinit init_smp_flush(void)
{
	int cpu;

	for_each_cpu_mask(cpu, cpu_possible_map)
		spin_lock_init(&per_cpu(flush_state, cpu).tlbstate_lock);

	return 0;
}

core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
@@ -224,7 +226,6 @@ void flush_tlb_current_task(void)
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_current_task);

void flush_tlb_mm (struct mm_struct * mm)
{
@@ -245,7 +246,6 @@ void flush_tlb_mm (struct mm_struct * mm)

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
@@ -268,7 +268,6 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void do_flush_tlb_all(void* info)
{
@@ -325,9 +324,7 @@ void unlock_ipi_call_lock(void)
 * this function sends a 'generic call function' IPI to all other CPU
 * of the system defined in the mask.
 */

static int
__smp_call_function_mask(cpumask_t mask,
static int __smp_call_function_mask(cpumask_t mask,
				    void (*func)(void *), void *info,
				    int wait)
{
@@ -420,8 +417,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
			      int nonatomic, int wait)
{
	/* prevent preemption and reschedule on another processor */
	int ret;
	int me = get_cpu();
	int ret, me = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());