Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dffe8449 authored by Nicholas Piggin, committed by Michael Ellerman
Browse files

powerpc/64s/radix: Improve preempt handling in TLB code



Preempt should be consistently disabled for mm_is_thread_local tests,
so bring the rest of these under preempt_disable().

Preempt does not need to be disabled for the mm->context.id tests,
which allows simplification and removal of gotos.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 63c9d8a4
Loading
Loading
Loading
Loading
+23 −25
Original line number Diff line number Diff line
@@ -186,16 +186,15 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
@@ -204,16 +203,15 @@ void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);
@@ -230,15 +228,15 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}

@@ -322,24 +320,23 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	bool local;
	unsigned long ap = mmu_get_ap(psize);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;


	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;
		return;

	preempt_disable();
	local = mm_is_thread_local(mm);
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	} else {
		for (addr = start; addr < end; addr += page_size) {

			if (local)
@@ -347,21 +344,20 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
			else
				_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
		}
err_out:
	}
	preempt_enable();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(mmu_virtual_psize);
	unsigned long pid, end;

	bool local;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
@@ -369,6 +365,8 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
		return;
	}

	preempt_disable();
	local = mm_is_thread_local(mm);
	/* Otherwise first do the PWC */
	if (local)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
@@ -383,7 +381,7 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
no_context:

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */