
Commit a1e20301 authored by Paul Mundt

sh64: Port OOM changes to do_page_fault



Reflect the sh32 OOM changes for the sh64 page fault handler, too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 11fd9824
+30 −10
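
For orientation, here is a minimal, self-contained user-space sketch of the retry pattern being ported (the same shape the sh32 handler gained in parent 11fd9824): build the fault flags once, retry a fault that reports VM_FAULT_RETRY exactly once, and clear FAULT_FLAG_ALLOW_RETRY before looping so the retry cannot recur. All constants and the stub fault function are illustrative stand-ins, not the kernel's own definitions.

/*
 * Hypothetical user-space model of the fault-retry flow; the flag and
 * result values below are stand-ins, not the kernel's definitions.
 */
#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x01
#define FAULT_FLAG_KILLABLE	0x02
#define FAULT_FLAG_WRITE	0x04

#define VM_FAULT_RETRY		0x10
#define VM_FAULT_MAJOR		0x20

/* Stub fault handler: the first attempt "drops the lock" and asks to retry. */
static int stub_handle_mm_fault(unsigned int flags)
{
	static int first_try = 1;

	if (first_try && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		first_try = 0;
		return VM_FAULT_RETRY;
	}

	return VM_FAULT_MAJOR;	/* second attempt completes */
}

int main(void)
{
	int writeaccess = 1;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (writeaccess ? FAULT_FLAG_WRITE : 0));
	int fault;

retry:
	fault = stub_handle_mm_fault(flags);

	if (fault & VM_FAULT_RETRY) {
		/*
		 * Allow exactly one retry: clearing FAULT_FLAG_ALLOW_RETRY
		 * guarantees a second VM_FAULT_RETRY cannot loop forever.
		 */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		printf("VM_FAULT_RETRY reported, retrying once\n");
		goto retry;
	}

	printf("fault handled, result 0x%x\n", (unsigned int)fault);
	return 0;
}

The kernel version additionally bails out early when a fatal signal is pending, and relies on __lock_page_or_retry() having dropped mmap_sem before VM_FAULT_RETRY is reported; both details appear in the hunks below.
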
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2000, 2001  Paolo Alberelli
  * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 - 2009 Paul Mundt
+ * Copyright (C) 2003 - 2012 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -95,6 +95,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	const struct exception_table_entry *fixup;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (writeaccess ? FAULT_FLAG_WRITE : 0));
 	pte_t *pte;
 	int fault;
@@ -124,6 +126,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+retry:
 	/* TLB misses upon some cache flushes get done under cli() */
 	down_read(&mm->mmap_sem);
@@ -188,7 +191,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -197,6 +204,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 		BUG();
 	}
 
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
@@ -207,6 +215,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 				      regs, address);
 		}
 
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+			goto retry;
+		}
+	}
+
 	/* If we get here, the page fault has been handled.  Do the TLB refill
 	   now from the newly-setup PTE, to avoid having to fault again right
 	   away on the same instruction. */
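
Taken together, the hunks mirror the sh32 structure: the fault flags are computed once up front, a single VM_FAULT_RETRY pass is permitted (FAULT_FLAG_ALLOW_RETRY is cleared before jumping back to retry, so the handler cannot spin), and a pending fatal signal aborts the fault early. The bare return without up_read() is safe because, as the in-diff comment notes, __lock_page_or_retry() in mm/filemap.c has already released mmap_sem whenever VM_FAULT_RETRY is reported.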