
Commit f28fa729 authored by Kautuk Consul, committed by Tony Luck

[IA64] Port OOM changes to ia64_do_page_fault



Commit d065bd81 (mm: retry page fault when blocking on disk transfer) and
commit 37b23e05 (x86,mm: make pagefault killable) introduced changes into
the x86 page fault handler to make it retryable as well as killable.

These changes reduce the mmap_sem hold time, which is crucial
during OOM killer invocation.

Port these changes to ia64.

Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent cfaf0251
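
For orientation, here is a minimal sketch of the retry/killable fault pattern that those two commits introduced and that this commit ports to ia64. The helper name fault_with_retry and its parameters are invented for illustration only; the flag and function names (FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_KILLABLE, FAULT_FLAG_WRITE, handle_mm_fault, fatal_signal_pending) are the kernel ones used in the diff below. Error handling and the maj_flt/min_flt accounting are omitted for brevity; see the actual diff for the ia64-specific code.

static void fault_with_retry(struct mm_struct *mm, struct vm_area_struct *vma,
			     unsigned long address, int is_write)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (is_write ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	fault = handle_mm_fault(mm, vma, address, flags);

	/* Killable: if a fatal signal (e.g. from the OOM killer) arrived while
	 * the fault blocked, mmap_sem has already been dropped, so just bail. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (fault & VM_FAULT_RETRY) {
		/* handle_mm_fault() released mmap_sem before sleeping on I/O;
		 * retry once more, this time without allowing another retry. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}

	up_read(&mm->mmap_sem);
}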
arch/ia64/mm/fault.c  +34 −12
@@ -72,6 +72,10 @@ mapped_kernel_page_is_present (unsigned long address)
 	return pte_present(pte);
 }
 
+#	define VM_READ_BIT	0
+#	define VM_WRITE_BIT	1
+#	define VM_EXEC_BIT	2
+
 void __kprobes
 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 {
@@ -81,6 +85,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct siginfo si;
 	unsigned long mask;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+	flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
@@ -109,6 +119,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (notify_page_fault(regs, TRAP_BRKPT))
 		return;
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma_prev(mm, address, &prev_vma);
@@ -130,10 +141,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 
 	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
 
-#	define VM_READ_BIT	0
-#	define VM_WRITE_BIT	1
-#	define VM_EXEC_BIT	2
-
 #	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
 	    || (1 << VM_EXEC_BIT) != VM_EXEC)
 #		error File is out of sync with <linux/mm.h>.  Please update.
@@ -142,9 +149,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
 		goto bad_area;
 
-	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
-		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
-
 	if ((vma->vm_flags & mask) != mask)
 		goto bad_area;
 
@@ -153,7 +157,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
@@ -168,10 +176,24 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		}
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			 /* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;