
Commit d06063cc authored by Linus Torvalds

Move FAULT_FLAG_xyz into handle_mm_fault() callers



This allows the callers to now pass down the full set of FAULT_FLAG_xyz
flags to handle_mm_fault().  All callers have been (mechanically)
converted to the new calling convention; there's almost certainly room
for architectures to clean up their code and then add FAULT_FLAG_RETRY
when that support is added.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 30c9f3a9
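
For context, a minimal before/after sketch of the calling convention this
commit changes. The signatures and the 0x01 value of FAULT_FLAG_WRITE match
the kernel tree around this commit, but the snippet is a simplified
illustration, not a complete header:

	/* Before: the fourth argument was a bare write_access boolean. */
	int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long address, int write_access);

	/* After: the fourth argument is a bitmask of FAULT_FLAG_xyz values,
	 * so future flags (e.g. FAULT_FLAG_RETRY) can be OR'ed in at the
	 * call site without changing the function signature again. */
	#define FAULT_FLAG_WRITE	0x01	/* fault was a write access */

	int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long address, unsigned int flags);

	/* Typical mechanical conversion, as in every hunk below: */
	fault = handle_mm_fault(mm, vma, address,
				write ? FAULT_FLAG_WRITE : 0);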
+1 −1
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, cause > 0);
+	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
+1 −1
@@ -208,7 +208,7 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	 * than endlessly redo the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+1 −1
@@ -133,7 +133,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	 * fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+1 −1
@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+1 −1
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write);
+	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
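
The conversion pattern is the same in every hunk above: each architecture's
existing "was this a write?" predicate (cause > 0, fsr bit 11, writeaccess,
and so on) is mapped onto the new bitmask. The payoff the commit message
points at is that a bitmask composes. A hypothetical sketch of a later
retry-capable call site: FAULT_FLAG_RETRY is not defined anywhere in the
tree at this commit, and both the 0x02 value and the arch_can_retry_fault()
helper below are invented purely for illustration:

	#define FAULT_FLAG_RETRY	0x02	/* hypothetical, not in this tree */

	unsigned int flags = write ? FAULT_FLAG_WRITE : 0;

	if (arch_can_retry_fault())		/* hypothetical helper */
		flags |= FAULT_FLAG_RETRY;

	fault = handle_mm_fault(mm, vma, address, flags);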