Commit 1550290b authored by Ross Zwisler, committed by Dave Chinner

dax: dax_iomap_fault() needs to call iomap_end()

Currently iomap_end() doesn't do anything for DAX page faults in either ext2
or XFS: ext2_iomap_end() just checks for a write underrun, and
xfs_file_iomap_end() checks whether it needs to finish a delayed
allocation.  However, in the future iomap_end() calls might be needed to
make sure we have balanced allocations, locks, etc.  So, add calls to
iomap_end() with appropriate error handling to dax_iomap_fault().
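
For context, the write-underrun check mentioned above is essentially the
whole body of ext2's hook. A minimal sketch of its shape (based on the
fs/ext2/inode.c of this era; ext2_write_failed() is ext2's existing helper
that trims blocks allocated past the end of a failed or short write):

	static int
	ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
			ssize_t written, unsigned flags, struct iomap *iomap)
	{
		/* Only a short write into a mapped extent needs cleanup;
		 * reads and page faults fall through untouched. */
		if (iomap->type == IOMAP_MAPPED &&
		    written < length &&
		    (flags & IOMAP_WRITE))
			ext2_write_failed(inode->i_mapping, offset + length);
		return 0;
	}

Because the fault path below passes written == 0 on its error path, a
failed write fault gets the same cleanup as a short write, while the
success path reports PAGE_SIZE written and propagates iomap_end()'s own
return value.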

Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Suggested-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent 333ccc97
fs/dax.c +29 −8
@@ -1165,6 +1165,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	struct iomap iomap = { 0 };
 	unsigned flags = 0;
 	int error, major = 0;
+	int locked_status = 0;
 	void *entry;
 
 	/*
@@ -1194,7 +1195,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		goto unlock_entry;
 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
 		error = -EIO;		/* fs corruption? */
-		goto unlock_entry;
+		goto finish_iomap;
 	}
 
 	sector = dax_iomap_sector(&iomap, pos);
@@ -1216,13 +1217,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		}
 
 		if (error)
-			goto unlock_entry;
+			goto finish_iomap;
 		if (!radix_tree_exceptional_entry(entry)) {
 			vmf->page = entry;
-			return VM_FAULT_LOCKED;
+			locked_status = VM_FAULT_LOCKED;
+		} else {
+			vmf->entry = entry;
+			locked_status = VM_FAULT_DAX_LOCKED;
 		}
-		vmf->entry = entry;
-		return VM_FAULT_DAX_LOCKED;
+		goto finish_iomap;
 	}
 
 	switch (iomap.type) {
@@ -1237,8 +1240,10 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		break;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
-		if (!(vmf->flags & FAULT_FLAG_WRITE))
-			return dax_load_hole(mapping, entry, vmf);
+		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
+			locked_status = dax_load_hole(mapping, entry, vmf);
+			break;
+		}
 		/*FALLTHRU*/
 	default:
 		WARN_ON_ONCE(1);
@@ -1246,7 +1251,19 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		break;
 	}
 
+ finish_iomap:
+	if (ops->iomap_end) {
+		if (error) {
+			/* keep previous error */
+			ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
+					&iomap);
+		} else {
+			error = ops->iomap_end(inode, pos, PAGE_SIZE,
+					PAGE_SIZE, flags, &iomap);
+		}
+	}
  unlock_entry:
-	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	if (!locked_status || error)
+		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
 	if (error == -ENOMEM)
@@ -1254,6 +1271,10 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	/* -EBUSY is fine, somebody else faulted on the same PTE */
 	if (error < 0 && error != -EBUSY)
 		return VM_FAULT_SIGBUS | major;
+	if (locked_status) {
+		WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
+		return locked_status;
+	}
 	return VM_FAULT_NOPAGE | major;
 }
 EXPORT_SYMBOL_GPL(dax_iomap_fault);