
Commit f449b936 authored by Jan Kara, committed by Dan Williams

dax: Finish fault completely when loading holes



The only case in which we do not finish the page fault completely is when we
are loading hole pages into a radix tree. Avoid this special case and finish
the fault inside the DAX fault handler in that case as well. This will allow
for easier iomap handling.

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent e3fce68c
+18 −9
@@ -539,15 +539,16 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
  * otherwise it will simply fall out of the page cache under memory
  * pressure without ever having been dirtied.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static int dax_load_hole(struct address_space *mapping, void **entry,
 			 struct vm_fault *vmf)
 {
 	struct page *page;
+	int ret;
 
 	/* Hole page already exists? Return it...  */
-	if (!radix_tree_exceptional_entry(entry)) {
-		vmf->page = entry;
-		return VM_FAULT_LOCKED;
+	if (!radix_tree_exceptional_entry(*entry)) {
+		page = *entry;
+		goto out;
 	}
 
 	/* This will replace locked radix tree entry with a hole page */
@@ -555,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 				   vmf->gfp_mask | __GFP_ZERO);
 	if (!page)
 		return VM_FAULT_OOM;
+ out:
 	vmf->page = page;
-	return VM_FAULT_LOCKED;
+	ret = finish_fault(vmf);
+	vmf->page = NULL;
+	*entry = page;
+	if (!ret) {
+		/* Grab reference for PTE that is now referencing the page */
+		get_page(page);
+		return VM_FAULT_NOPAGE;
+	}
+	return ret;
 }
 
 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
@@ -1163,8 +1173,8 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
-			vmf_ret = dax_load_hole(mapping, entry, vmf);
-			break;
+			vmf_ret = dax_load_hole(mapping, &entry, vmf);
+			goto finish_iomap;
 		}
 		/*FALLTHRU*/
 	default:
@@ -1185,7 +1195,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		}
 	}
  unlock_entry:
-	if (vmf_ret != VM_FAULT_LOCKED || error)
-		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
 	if (error == -ENOMEM)
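
To make the behavioural change easier to follow, here is a minimal userspace C sketch of the before/after control flow. It is only a toy model: every name prefixed with toy_, and the load_hole_before/load_hole_after helpers, are illustrative stand-ins invented for this sketch; the real VM_FAULT_* codes, struct vm_fault, and finish_fault() are kernel internals that are modelled here only loosely.

/*
 * Toy model of the control-flow change in this commit. Everything here is a
 * simplified stand-in; it is not kernel code.
 */
#include <stdio.h>

enum toy_fault_ret {
	TOY_FAULT_DONE   = 0,	/* fault handled, nothing more to do */
	TOY_FAULT_LOCKED = 1,	/* page returned locked; caller installs the PTE */
	TOY_FAULT_NOPAGE = 2,	/* PTE already installed by the handler */
};

struct toy_vm_fault {
	const char *page;	/* stands in for vmf->page */
};

/* Stand-in for finish_fault(): "install" the PTE, return 0 on success. */
static int toy_finish_fault(struct toy_vm_fault *vmf)
{
	printf("finish_fault: installing PTE for %s\n", vmf->page);
	return 0;
}

/* Before this commit: hand the locked page back and let the caller finish. */
static enum toy_fault_ret load_hole_before(struct toy_vm_fault *vmf)
{
	vmf->page = "hole page";
	return TOY_FAULT_LOCKED;
}

/* After this commit: finish the fault here and report NOPAGE. */
static enum toy_fault_ret load_hole_after(struct toy_vm_fault *vmf)
{
	int ret;

	vmf->page = "hole page";
	ret = toy_finish_fault(vmf);
	vmf->page = NULL;
	if (!ret)
		return TOY_FAULT_NOPAGE;
	return TOY_FAULT_DONE;
}

int main(void)
{
	struct toy_vm_fault vmf = { 0 };

	/* Old flow: the caller still has work to do after the handler returns. */
	if (load_hole_before(&vmf) == TOY_FAULT_LOCKED)
		printf("caller: must install PTE for %s itself\n", vmf.page);

	/* New flow: the handler has already completed the fault. */
	if (load_hole_after(&vmf) == TOY_FAULT_NOPAGE)
		printf("caller: nothing left to do\n");

	return 0;
}

The point mirrored from the patch is that the caller in dax_iomap_fault() no longer needs a VM_FAULT_LOCKED special case: once dax_load_hole() succeeds it has already completed the fault, so the caller can jump straight to the finish_iomap cleanup path.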