
Commit 3bb0f28d authored by Linus Torvalds
Pull filesystem-dax updates from Dan Williams:

 - Fix handling of PMD-sized entries in the Xarray that lead to a crash
   scenario

 - Miscellaneous cleanups and small fixes

* tag 'fsdax-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Flush partial PMDs correctly
  fs/dax: NIT fix comment regarding start/end vs range
  fs/dax: Convert to use vmf_error()
parents a840b56b e4b3448b
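
The first bullet corresponds to the "dax: Flush partial PMDs correctly" change below: dax_writeback_one() used to flush starting from xas->xa_index as-is, but for a PMD-sized entry that index can point into the middle of the PMD, so the fix rounds the index down to the start of the entry before calling dax_entry_mkclean() and dax_flush(). A minimal userspace sketch of just that rounding arithmetic (the function name and example values are illustrative, not the kernel's):

#include <stdio.h>

/* Round an Xarray index down to the first page of a (possibly
 * PMD-sized) DAX entry of the given order, mirroring the new
 * arithmetic in dax_writeback_one():
 *   count = 1UL << order;  index = xa_index & ~(count - 1); */
static unsigned long dax_aligned_index(unsigned long xa_index,
				       unsigned int order)
{
	unsigned long count = 1UL << order;	/* pages covered by the entry */

	return xa_index & ~(count - 1);		/* clear the low 'order' bits */
}

int main(void)
{
	/* A PMD entry covers 512 4K pages (order 9 on x86-64); an index
	 * in the middle of the PMD rounds down to its first page. */
	printf("%lu\n", dax_aligned_index(1037, 9));	/* prints 1024 */
	printf("%lu\n", dax_aligned_index(1024, 9));	/* prints 1024 */
	printf("%lu\n", dax_aligned_index(37, 0));	/* PTE entry: 37 */
	return 0;
}

Masking with ~(count - 1) works because an entry of order N always starts at an index that is a multiple of 1UL << N.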
fs/dax.c  +11 −14
@@ -788,7 +788,7 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		address = pgoff_address(index, vma);

		/*
-		 * Note because we provide start/end to follow_pte_pmd it will
+		 * Note because we provide range to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
@@ -843,9 +843,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
-	unsigned long pfn;
+	unsigned long pfn, index, count;
	long ret = 0;
-	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
@@ -894,17 +893,18 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
	xas_unlock_irq(xas);

	/*
-	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
-	 * in the middle of a PMD, the 'index' we are given will be aligned to
-	 * the start index of the PMD, as will the pfn we pull from 'entry'.
+	 * If dax_writeback_mapping_range() was given a wbc->range_start
+	 * in the middle of a PMD, the 'index' we use needs to be
+	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
-	size = PAGE_SIZE << dax_entry_order(entry);
+	count = 1UL << dax_entry_order(entry);
+	index = xas->xa_index & ~(count - 1);

-	dax_entry_mkclean(mapping, xas->xa_index, pfn);
-	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
+	dax_entry_mkclean(mapping, index, pfn);
+	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
@@ -917,8 +917,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, false);

-	trace_dax_writeback_one(mapping->host, xas->xa_index,
-			size >> PAGE_SHIFT);
+	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
@@ -1220,9 +1219,7 @@ static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
-	if (error == -ENOMEM)
-		return VM_FAULT_OOM;
-	return VM_FAULT_SIGBUS;
+	return vmf_error(error);
}

/*
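
The final hunk is the vmf_error() conversion. As of v5.1, vmf_error() maps -ENOMEM to VM_FAULT_OOM and any other negative errno to VM_FAULT_SIGBUS, which is exactly what dax_fault_return() open-coded before, so only the error == 0 case stays special. A self-contained sketch of that equivalence (the fault-code values are stubbed for illustration; in the kernel they come from include/linux/mm_types.h):

#include <assert.h>
#include <errno.h>

/* Stubbed fault codes for illustration only. */
typedef unsigned int vm_fault_t;
#define VM_FAULT_OOM	0x000001
#define VM_FAULT_SIGBUS	0x000002
#define VM_FAULT_NOPAGE	0x000100

/* vmf_error() as of v5.1: -ENOMEM becomes OOM, anything else SIGBUS. */
static vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/* dax_fault_return() after the conversion in this merge. */
static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

int main(void)
{
	assert(dax_fault_return(0) == VM_FAULT_NOPAGE);
	assert(dax_fault_return(-ENOMEM) == VM_FAULT_OOM);
	assert(dax_fault_return(-EIO) == VM_FAULT_SIGBUS);
	return 0;
}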