
Commit 3a272031 authored by Greg Kroah-Hartman

Merge tag 'libnvdimm-fixes-4.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Dan writes:
  "libnvdimm/dax 4.19-rc8

   * Fix a livelock in dax_layout_busy_page() present since v4.18. The
     lockup triggers when truncating an actively mapped huge page out of
     a mapping pinned for direct-I/O.

   * Fix mprotect() clobbers of _PAGE_DEVMAP. Broken since v4.5,
     mprotect() clears this flag, which is needed to communicate the
     liveness of device pages to the get_user_pages() path."

* tag 'libnvdimm-fixes-4.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  mm: Preserve _PAGE_DEVMAP across mprotect() calls
  filesystem-dax: Fix dax_layout_busy_page() livelock
parents a24a0eb9 4628a645
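The powerpc and x86 header changes below add _PAGE_DEVMAP to the *_CHG_MASK definitions, i.e. the set of PTE bits that pte_modify() preserves when mprotect() installs new protections; any bit missing from that mask is dropped. A minimal user-space sketch of that pattern (the bit values and the simplified pte_modify() here are illustrative only, not the kernel's actual definitions):

/*
 * Illustration only: shows why a flag left out of _PAGE_CHG_MASK is
 * silently cleared when protections are rewritten, and why adding
 * _PAGE_DEVMAP to the mask preserves it.
 */
#include <stdint.h>
#include <stdio.h>

/* Made-up bit positions for the sketch. */
#define _PAGE_DIRTY	(1UL << 0)
#define _PAGE_ACCESSED	(1UL << 1)
#define _PAGE_RW	(1UL << 2)
#define _PAGE_DEVMAP	(1UL << 3)

#define _PAGE_CHG_MASK_OLD	(_PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK_NEW	(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_DEVMAP)

/* Simplified pte_modify(): keep the masked bits, take everything else
 * from the new protection value. */
static uint64_t pte_modify(uint64_t pte, uint64_t newprot, uint64_t chg_mask)
{
	return (pte & chg_mask) | (newprot & ~chg_mask);
}

int main(void)
{
	uint64_t pte = _PAGE_DIRTY | _PAGE_DEVMAP;	/* mapped device page */
	uint64_t newprot = _PAGE_RW;			/* e.g. mprotect(PROT_READ|PROT_WRITE) */

	printf("old mask: _PAGE_DEVMAP %s\n",
	       (pte_modify(pte, newprot, _PAGE_CHG_MASK_OLD) & _PAGE_DEVMAP) ? "kept" : "lost");
	printf("new mask: _PAGE_DEVMAP %s\n",
	       (pte_modify(pte, newprot, _PAGE_CHG_MASK_NEW) & _PAGE_DEVMAP) ? "kept" : "lost");
	return 0;
}

With the old mask the sketch prints "lost", with the new one "kept", which is the behaviour the get_user_pages() path relies on for device pages.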
arch/powerpc/include/asm/book3s/64/pgtable.h +2 −2
@@ -114,7 +114,7 @@
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
 			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
-			 _PAGE_SOFT_DIRTY)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 /*
  * user access blocked by key
  */
@@ -132,7 +132,7 @@
  */
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
 			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE |	\
-			 _PAGE_SOFT_DIRTY)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 
 #define H_PTE_PKEY  (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
 		     H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
arch/x86/include/asm/pgtable_types.h +1 −1
@@ -124,7 +124,7 @@
  */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
-			 _PAGE_SOFT_DIRTY)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
 /*
fs/dax.c +11 −2
@@ -666,6 +666,8 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 				indices)) {
+		pgoff_t nr_pages = 1;
+
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *pvec_ent = pvec.pages[i];
 			void *entry;
@@ -680,8 +682,15 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 
 			xa_lock_irq(&mapping->i_pages);
 			entry = get_unlocked_mapping_entry(mapping, index, NULL);
-			if (entry)
+			if (entry) {
 				page = dax_busy_page(entry);
+				/*
+				 * Account for multi-order entries at
+				 * the end of the pagevec.
+				 */
+				if (i + 1 >= pagevec_count(&pvec))
+					nr_pages = 1UL << dax_radix_order(entry);
+			}
 			put_unlocked_mapping_entry(mapping, index, entry);
 			xa_unlock_irq(&mapping->i_pages);
 			if (page)
@@ -696,7 +705,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 		 */
 		pagevec_remove_exceptionals(&pvec);
 		pagevec_release(&pvec);
-		index++;
+		index += nr_pages;
 
 		if (page)
 			break;
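
The fs/dax.c fix above works because a multi-order DAX entry covers more than one page index, so advancing the scan by a single page keeps landing inside the same huge-page entry. A stand-alone sketch of the arithmetic only (the PMD order of 9 assumes 2MB huge pages over 4KB base pages on x86-64; this is an illustration, not kernel code):

#include <stdio.h>

#define PMD_ORDER 9	/* assumed: 2MB huge page / 4KB base pages */

int main(void)
{
	unsigned long index = 0, end = 1536;	/* three hypothetical PMD-sized entries */

	while (index < end) {
		/* dax_radix_order() reports 9 for a PMD entry on x86-64, so
		 * the fixed loop steps 512 pages at a time instead of 1. */
		unsigned long nr_pages = 1UL << PMD_ORDER;

		printf("entry at index %lu spans %lu pages\n", index, nr_pages);
		index += nr_pages;
	}
	return 0;
}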