
Commit 81250437 authored by Linus Torvalds

Merge tag 'stable/for-linus-3.14-rc8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen bugfixes from David Vrabel:
 "Fix two bugs that cause x86 PV guest crashes.

  1. Ballooning a 32-bit guest would eventually crash it.

  2. Revert a broken fix for a regression with NUMA_BALANCING.  The bad
     fix caused PV guests to crash after migration.  This is not ideal
     but unpicking the madness that is _PAGE_NUMA == _PAGE_PROTNONE will
     take a while longer"

* tag 'stable/for-linus-3.14-rc8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  Revert "xen: properly account for _PAGE_NUMA during xen pte translations"
  xen/balloon: flush persistent kmaps in correct position
parents 75c5a52d 5926f87f
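
For context on bug 2, a minimal standalone sketch of the aliasing (illustrative only, not the kernel's actual headers; the bit values mirror x86's layout of the era, where the protnone marker reuses the global bit and _PAGE_NUMA aliases it): a helper that counts NUMA-hinting ptes as present also answers true for ptes that have no machine frame behind them, which is exactly what Xen's mfn<->pfn translation must not touch.

/* sketch.c -- illustrative only, not the kernel's headers. */
#include <stdio.h>

typedef unsigned long pteval_t;

#define _PAGE_PRESENT  (1UL << 0)
#define _PAGE_PROTNONE (1UL << 8)      /* meaningful only when !PRESENT */
#define _PAGE_NUMA     _PAGE_PROTNONE  /* the aliasing in question */

/* The helper being reverted: treats protnone/NUMA ptes as present. */
static int pteval_present(pteval_t v)
{
	return (v & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA)) != 0;
}

int main(void)
{
	pteval_t numa_hint = _PAGE_NUMA;   /* PRESENT clear, marker set */

	printf("pteval_present(numa_hint) = %d\n", pteval_present(numa_hint));   /* prints 1 */
	printf("has _PAGE_PRESENT         = %d\n", !!(numa_hint & _PAGE_PRESENT)); /* prints 0 */
	return 0;
}

Checking _PAGE_PRESENT alone, as the revert in mmu.c below restores, keeps pte_mfn_to_pfn() and pte_pfn_to_mfn() away from such ptes until the aliasing itself is unpicked.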
arch/x86/include/asm/pgtable.h +2 −12
@@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b)
 	return a.pte == b.pte;
 }
 
-static inline int pteval_present(pteval_t pteval)
-{
-	/*
-	 * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
-	 * way clearly states that the intent is that protnone and numa
-	 * hinting ptes are considered present for the purposes of
-	 * pagetable operations like zapping, protection changes, gup etc.
-	 */
-	return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
-}
-
 static inline int pte_present(pte_t a)
 {
-	return pteval_present(pte_flags(a));
+	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+			       _PAGE_NUMA);
 }
 
 #define pte_accessible pte_accessible
arch/x86/xen/mmu.c +2 −2
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 /* Assume pteval_t is equivalent to all the other *val_t types. */
 static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
-	if (pteval_present(val)) {
+	if (val & _PAGE_PRESENT) {
 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		unsigned long pfn = mfn_to_pfn(mfn);
 
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
 
 static pteval_t pte_pfn_to_mfn(pteval_t val)
 {
-	if (pteval_present(val)) {
+	if (val & _PAGE_PRESENT) {
 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		pteval_t flags = val & PTE_FLAGS_MASK;
 		unsigned long mfn;
drivers/xen/balloon.c +18 −6
@@ -399,11 +399,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			state = BP_EAGAIN;
 			break;
 		}
+		scrub_page(page);
 
-		pfn = page_to_pfn(page);
-		frame_list[i] = pfn_to_mfn(pfn);
+		frame_list[i] = page_to_pfn(page);
+	}
 
-		scrub_page(page);
+	/*
+	 * Ensure that ballooned highmem pages don't have kmaps.
+	 *
+	 * Do this before changing the p2m as kmap_flush_unused()
+	 * reads PTEs to obtain pages (and hence needs the original
+	 * p2m entry).
+	 */
+	kmap_flush_unused();
+
+	/* Update direct mapping, invalidate P2M, and add to balloon. */
+	for (i = 0; i < nr_pages; i++) {
+		pfn = frame_list[i];
+		frame_list[i] = pfn_to_mfn(pfn);
+		page = pfn_to_page(pfn);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
 		/*
@@ -429,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 		}
 #endif
 
-		balloon_append(pfn_to_page(pfn));
+		balloon_append(page);
 	}
 
-	/* Ensure that ballooned highmem pages don't have kmaps. */
-	kmap_flush_unused();
 	flush_tlb_all();
 
 	set_xen_guest_handle(reservation.extent_start, frame_list);
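
For bug 1, in outline the fixed decrease_reservation() now works in two passes with the kmap flush between them. A simplified sketch of the ordering (error handling and the CONFIG_XEN_HAVE_PVMMU details elided; not the verbatim function):

/* Sketch: the fixed ordering inside decrease_reservation(). */
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	unsigned long pfn, i;
	struct page *page;

	/* Pass 1: allocate and scrub pages; record pfns only. */
	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL)
			break;			/* real code: state = BP_EAGAIN */
		scrub_page(page);
		frame_list[i] = page_to_pfn(page);
	}

	/*
	 * Flush unused kmaps while the p2m still holds the original
	 * entries: kmap_flush_unused() reads PTEs to find its pages.
	 */
	kmap_flush_unused();

	/* Pass 2: translate to mfns, update mappings, grow the balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = frame_list[i];
		frame_list[i] = pfn_to_mfn(pfn);
		page = pfn_to_page(pfn);
		/* ... update direct map, invalidate p2m entry ... */
		balloon_append(page);
	}

	flush_tlb_all();
	/* ... issue XENMEM_decrease_reservation with frame_list ... */
	return BP_DONE;
}

Previously kmap_flush_unused() ran only after the p2m entries had already been invalidated, so on a 32-bit (highmem) guest the flush could walk PTEs whose original p2m entries were gone, as the in-diff comment explains; moving the flush between the two passes is the substance of the fix.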