Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1cd731df authored by Linus Torvalds
Browse files

Merge tag 'stable/for-linus-3.14-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen fixes from Konrad Rzeszutek Wilk:
 "Bug-fixes:
   - Revert "xen/grant-table: Avoid m2p_override during mapping" as it
     broke Xen ARM build.
   - Fix CR4 not being set on AP processors in Xen PVH mode"

* tag 'stable/for-linus-3.14-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/pvh: set CR4 flags for APs
  Revert "xen/grant-table: Avoid m2p_override during mapping"
parents 251aa0fd afca5013
Loading
Loading
Loading
Loading
+2 −3
Original line number Diff line number Diff line
@@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int m2p_add_override(unsigned long mfn, struct page *page,
			    struct gnttab_map_grant_ref *kmap_op);
extern int m2p_remove_override(struct page *page,
			       struct gnttab_map_grant_ref *kmap_op,
			       unsigned long mfn);
				struct gnttab_map_grant_ref *kmap_op);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);

+12 −0
Original line number Diff line number Diff line
@@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu)
	 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
	 * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
	write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);

	if (!cpu)
		return;
	/*
	 * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
	 * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
	*/
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	if (cpu_has_pge)
		set_in_cr4(X86_CR4_PGE);
}

/*
+15 −2
Original line number Diff line number Diff line
@@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page,
					"m2p_add_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}
	WARN_ON(PagePrivate(page));
	SetPagePrivate(page);
	set_page_private(page, mfn);
	page->index = pfn_to_mfn(pfn);

	if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
		return -ENOMEM;

	if (kmap_op != NULL) {
		if (!PageHighMem(page)) {
@@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page,
}
EXPORT_SYMBOL_GPL(m2p_add_override);
int m2p_remove_override(struct page *page,
			struct gnttab_map_grant_ref *kmap_op,
			unsigned long mfn)
		struct gnttab_map_grant_ref *kmap_op)
{
	unsigned long flags;
	unsigned long mfn;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;

	pfn = page_to_pfn(page);
	mfn = get_phys_to_machine(pfn);
	if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
		return -EINVAL;

	if (!PageHighMem(page)) {
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page,
	spin_lock_irqsave(&m2p_override_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&m2p_override_lock, flags);
	WARN_ON(!PagePrivate(page));
	ClearPagePrivate(page);

	set_phys_to_machine(pfn, page->index);
	if (kmap_op != NULL) {
		if (!PageHighMem(page)) {
			struct multicall_space mcs;
+9 −6
Original line number Diff line number Diff line
@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {
			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work)
		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work)
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
		BUG_ON(ret);
		put_free_pages(blkif, pages, segs_to_unmap);
	}
@@ -668,14 +670,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
			                        invcount);
			BUG_ON(ret);
			put_free_pages(blkif, unmap_pages, invcount);
			invcount = 0;
		}
	}
	if (invcount) {
		ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
		BUG_ON(ret);
		put_free_pages(blkif, unmap_pages, invcount);
	}
@@ -737,7 +740,7 @@ again:
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

+5 −8
Original line number Diff line number Diff line
@@ -284,10 +284,8 @@ static int map_grant_pages(struct grant_map *map)
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs_userspace(map->map_ops,
					use_ptemod ? map->kmap_ops : NULL,
					map->pages,
					map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			map->pages, map->count);
	if (err)
		return err;

@@ -317,9 +315,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
		}
	}

	err = gnttab_unmap_refs_userspace(map->unmap_ops + offset,
					  use_ptemod ? map->kmap_ops + offset : NULL,
					  map->pages + offset,
	err = gnttab_unmap_refs(map->unmap_ops + offset,
			use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
			pages);
	if (err)
		return err;
Loading