
Commit 6eaa412f authored by Konrad Rzeszutek Wilk

xen: Mark all initial reserved pages for the balloon as INVALID_P2M_ENTRY.



With this patch, we diligently set regions that will be used by the
balloon driver to INVALID_P2M_ENTRY and place them under the ownership
of the balloon driver. We are OK using __set_phys_to_machine here,
as we do not expect to be allocating any P2M middle or entry pages.
set_phys_to_machine has the side effect of potentially allocating
new pages, and we do not want that at this stage.
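
A minimal sketch of how the two helpers relate, consistent with the
set_phys_to_machine hunk further down; the retry after alloc_p2m is
paraphrased from the surrounding p2m code and is not shown in this diff:

/* Sketch: set_phys_to_machine() wraps __set_phys_to_machine() and, unlike
 * it, may allocate missing p2m pages via alloc_p2m() before retrying. */
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		/* No p2m slot for this pfn yet: allocate, then retry. */
		if (!alloc_p2m(pfn))
			return false;

		if (!__set_phys_to_machine(pfn, mfn))
			return false;
	}

	return true;
}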

We can do this because xen_build_mfn_list_list will have already
allocated all such pages up to xen_max_p2m_pfn.

We also move the check for the auto-translated physmap down the
stack so that it lives in __set_phys_to_machine.
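
For reference, a sketch of __set_phys_to_machine after this change; the
index lookups and the p2m_top/p2m_missing handling at the end are
paraphrased from the surrounding p2m code rather than taken from this diff:

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, mididx, idx;

	/* Auto-translated domains keep pfn == mfn; only an identity set
	 * or an invalidation is expected, and nothing is stored. */
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return true;
	}

	/* PFNs beyond the tracked range can only be marked invalid. */
	if (unlikely(pfn >= MAX_P2M_PFN)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	/* Paraphrased: without a leaf page, only "missing" can be stored. */
	if (p2m_top[topidx][mididx] == p2m_missing)
		return mfn == INVALID_P2M_ENTRY;

	p2m_top[topidx][mididx][idx] = mfn;

	return true;
}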

[v2: Rebased with mmu->p2m code split]
Reviewed-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 1bae4ce2
+1 −0
@@ -41,6 +41,7 @@ extern unsigned int machine_to_phys_order;
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
 extern int m2p_add_override(unsigned long mfn, struct page *page);
 extern int m2p_remove_override(struct page *page);
+1 −1
@@ -2074,7 +2074,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
 			in_frames[i] = virt_to_mfn(vaddr);
 
 		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
-		set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
 
 		if (out_frames)
 			out_frames[i] = virt_to_pfn(vaddr);
+4 −5
@@ -365,6 +365,10 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
 	unsigned topidx, mididx, idx;
 
+	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
+		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+		return true;
+	}
 	if (unlikely(pfn >= MAX_P2M_PFN)) {
 		BUG_ON(mfn != INVALID_P2M_ENTRY);
 		return true;
@@ -384,11 +388,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 
 bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
-	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
-		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-		return true;
-	}
-
 	if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
 		if (!alloc_p2m(pfn))
 			return false;
+6 −1
@@ -52,6 +52,8 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
 
 static __init void xen_add_extra_mem(unsigned long pages)
 {
+	unsigned long pfn;
+
 	u64 size = (u64)pages * PAGE_SIZE;
 	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
 
@@ -66,6 +68,9 @@ static __init void xen_add_extra_mem(unsigned long pages)
 	xen_extra_mem_size += size;
 
 	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+
+	for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
+		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }
 
 static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
@@ -104,7 +109,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
 		WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
 		     start, end, ret);
 		if (ret == 1) {
-			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 			len++;
 		}
 	}
+1 −1
@@ -296,7 +296,7 @@ static int decrease_reservation(unsigned long nr_pages)
 	/* No more mappings: invalidate P2M and add to balloon. */
 	for (i = 0; i < nr_pages; i++) {
 		pfn = mfn_to_pfn(frame_list[i]);
-		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 		balloon_append(pfn_to_page(pfn));
 	}