
Commit 497a5df7 authored by Linus Torvalds

Merge tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen features and fixes from David Vrabel:

 - use a single source list of hypercalls, generating other tables etc.
   at build time.

 - add a "Xen PV" APIC driver to support >255 VCPUs in PV guests.

 - significant performance improvements to guest save/restore/migration.

 - scsiback/front save/restore support.

 - infrastructure for multi-page xenbus rings.

 - misc fixes.

* tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/pci: Try harder to get PXM information for Xen
  xenbus_client: Extend interface to support multi-page ring
  xen-pciback: also support disabling of bus-mastering and memory-write-invalidate
  xen: support suspend/resume in pvscsi frontend
  xen: scsiback: add LUN of restored domain
  xen-scsiback: define a pr_fmt macro with xen-pvscsi
  xen/mce: fix up xen_late_init_mcelog() error handling
  xen/privcmd: improve performance of MMAPBATCH_V2
  xen: unify foreign GFN map/unmap for auto-xlated physmap guests
  x86/xen/apic: WARN with details.
  x86/xen: Provide a "Xen PV" APIC driver to support >255 VCPUs
  xen/pciback: Don't print scary messages when unsupported by hypervisor.
  xen: use generated hypercall symbols in arch/x86/xen/xen-head.S
  xen: use generated hypervisor symbols in arch/x86/xen/trace.c
  xen: synchronize include/xen/interface/xen.h with xen
  xen: build infrastructure for generating hypercall depending symbols
  xen: balloon: Use static attribute groups for sysfs entries
  xen: pcpu: Use static attribute groups for sysfs entry
parents 714d8e7e 0b97b03d
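
For context on the multi-page xenbus ring item above: after this series the xenbus client interface is extended so that xenbus_grant_ring() takes a page count and fills an array of grant references, and xenbus_map_ring_valloc() accepts an array of references. The sketch below shows the rough shape a frontend/backend pair might use. The EXAMPLE_* names, the two-page ring size and the error handling are illustrative assumptions, and the prototypes are as recalled from this series rather than taken from the diff below, so verify them against include/xen/xenbus.h; the snippet is an in-tree-only sketch.

/* Sketch only: assumes the extended xenbus_grant_ring()/xenbus_map_ring_valloc()
 * prototypes from the multi-page ring patch; not taken from this diff. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>

#define EXAMPLE_RING_ORDER	1	/* hypothetical two-page ring */
#define EXAMPLE_RING_PAGES	(1 << EXAMPLE_RING_ORDER)

/* Frontend side: allocate the shared ring and grant every page to the
 * backend in a single call, collecting one grant reference per page. */
static int example_front_setup_ring(struct xenbus_device *dev,
				    grant_ref_t grefs[EXAMPLE_RING_PAGES])
{
	void *ring = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      EXAMPLE_RING_ORDER);
	int err;

	if (!ring)
		return -ENOMEM;

	err = xenbus_grant_ring(dev, ring, EXAMPLE_RING_PAGES, grefs);
	if (err)
		free_pages((unsigned long)ring, EXAMPLE_RING_ORDER);
	return err;
}

/* Backend side: map the whole ring from the grant references the frontend
 * published, again in one call. */
static int example_back_map_ring(struct xenbus_device *dev,
				 grant_ref_t grefs[EXAMPLE_RING_PAGES],
				 void **vaddr)
{
	return xenbus_map_ring_valloc(dev, grefs, EXAMPLE_RING_PAGES, vaddr);
}
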
+16 −88
@@ -53,105 +53,33 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
			    unsigned int domid)
{
	int rc;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = 1,
		.space = XENMAPSPACE_gmfn_foreign,
	};
	xen_ulong_t idx = fgmfn;
	xen_pfn_t gpfn = lpfn;
	int err = 0;

	set_xen_guest_handle(xatp.idxs, &idx);
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
	if (rc || err) {
		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
			rc, err, lpfn, fgmfn);
		return 1;
	}
	return 0;
}

struct remap_data {
	xen_pfn_t fgmfn; /* foreign domain's gmfn */
	pgprot_t prot;
	domid_t  domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_mfn_info *info;
};

static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *mfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid,
			       struct page **pages)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	unsigned long pfn = page_to_pfn(page);
	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));

	if (map_foreign_page(pfn, info->fgmfn, info->domid))
		return -EFAULT;
	set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
	return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
					 prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);

/* Not used by XENFEAT_auto_translated guests. */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                              unsigned long addr,
                              xen_pfn_t mfn, int nr,
                              pgprot_t prot, unsigned domid,
                              struct page **pages)
{
	int err;
	struct remap_data data;

	/* TBD: Batching, current sole caller only does page at a time */
	if (nr > 1)
		return -EINVAL;

	data.fgmfn = mfn;
	data.prot = prot;
	data.domid = domid;
	data.vma = vma;
	data.index = 0;
	data.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  remap_pte_fn, &data);
	return err;
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct xen_remove_from_physmap xrp;
		unsigned long rc, pfn;

		pfn = page_to_pfn(pages[i]);

		xrp.domid = DOMID_SELF;
		xrp.gpfn = pfn;
		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc) {
			pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
				pfn, rc);
			return rc;
		}
	}
	return 0;
	return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
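
The xen_xlate_remap_gfn_array() and xen_xlate_unmap_gfn_range() helpers called above are not part of this excerpt; they are the common auto-translated-physmap code added elsewhere in this pull ("xen: unify foreign GFN map/unmap for auto-xlated physmap guests"). Inferred from the call sites and the wrapper signatures shown here, their declarations look roughly as follows; the parameter names and the exact header (believed to be include/xen/xen-ops.h) are assumptions.

/* Inferred from the call sites above; not copied from this diff. */
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      unsigned long addr,
			      xen_pfn_t *mfn, int nr,
			      int *err_ptr, pgprot_t prot,
			      unsigned domid,
			      struct page **pages);
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages);
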

+9 −0
@@ -19,6 +19,9 @@ quiet_cmd_syshdr = SYSHDR $@
quiet_cmd_systbl = SYSTBL  $@
      cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@

quiet_cmd_hypercalls = HYPERCALLS $@
      cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<,$^)

syshdr_abi_unistd_32 := i386
$(uapi)/unistd_32.h: $(syscall32) $(syshdr)
	$(call if_changed,syshdr)
@@ -47,10 +50,16 @@ $(out)/syscalls_32.h: $(syscall32) $(systbl)
$(out)/syscalls_64.h: $(syscall64) $(systbl)
	$(call if_changed,systbl)

$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh
	$(call if_changed,hypercalls)

$(out)/xen-hypercalls.h: $(srctree)/include/xen/interface/xen*.h

uapisyshdr-y			+= unistd_32.h unistd_64.h unistd_x32.h
syshdr-y			+= syscalls_32.h
syshdr-$(CONFIG_X86_64)		+= unistd_32_ia32.h unistd_64_x32.h
syshdr-$(CONFIG_X86_64)		+= syscalls_64.h
syshdr-$(CONFIG_XEN)		+= xen-hypercalls.h

targets	+= $(uapisyshdr-y) $(syshdr-y)

+180 −0
@@ -7,6 +7,7 @@
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include "xen-ops.h"
#include "smp.h"

static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
{
@@ -28,7 +29,186 @@ static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
	return 0xfd;
}

static unsigned long xen_set_apic_id(unsigned int x)
{
	WARN_ON(1);
	return x;
}

static unsigned int xen_get_apic_id(unsigned long x)
{
	return ((x)>>24) & 0xFFu;
}

static u32 xen_apic_read(u32 reg)
{
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = 0,
	};
	int ret = 0;

	/* Shouldn't need this as APIC is turned off for PV, and we only
	 * get called on the bootup processor. But just in case. */
	if (!xen_initial_domain() || smp_processor_id())
		return 0;

	if (reg == APIC_LVR)
		return 0x10;
#ifdef CONFIG_X86_32
	if (reg == APIC_LDR)
		return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
#endif
	if (reg != APIC_ID)
		return 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return 0;

	return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN(1,"register: %x, value: %x\n", reg, val);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
        return 0;
}

static int xen_apic_probe_pv(void)
{
	if (xen_pv_domain())
		return 1;

	return 0;
}

static int xen_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return xen_pv_domain();
}

static int xen_id_always_valid(int apicid)
{
	return 1;
}

static int xen_id_always_registered(void)
{
	return 1;
}

static int xen_phys_pkg_id(int initial_apic_id, int index_msb)
{
	return initial_apic_id >> index_msb;
}

#ifdef CONFIG_X86_32
static int xen_x86_32_early_logical_apicid(int cpu)
{
	/* Match with APIC_LDR read. Otherwise setup_local_APIC complains. */
	return 1 << cpu;
}
#endif

static void xen_noop(void)
{
}

static void xen_silent_inquire(int apicid)
{
}

static struct apic xen_pv_apic = {
	.name 				= "Xen PV",
	.probe 				= xen_apic_probe_pv,
	.acpi_madt_oem_check		= xen_madt_oem_check,
	.apic_id_valid 			= xen_id_always_valid,
	.apic_id_registered 		= xen_id_always_registered,

	/* .irq_delivery_mode - used in native_compose_msi_msg only */
	/* .irq_dest_mode     - used in native_compose_msi_msg only */

	.target_cpus			= default_target_cpus,
	.disable_esr			= 0,
	/* .dest_logical      -  default_send_IPI_ use it but we use our own. */
	.check_apicid_used		= default_check_apicid_used, /* Used on 32-bit */

	.vector_allocation_domain	= flat_vector_allocation_domain,
	.init_apic_ldr			= xen_noop, /* setup_local_APIC calls it */

	.ioapic_phys_id_map		= default_ioapic_phys_id_map, /* Used on 32-bit */
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= physid_set_mask_of_physid, /* Used on 32-bit */
	.check_phys_apicid_present	= default_check_phys_apicid_present, /* smp_sanity_check needs it */
	.phys_pkg_id			= xen_phys_pkg_id, /* detect_ht */

	.get_apic_id 			= xen_get_apic_id,
	.set_apic_id 			= xen_set_apic_id, /* Can be NULL on 32-bit. */
	.apic_id_mask			= 0xFF << 24, /* Used by verify_local_APIC. Match with what xen_get_apic_id does. */

	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,

#ifdef CONFIG_SMP
	.send_IPI_mask 			= xen_send_IPI_mask,
	.send_IPI_mask_allbutself 	= xen_send_IPI_mask_allbutself,
	.send_IPI_allbutself 		= xen_send_IPI_allbutself,
	.send_IPI_all 			= xen_send_IPI_all,
	.send_IPI_self 			= xen_send_IPI_self,
#endif
	/* .wait_for_init_deassert- used  by AP bootup - smp_callin which we don't use */
	.inquire_remote_apic		= xen_silent_inquire,

	.read				= xen_apic_read,
	.write				= xen_apic_write,
	.eoi_write			= xen_apic_write,

	.icr_read 			= xen_apic_icr_read,
	.icr_write 			= xen_apic_icr_write,
	.wait_icr_idle 			= xen_noop,
	.safe_wait_icr_idle 		= xen_safe_apic_wait_icr_idle,

#ifdef CONFIG_X86_32
	/* generic_processor_info and setup_local_APIC. */
	.x86_32_early_logical_apicid	= xen_x86_32_early_logical_apicid,
#endif
};

static void __init xen_apic_check(void)
{
	if (apic == &xen_pv_apic)
		return;

	pr_info("Switched APIC routing from %s to %s.\n", apic->name,
		xen_pv_apic.name);
	apic = &xen_pv_apic;
}
void __init xen_init_apic(void)
{
	x86_io_apic_ops.read = xen_io_apic_read;
	/* On PV guests the APIC CPUID bit is disabled so none of the
	 * routines end up executing. */
	if (!xen_initial_domain())
		apic = &xen_pv_apic;

	x86_platform.apic_post_init = xen_apic_check;
}
apic_driver(xen_pv_apic);
+1 −89
@@ -928,92 +928,6 @@ static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static unsigned long xen_set_apic_id(unsigned int x)
{
	WARN_ON(1);
	return x;
}
static unsigned int xen_get_apic_id(unsigned long x)
{
	return ((x)>>24) & 0xFFu;
}
static u32 xen_apic_read(u32 reg)
{
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = 0,
	};
	int ret = 0;

	/* Shouldn't need this as APIC is turned off for PV, and we only
	 * get called on the bootup processor. But just in case. */
	if (!xen_initial_domain() || smp_processor_id())
		return 0;

	if (reg == APIC_LVR)
		return 0x10;

	if (reg != APIC_ID)
		return 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return 0;

	return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void xen_apic_wait_icr_idle(void)
{
        return;
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
        return 0;
}

static void set_xen_basic_apic_ops(void)
{
	apic->read = xen_apic_read;
	apic->write = xen_apic_write;
	apic->icr_read = xen_apic_icr_read;
	apic->icr_write = xen_apic_icr_write;
	apic->wait_icr_idle = xen_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
	apic->set_apic_id = xen_set_apic_id;
	apic->get_apic_id = xen_get_apic_id;

#ifdef CONFIG_SMP
	apic->send_IPI_allbutself = xen_send_IPI_allbutself;
	apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
	apic->send_IPI_mask = xen_send_IPI_mask;
	apic->send_IPI_all = xen_send_IPI_all;
	apic->send_IPI_self = xen_send_IPI_self;
#endif
}

#endif

static void xen_clts(void)
{
	struct multicall_space mcs;
@@ -1619,7 +1533,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
	/*
	 * set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
	xen_init_apic();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
@@ -1732,8 +1646,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
		if (HYPERVISOR_dom0_op(&op) == 0)
			boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

		xen_init_apic();

		/* Make sure ACS will be enabled */
		pci_request_acs();

+83 −124
@@ -2436,99 +2436,11 @@ void __init xen_hvm_init_mmu_ops(void)
}
#endif

#ifdef CONFIG_XEN_PVH
/*
 * Map foreign gfn (fgfn), to local pfn (lpfn). This for the user
 * space creating new guest on pvh dom0 and needing to map domU pages.
 */
static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
			    unsigned int domid)
{
	int rc, err = 0;
	xen_pfn_t gpfn = lpfn;
	xen_ulong_t idx = fgfn;

	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = 1,
		.space = XENMAPSPACE_gmfn_foreign,
	};
	set_xen_guest_handle(xatp.idxs, &idx);
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
	if (rc < 0)
		return rc;
	return err;
}

static int xlate_remove_from_p2m(unsigned long spfn, int count)
{
	struct xen_remove_from_physmap xrp;
	int i, rc;

	for (i = 0; i < count; i++) {
		xrp.domid = DOMID_SELF;
		xrp.gpfn = spfn+i;
		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc)
			break;
	}
	return rc;
}

struct xlate_remap_data {
	unsigned long fgfn; /* foreign domain's gfn */
	pgprot_t prot;
	domid_t  domid;
	int index;
	struct page **pages;
};

static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			    void *data)
{
	int rc;
	struct xlate_remap_data *remap = data;
	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
	pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));

	rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
	if (rc)
		return rc;
	native_set_pte(ptep, pteval);

	return 0;
}

static int xlate_remap_gfn_range(struct vm_area_struct *vma,
				 unsigned long addr, unsigned long mfn,
				 int nr, pgprot_t prot, unsigned domid,
				 struct page **pages)
{
	int err;
	struct xlate_remap_data pvhdata;

	BUG_ON(!pages);

	pvhdata.fgfn = mfn;
	pvhdata.prot = prot;
	pvhdata.domid = domid;
	pvhdata.index = 0;
	pvhdata.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  xlate_map_pte_fn, &pvhdata);
	flush_tlb_all();
	return err;
}
#endif

#define REMAP_BATCH_SIZE 16

struct remap_data {
	unsigned long mfn;
	xen_pfn_t *mfn;
	bool contiguous;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};
@@ -2537,7 +2449,14 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot));
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

	/* If we have a contigious range, just update the mfn itself,
	   else update pointer to be "next mfn". */
	if (rmd->contiguous)
		(*rmd->mfn)++;
	else
		rmd->mfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
@@ -2546,26 +2465,26 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
	return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
static int do_remap_mfn(struct vm_area_struct *vma,
			unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			xen_pfn_t *mfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)

{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	int batch;
	unsigned long range;
	int err = 0;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_XEN_PVH
		/* We need to update the local page tables and the xen HAP */
		return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
					     domid, pages);
		return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
						 prot, domid, pages);
#else
		return -EINVAL;
#endif
@@ -2573,9 +2492,15 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,

	rmd.mfn = mfn;
	rmd.prot = prot;
	/* We use the err_ptr to indicate if there we are doing a contigious
	 * mapping or a discontigious mapping. */
	rmd.contiguous = !err_ptr;

	while (nr) {
		batch = min(REMAP_BATCH_SIZE, nr);
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
@@ -2584,23 +2509,72 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
		if (err)
			goto out;

		err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
		if (err < 0)
		/* We record the error for each page that gives an error, but
		 * continue mapping until the whole set is done */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @mfn, so
			 * only clear it after each chunk of @mfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
	}

	err = 0;
out:

	xen_flush_tlb_all();

	return err;
	return err < 0 ? err : mapped;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *mfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
	 * and the consequences later is quite hard to detect what the actual
	 * cause of "wrong memory was mapped in".
	 */
	BUG_ON(err_ptr == NULL);
	return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);


/* Returns: 0 success */
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages)
@@ -2609,22 +2583,7 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
		return 0;

#ifdef CONFIG_XEN_PVH
	while (numpgs--) {
		/*
		 * The mmu has already cleaned up the process mmu
		 * resources at this point (lookup_address will return
		 * NULL).
		 */
		unsigned long pfn = page_to_pfn(pages[numpgs]);

		xlate_remove_from_p2m(pfn, 1);
	}
	/*
	 * We don't need to flush tlbs because as part of
	 * xlate_remove_from_p2m, the hypervisor will do tlb flushes
	 * after removing the p2m entries from the EPT/NPT
	 */
	return 0;
	return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
#else
	return -EINVAL;
#endif
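
The subtlest part of the last hunk above is the inner loop of do_remap_mfn(): when the caller supplies err_ptr, a failed frame is recorded and skipped instead of aborting the whole request, and the function returns a mapped-frame count rather than plain 0. The standalone program below mirrors only that bookkeeping (index/done/batch_left), with a made-up stub in place of HYPERVISOR_mmu_update() and an arbitrary failure pattern; it is an illustration of the control flow, not kernel code.

#include <stdio.h>

#define BATCH 8

/* Frames 3 and 6 are designated to fail, purely for demonstration. */
static int frame_is_bad(int frame) { return frame == 3 || frame == 6; }

/* Stand-in for HYPERVISOR_mmu_update(): "maps" frames until it hits a bad
 * one, reports how many succeeded via *done, and returns a negative error. */
static int stub_mmu_update(int first, int count, int *done)
{
	int i;

	for (i = 0; i < count; i++) {
		if (frame_is_bad(first + i)) {
			*done = i;	/* frames before the bad one succeeded */
			return -14;	/* pretend -EFAULT */
		}
	}
	*done = count;
	return 0;
}

int main(void)
{
	int err_ptr[BATCH];
	int index = 0, batch_left = BATCH, mapped = 0, err;

	/* Same shape as the kernel loop: keep going past failed frames,
	 * recording a per-frame error, until the batch is consumed.  As in
	 * the kernel code, frames that succeed within a call that ends in
	 * an error are cleared in err_ptr[] but not added to "mapped". */
	do {
		int i, done = 0;

		err = stub_mmu_update(index, batch_left, &done);
		for (i = index; i < index + done; i++)
			err_ptr[i] = 0;
		if (err < 0) {
			err_ptr[index + done] = err;
			done++;			/* skip the failed frame */
		} else {
			mapped += done;
		}
		batch_left -= done;
		index += done;
	} while (batch_left);

	for (index = 0; index < BATCH; index++)
		printf("frame %d: %s\n", index, err_ptr[index] ? "error" : "ok");
	printf("mapped (whole-batch successes only): %d of %d\n", mapped, BATCH);
	return 0;
}
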