
Commit 06ab838c authored by Linus Torvalds

Merge tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen terminology fixes from David Vrabel:
 "Use the correct GFN/BFN terms more consistently"

* tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/xenbus: Rename the variable xen_store_mfn to xen_store_gfn
  xen/privcmd: Further s/MFN/GFN/ clean-up
  hvc/xen: Further s/MFN/GFN clean-up
  video/xen-fbfront: Further s/MFN/GFN clean-up
  xen/tmem: Use xen_page_to_gfn rather than pfn_to_gfn
  xen: Use correctly the Xen memory terminologies
  arm/xen: implement correctly pfn_to_mfn
  xen: Make clear that swiotlb and biomerge are dealing with DMA address
parents 573c577a 5f51042f
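
For context (not part of the commit message): Xen code distinguishes several frame-number spaces. A PFN is the guest's pseudo-physical frame number, an MFN is the host's real machine frame number, a GFN is the frame number a guest hands to the hypervisor (the PFN on auto-translated guests, the MFN on x86 PV), and a BFN is the frame number seen on the DMA/bus side. The hunks below rename helpers so each name says which space it operates in. A minimal, hypothetical sketch of the intended call pattern (example_usage is invented for illustration):

/* Hypothetical driver snippet, not part of this commit. */
#include <xen/page.h>

static void example_usage(void *buf)
{
	/* Frame number to pass to the hypervisor (grants, hypercalls). */
	unsigned long gfn = virt_to_gfn(buf);

	/* Frame number as seen on the bus, for programming a device. */
	unsigned long bfn = pfn_to_bfn(virt_to_pfn(buf));

	(void)gfn;
	(void)bfn;
}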
Changed file: +20 −8
@@ -34,7 +34,19 @@ typedef struct xpaddr {
unsigned long __pfn_to_mfn(unsigned long pfn);
extern struct rb_root phys_to_mach;

-static inline unsigned long pfn_to_mfn(unsigned long pfn)
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+	return pfn;
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+	return gfn;
+}
+
+/* Pseudo-physical <-> BUS conversion */
+static inline unsigned long pfn_to_bfn(unsigned long pfn)
{
	unsigned long mfn;

@@ -47,16 +59,16 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
	return pfn;
}

-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long bfn_to_pfn(unsigned long bfn)
{
-	return mfn;
+	return bfn;
}

-#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+#define bfn_to_local_pfn(bfn)	bfn_to_pfn(bfn)

-/* VIRT <-> MACHINE conversion */
-#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
-#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_pfn(v)))
+#define gfn_to_virt(m)		(__va(gfn_to_pfn(m) << PAGE_SHIFT))

/* Only used in PV code. But ARM guests are always HVM. */
static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
@@ -96,7 +108,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)

bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
-			   unsigned long mfn);
+			   unsigned long bfn);
unsigned long xen_get_swiotlb_free_pages(unsigned int order);

#endif /* _ASM_ARM_XEN_PAGE_H */
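
On ARM every Xen guest is auto-translated, so the pseudo-physical and guest frame numbers coincide and only the bus (DMA) frame number can differ; that is why pfn_to_gfn()/gfn_to_pfn() above are identities while pfn_to_bfn() still consults the phys_to_mach tree. A hedged sketch of how the new helpers compose (example_frame_views is invented):

/* Hypothetical: compare the guest and bus views of one kernel buffer. */
static void example_frame_views(void *buf)
{
	unsigned long pfn = virt_to_pfn(buf);
	unsigned long gfn = pfn_to_gfn(pfn);	/* identity on ARM */
	unsigned long bfn = pfn_to_bfn(pfn);	/* may differ for DMA regions */

	BUG_ON(gfn != pfn);	/* always holds with the definitions above */
	(void)bfn;
}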
Changed file: +9 −9
@@ -49,35 +49,35 @@ static __read_mostly unsigned int xen_events_irq;

static __initdata struct device_node *xen_node;

-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
-			       xen_pfn_t *mfn, int nr,
+			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid,
			       struct page **pages)
{
-	return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+	return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
					 prot, domid, pages);
}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);

/* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                              unsigned long addr,
-                              xen_pfn_t mfn, int nr,
+                              xen_pfn_t gfn, int nr,
                              pgprot_t prot, unsigned domid,
                              struct page **pages)
{
	return -ENOSYS;
}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);

-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);

static void xen_percpu_init(void)
{
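
The renamed xen_remap_domain_gfn_array() keeps its old signature; callers such as the privcmd driver pass an array of guest frame numbers to map into a userspace VMA. A hedged sketch of a caller (the domain ID, frame numbers and example_map_foreign are made up):

/* Hypothetical: map two foreign GFNs from domain 1 into a VMA. */
static int example_map_foreign(struct vm_area_struct *vma,
			       struct page **pages)
{
	xen_pfn_t gfns[2] = { 0x1000, 0x1001 };	/* made-up frame numbers */
	int errs[2];

	return xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, 2,
					  errs, vma->vm_page_prot,
					  1 /* domid */, pages);
}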
Changed file: +2 −2
@@ -139,9 +139,9 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,

bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
-			   unsigned long mfn)
+			   unsigned long bfn)
{
-	return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
+	return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
}

int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
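
This hunk is the ARM swiotlb hook: a bounce buffer is only needed when the guest and bus views of a frame differ and the device is not cache-coherent, which is exactly the pfn != bfn test above. A hedged sketch of the decision from a caller's point of view (example_needs_bounce is invented):

/* Hypothetical: should this page be bounced through swiotlb? */
static bool example_needs_bounce(struct device *dev, struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long bfn = pfn_to_bfn(pfn);

	return xen_arch_need_swiotlb(dev, pfn, bfn);
}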
Changed file: +37 −2
@@ -101,6 +101,11 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

+	/*
+	 * Some x86 code is still using pfn_to_mfn instead of
+	 * pfn_to_gfn. This will have to be removed when we figure
+	 * out which call.
+	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

@@ -147,6 +152,11 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

+	/*
+	 * Some x86 code is still using mfn_to_pfn instead of
+	 * gfn_to_pfn. This will have to be removed when we figure
+	 * out which call.
+	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

@@ -176,6 +186,27 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}

+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return pfn;
+	else
+		return pfn_to_mfn(pfn);
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return gfn;
+	else
+		return mfn_to_pfn(gfn);
+}
+
+/* Pseudo-physical <-> Bus conversion */
+#define pfn_to_bfn(pfn)		pfn_to_gfn(pfn)
+#define bfn_to_pfn(bfn)		gfn_to_pfn(bfn)
+
/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
@@ -196,7 +227,7 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
-static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
+static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn;

@@ -215,6 +246,10 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_pfn(v)))
+#define gfn_to_virt(g)		(__va(gfn_to_pfn(g) << PAGE_SHIFT))
+
static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
@@ -262,7 +297,7 @@ void make_lowmem_page_readwrite(void *vaddr);

static inline bool xen_arch_need_swiotlb(struct device *dev,
					 unsigned long pfn,
-					 unsigned long mfn)
+					 unsigned long bfn)
{
	return false;
}
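
On x86 the new helpers above pick the right frame-number space at run time: auto-translated (PVH/HVM) guests hand PFNs to the hypervisor, classic PV guests hand MFNs. A hedged sketch of the intended call pattern (the request layout and example_fill_request are invented):

/* Hypothetical frontend: advertise a shared page to its backend. */
struct example_request {
	u64 gfn;	/* frame number in the space the hypervisor expects */
};

static void example_fill_request(struct example_request *req, void *shared)
{
	/* Resolves to pfn_to_mfn() on PV and to the identity on PVH/HVM. */
	req->gfn = virt_to_gfn(shared);
}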
Changed file: +16 −16
@@ -2812,9 +2812,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
	return 0;
}

-static int do_remap_mfn(struct vm_area_struct *vma,
+static int do_remap_gfn(struct vm_area_struct *vma,
			unsigned long addr,
-			xen_pfn_t *mfn, int nr,
+			xen_pfn_t *gfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)
@@ -2830,14 +2830,14 @@ static int do_remap_mfn(struct vm_area_struct *vma,
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_XEN_PVH
		/* We need to update the local page tables and the xen HAP */
-		return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);
#else
		return -EINVAL;
#endif
        }

-	rmd.mfn = mfn;
+	rmd.mfn = gfn;
	rmd.prot = prot;
	/* We use the err_ptr to indicate if there we are doing a contigious
	 * mapping or a discontigious mapping. */
@@ -2865,8 +2865,8 @@ static int do_remap_mfn(struct vm_area_struct *vma,
						    batch_left, &done, domid);

			/*
-			 * @err_ptr may be the same buffer as @mfn, so
-			 * only clear it after each chunk of @mfn is
+			 * @err_ptr may be the same buffer as @gfn, so
+			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
@@ -2896,19 +2896,19 @@ static int do_remap_mfn(struct vm_area_struct *vma,
	return err < 0 ? err : mapped;
}

-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
-			       xen_pfn_t mfn, int nr,
+			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
-	return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
+	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);

-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
-			       xen_pfn_t *mfn, int nr,
+			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
@@ -2917,13 +2917,13 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
	 * cause of "wrong memory was mapped in".
	 */
	BUG_ON(err_ptr == NULL);
-	return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
+	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);


/* Returns: 0 success */
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages)
{
	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
@@ -2935,4 +2935,4 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
	return -EINVAL;
#endif
}
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
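
These mmu.c hunks are the x86 PV side of the same rename: do_remap_gfn() and the exported entry points now consistently speak in guest frame numbers. A hedged sketch pairing the single-range variant with its unmap (the frame number, domain ID and example_map_unmap are made up):

/* Hypothetical: map one foreign GFN into a VMA, then tear it down. */
static int example_map_unmap(struct vm_area_struct *vma, struct page **pages)
{
	int ret;

	ret = xen_remap_domain_gfn_range(vma, vma->vm_start,
					 0x2000 /* made-up gfn */, 1,
					 vma->vm_page_prot,
					 2 /* made-up domid */, pages);
	if (ret < 0)
		return ret;

	return xen_unmap_domain_gfn_range(vma, 1, pages);
}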