Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8c2c8ed8 authored by Linus Torvalds
Browse files

Merge branch 'stable/for-linus-4.11' of...

Merge branch 'stable/for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb updates from Konrad Rzeszutek Wilk:
 "Two tiny implementations of the DMA API for callback in ARM (for Xen)"

* 'stable/for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb-xen: implement xen_swiotlb_get_sgtable callback
  swiotlb-xen: implement xen_swiotlb_dma_mmap callback
parents 304362a8 69369f52
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -198,6 +198,8 @@ static const struct dma_map_ops xen_swiotlb_dma_ops = {
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
};

int __init xen_mm_init(void)
+47 −0
Original line number Diff line number Diff line
@@ -681,3 +681,50 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
	return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);

/*
 * Map DMA-coherent memory into a userspace VMA.
 * Only pages belonging to the current domain may be passed here; handing
 * in pages mapped from another domain would lead to memory corruption.
 */
int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	const struct dma_map_ops *ops = __generic_dma_ops(dev);

	/* Prefer the architecture's native mmap implementation when set. */
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
#endif
	/* Generic fallback used on other architectures (e.g. x86). */
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);

/*
 * Export a DMA-coherent buffer as a scatter-gather table.
 * Only pages belonging to the current domain may be passed here; handing
 * in pages mapped from another domain would lead to memory corruption.
 */
int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	const struct dma_map_ops *ops = __generic_dma_ops(dev);

	if (ops->get_sgtable) {
#if 0
		/*
		 * Debug-only sanity check (do not enable in production
		 * builds): verify the page belongs to the current domain
		 * and is not one mapped from another domain.
		 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));

		BUG_ON(!page_is_ram(bfn));
#endif
		return ops->get_sgtable(dev, sgt, cpu_addr, handle, size,
					attrs);
	}
#endif
	/* Generic fallback used on other architectures (e.g. x86). */
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
+11 −0
Original line number Diff line number Diff line
@@ -2,6 +2,7 @@
#define __LINUX_SWIOTLB_XEN_H

#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

extern int xen_swiotlb_init(int verbose, bool early);
@@ -55,4 +56,14 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);

extern int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);

/*
 * Map DMA-coherent memory into a userspace VMA.  Pages must belong to the
 * current domain; pages mapped from other domains would lead to memory
 * corruption (see the implementation comment in swiotlb-xen.c).
 */
extern int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs);

/*
 * Export a DMA-coherent buffer as a scatter-gather table.  Same
 * current-domain-only restriction as xen_swiotlb_dma_mmap().
 */
extern int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs);
#endif /* __LINUX_SWIOTLB_XEN_H */