Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d4563746 authored by Laura Abbott's avatar Laura Abbott Committed by Matt Wagantall
Browse files

dma-mapping: Add dma_remap functions



After getting an allocation from dma_alloc_coherent, there
may be cases where it is necessary to remap the handle
into the CPU's address space (e.g. no CPU side mapping was
requested at allocation time but now one is needed). Add
APIs to bring a handle into the CPU address space again.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
[imaund@codeaurora.org: resolved context conflicts and added support
  for remap 'no_warn' argument]
Signed-off-by: Ian Maund <imaund@codeaurora.org>
parent f58e2432
Loading
Loading
Loading
Loading
+48 −1
Original line number Diff line number Diff line
@@ -58,6 +58,21 @@ static void __dma_page_cpu_to_dev(struct page *, unsigned long,
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/* Forward declarations for the remap helpers wired into arm_dma_ops below. */
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
		const void *caller);

/* no_warn suppresses the "invalid coherent area" warning on a bad area. */
static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn);

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot);

/* Map an existing coherent allocation back into the kernel address space. */
static void *arm_dma_remap(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size,
			struct dma_attrs *attrs);

/* Tear down a CPU mapping previously created by arm_dma_remap(). */
static void arm_dma_unremap(struct device *dev, void *remapped_addr,
				size_t size);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -141,6 +156,8 @@ struct dma_map_ops arm_dma_ops = {
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
	.remap			= arm_dma_remap,
	.unremap		= arm_dma_unremap,
};
EXPORT_SYMBOL(arm_dma_ops);

@@ -736,6 +753,36 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	return ret;
}

/*
 * arm_dma_remap - map an existing coherent DMA buffer into the CPU's
 * address space (e.g. when DMA_ATTR_NO_KERNEL_MAPPING was used at
 * allocation time and a kernel mapping is now needed).
 *
 * Returns the new CPU virtual address (including the intra-page offset
 * of @handle), or NULL on failure.
 */
static void *arm_dma_remap(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size,
			struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	unsigned long offset = handle & ~PAGE_MASK;
	void *addr;

	/* Map whole pages; re-apply the intra-page offset afterwards. */
	size = PAGE_ALIGN(size + offset);
	addr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
				__builtin_return_address(0));
	/*
	 * Don't add offset to a NULL return: "NULL + offset" is a bogus
	 * non-NULL pointer that would defeat the caller's failure check.
	 */
	if (!addr)
		return NULL;

	return addr + offset;
}

/*
 * arm_dma_unremap - undo a mapping created by arm_dma_remap().
 *
 * Validates that the address really belongs to a DMA-consistent,
 * user-mappable vm area before unmapping; warns and bails out otherwise.
 */
static void arm_dma_unremap(struct device *dev, void *remapped_addr,
				size_t size)
{
	const unsigned int required = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area;
	void *vaddr;

	/* Mask off the intra-page offset handed out by arm_dma_remap(). */
	vaddr = (void *)((unsigned long)remapped_addr & PAGE_MASK);

	area = find_vm_area(vaddr);
	if (area && (area->flags & required) == required) {
		vunmap(vaddr);
		return;
	}

	WARN(1, "trying to free invalid coherent area: %p\n", vaddr);
}
/*
 * Free a buffer as defined by the above mapping.
 */
@@ -1436,7 +1483,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
			VM_ARM_DMA_CONSISTENT | VM_USERMAP, true);
	}

	__iommu_remove_mapping(dev, handle, size);
+50 −2
Original line number Diff line number Diff line
@@ -331,6 +331,52 @@ static int __swiotlb_mmap_coherent(struct device *dev,
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

/*
 * arm64_dma_remap - map an existing coherent DMA buffer back into the
 * kernel's address space.
 *
 * Returns the new CPU virtual address (including the intra-page offset
 * of @handle), or NULL if no vm area could be obtained or the pages
 * could not be mapped.
 */
static void *arm64_dma_remap(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size,
			struct dma_attrs *attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	pgprot_t prot = __get_dma_pgprot(PAGE_KERNEL, attrs);
	unsigned long page_off = handle & ~PAGE_MASK;
	struct vm_struct *vm;
	unsigned long vaddr;

	/* Map whole pages; the offset is re-applied on return. */
	size = PAGE_ALIGN(size + page_off);

	/*
	 * DMA allocation can be mapped to user space, so set the
	 * VM_USERMAP flag too.
	 */
	vm = get_vm_area(size, VM_USERMAP);
	if (!vm)
		return NULL;

	vaddr = (unsigned long)vm->addr;
	vm->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(vaddr, vaddr + size, vm->phys_addr, prot)) {
		vunmap((void *)vaddr);
		return NULL;
	}

	return (void *)(vaddr + page_off);
}

/*
 * arm64_dma_unremap - undo a mapping created by arm64_dma_remap().
 *
 * Warns and bails out if the address does not belong to a vm area
 * carrying VM_USERMAP (the flag arm64_dma_remap() creates areas with).
 */
static void arm64_dma_unremap(struct device *dev, void *remapped_addr,
				size_t size)
{
	struct vm_struct *area;

	/* Mask off the intra-page offset handed out by arm64_dma_remap(). */
	remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);

	area = find_vm_area(remapped_addr);
	/*
	 * Validate the flags as well as existence, mirroring the check in
	 * the 32-bit arm implementation; vunmap()ing an arbitrary area
	 * that merely happens to exist would corrupt someone else's
	 * mapping.
	 */
	if (!area || !(area->flags & VM_USERMAP)) {
		WARN(1, "trying to free invalid coherent area: %p\n",
			remapped_addr);
		return;
	}

	vunmap(remapped_addr);
}

const struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_noncoherent,
	.free = __dma_free_noncoherent,
@@ -345,6 +391,8 @@ const struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
	.dma_remap = arm64_dma_remap,
	.dma_unremap = arm64_dma_unremap,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);

@@ -415,7 +463,7 @@ static int __init atomic_pool_init(void)
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP, true);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
@@ -861,7 +909,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP, true);

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
+4 −2
Original line number Diff line number Diff line
@@ -326,12 +326,14 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
/*
 * unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
			   bool no_warn)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		WARN(!no_warn, "trying to free invalid coherent area: %p\n",
			cpu_addr);
		return;
	}

+2 −1
Original line number Diff line number Diff line
@@ -186,7 +186,8 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
				bool no_warn);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
+35 −0
Original line number Diff line number Diff line
@@ -56,6 +56,10 @@ struct dma_map_ops {
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
	void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle,
			size_t size, struct dma_attrs *attrs);
	void (*unremap)(struct device *dev, void *remapped_address,
			size_t size);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
@@ -83,6 +87,37 @@ static inline int is_device_dma_capable(struct device *dev)
#else
#include <asm-generic/dma-mapping-broken.h>
#endif
/*
 * dma_remap - bring a coherent DMA allocation (back) into the CPU's
 * address space via the device's dma_map_ops.
 *
 * Returns the CPU virtual address, or NULL if the ops table provides
 * no remap callback (warned once).
 */
static inline void *dma_remap(struct device *dev, void *cpu_addr,
		dma_addr_t dma_handle, size_t size, struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (!ops->remap) {
		/*
		 * ops->remap is NULL on this branch, so printing it with
		 * %pS is useless; identify the ops table instead.
		 */
		WARN_ONCE(1, "Remap function not implemented for %pS\n",
				ops);
		return NULL;
	}

	return ops->remap(dev, cpu_addr, dma_handle, size, attrs);
}


/*
 * dma_unremap - tear down a CPU mapping previously created by
 * dma_remap(), via the device's dma_map_ops.
 *
 * Warns once and does nothing if the ops table provides no unremap
 * callback.
 */
static inline void dma_unremap(struct device *dev, void *remapped_addr,
				size_t size)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (!ops->unremap) {
		/* ops->unremap is NULL here; print the ops table instead. */
		WARN_ONCE(1, "unremap function not implemented for %pS\n",
				ops);
		return;
	}

	/*
	 * Plain call, not "return ops->unremap(...)": returning a void
	 * expression from a void function is an ISO C constraint
	 * violation (GNU/C++ extension only).
	 */
	ops->unremap(dev, remapped_addr, size);
}


static inline u64 dma_get_mask(struct device *dev)
{