Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 36e68e77 authored by Liam Mark's avatar Liam Mark
Browse files

dma-mapping: Add dma_remap functions



After getting an allocation from dma_alloc_coherent, there
may be cases where it is necessary to remap the handle
into the CPU's address space (e.g. no CPU side mapping was
requested at allocation time but now one is needed). Add
APIs to bring a handle into the CPU address space again.

Change-Id: Ieb9231c5f24d45aeb7436f643c9b87b93871f85d
Signed-off-by: default avatarLaura Abbott <lauraa@codeaurora.org>
Signed-off-by: default avatarPatrick Daly <pdaly@codeaurora.org>
[lmark@codeaurora.org: fix additional call to dma_common_free_remap]
Signed-off-by: default avatarLiam Mark <lmark@codeaurora.org>
parent 6f270a33
Loading
Loading
Loading
Loading
+3 −3
Original line number Original line Diff line number Diff line
@@ -444,7 +444,7 @@ static int __init atomic_pool_init(void)
	goto out;
	goto out;


remove_mapping:
remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP, false);
destroy_genpool:
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
	atomic_pool = NULL;
@@ -692,14 +692,14 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,


		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
	} else if (is_vmalloc_addr(cpu_addr)){
	} else if (is_vmalloc_addr(cpu_addr)){
		struct vm_struct *area = find_vm_area(cpu_addr);
		struct vm_struct *area = find_vm_area(cpu_addr);


		if (WARN_ON(!area || !area->pages))
		if (WARN_ON(!area || !area->pages))
			return;
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
	} else {
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
		__free_pages(virt_to_page(cpu_addr), get_order(size));
+4 −2
Original line number Original line Diff line number Diff line
@@ -329,12 +329,14 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
/*
/*
 * unmaps a range previously mapped by dma_common_*_remap
 * unmaps a range previously mapped by dma_common_*_remap
 */
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
			   bool no_warn)
{
{
	struct vm_struct *area = find_vm_area(cpu_addr);
	struct vm_struct *area = find_vm_area(cpu_addr);


	if (!area || (area->flags & vm_flags) != vm_flags) {
	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		WARN(!no_warn, "trying to free invalid coherent area: %p\n",
			cpu_addr);
		return;
		return;
	}
	}


+35 −1
Original line number Original line Diff line number Diff line
@@ -133,6 +133,10 @@ struct dma_map_ops {
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
	void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle,
			size_t size, unsigned long attrs);
	void (*unremap)(struct device *dev, void *remapped_address,
			size_t size);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
	u64 (*get_required_mask)(struct device *dev);
#endif
#endif
@@ -427,7 +431,8 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
void *dma_common_pages_remap(struct page **pages, size_t size,
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
			   bool nowarn);


/**
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * dma_mmap_attrs - map a coherent DMA allocation into user space
@@ -586,6 +591,35 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
	return 0;
	return 0;
}
}
#endif
#endif
/*
 * dma_remap - map an existing DMA allocation into the CPU's address space.
 * @dev:	device that owns the allocation
 * @cpu_addr:	kernel address from the original dma_alloc_* call (may be
 *		NULL if no CPU-side mapping was requested at alloc time --
 *		exact contract depends on the arch ops; NOTE(review): confirm)
 * @dma_handle:	bus address of the allocation
 * @size:	size of the region to remap
 * @attrs:	DMA attributes used at allocation time
 *
 * Returns the new CPU virtual address, or NULL if the device's dma_map_ops
 * does not implement ->remap.  Assumes get_dma_ops() never returns NULL
 * for a valid device (kernel convention -- TODO confirm for this tree).
 */
static inline void *dma_remap(struct device *dev, void *cpu_addr,
		dma_addr_t dma_handle, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops->remap) {
		/*
		 * Print the ops structure, not ops->remap: the callback is
		 * known to be NULL on this path, so %pS on it would resolve
		 * to nothing useful.  %pS on the (statically defined) ops
		 * identifies which dma_map_ops lacks the callback.
		 */
		WARN_ONCE(1, "Remap function not implemented for %pS\n",
				ops);
		return NULL;
	}

	return ops->remap(dev, cpu_addr, dma_handle, size, attrs);
}


/*
 * dma_unremap - tear down a CPU mapping previously created by dma_remap().
 * @dev:		device that owns the allocation
 * @remapped_addr:	address returned by a prior dma_remap() call
 * @size:		size that was passed to dma_remap()
 *
 * No-op (with a one-time warning) if the device's dma_map_ops does not
 * implement ->unremap.
 */
static inline void dma_unremap(struct device *dev, void *remapped_addr,
				size_t size)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops->unremap) {
		/*
		 * Print the ops structure, not ops->unremap: the callback
		 * is known to be NULL here, so %pS on it would resolve to
		 * nothing useful.
		 */
		WARN_ONCE(1, "unremap function not implemented for %pS\n",
				ops);
		return;
	}

	/*
	 * Plain call, not "return ops->unremap(...)": returning a void
	 * expression from a void function is an ISO C constraint violation
	 * (only valid as a GNU/C++ extension).
	 */
	ops->unremap(dev, remapped_addr, size);
}



static inline u64 dma_get_mask(struct device *dev)
static inline u64 dma_get_mask(struct device *dev)
{
{