Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e18b18ab authored by Linux Build Service Account, committed by Gerrit — the friendly Code Review server
Browse files

Merge "dma-mapping: Add dma_remap functions"

parents 959f1dc6 a106f65b
Loading
Loading
Loading
Loading
+39 −0
Original line number Original line Diff line number Diff line
@@ -55,7 +55,13 @@ static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
		size_t, enum dma_data_direction);
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller);


static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn);

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot);
/**
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -125,6 +131,37 @@ static void arm_dma_sync_single_for_device(struct device *dev,
	__dma_page_cpu_to_dev(page, offset, size, dir);
	__dma_page_cpu_to_dev(page, offset, size, dir);
}
}


/*
 * arm_dma_remap - create an additional kernel mapping of a DMA buffer
 * @dev:      device the buffer belongs to
 * @cpu_addr: existing CPU virtual address of the buffer (unused here)
 * @handle:   DMA (bus) address of the buffer
 * @size:     number of bytes to remap
 * @attrs:    DMA attributes controlling the page protection
 *
 * Returns a kernel virtual address carrying the same sub-page offset as
 * @handle, or NULL if the remap failed.
 */
static void *arm_dma_remap(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size,
			struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	unsigned long offset = handle & ~PAGE_MASK;
	void *addr;

	/* Map whole pages, covering the sub-page offset of @handle. */
	size = PAGE_ALIGN(size + offset);
	addr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
					__builtin_return_address(0));
	/*
	 * __dma_alloc_remap() may fail; don't add @offset to a NULL
	 * result, which would hand the caller a bogus non-NULL pointer.
	 */
	if (!addr)
		return NULL;

	return addr + offset;
}

/*
 * arm_dma_unremap - tear down a mapping created by arm_dma_remap
 * @dev:           device the buffer belongs to (unused)
 * @remapped_addr: kernel virtual address returned by arm_dma_remap
 * @size:          size of the mapping (unused; the vm area tracks it)
 *
 * Refuses (with a WARN) to free anything that is not a vm area carrying
 * both VM_ARM_DMA_CONSISTENT and VM_USERMAP.
 */
static void arm_dma_unremap(struct device *dev, void *remapped_addr,
				size_t size)
{
	const unsigned int expected = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area;
	void *aligned;

	/* The mapping was page aligned; strip any sub-page offset. */
	aligned = (void *)((unsigned long)remapped_addr & PAGE_MASK);

	area = find_vm_area(aligned);
	if (area && (area->flags & expected) == expected) {
		vunmap(aligned);
		return;
	}

	WARN(1, "trying to free invalid coherent area: %p\n", aligned);
}

struct dma_map_ops arm_dma_ops = {
struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.free			= arm_dma_free,
@@ -139,6 +176,8 @@ struct dma_map_ops arm_dma_ops = {
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
	.set_dma_mask		= arm_dma_set_mask,
	.remap			= arm_dma_remap,
	.unremap		= arm_dma_unremap,
};
};
EXPORT_SYMBOL(arm_dma_ops);
EXPORT_SYMBOL(arm_dma_ops);


+49 −0
Original line number Original line Diff line number Diff line
@@ -25,6 +25,7 @@
#include <linux/vmalloc.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/swiotlb.h>
#include <linux/sched.h>
#include <linux/sched.h>
#include <linux/io.h>


#include <asm/cacheflush.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/tlbflush.h>
@@ -445,7 +446,51 @@ int arm64_swiotlb_mmap(struct device *dev, struct vm_area_struct *vma,
	return ret;
	return ret;
}
}


/*
 * arm64_dma_remap - create an additional kernel mapping of a DMA buffer
 * @dev:      device the buffer belongs to
 * @cpu_addr: existing CPU virtual address of the buffer (unused here)
 * @handle:   DMA address of the buffer
 * @size:     number of bytes to remap
 * @attrs:    DMA attributes controlling the page protection
 *
 * Reserves a vmalloc-space area and maps the buffer's physical pages
 * into it with ioremap_page_range(). Returns a kernel virtual address
 * carrying the same sub-page offset as @handle, or NULL on failure.
 */
static void *arm64_dma_remap(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size,
			struct dma_attrs *attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	pgprot_t prot = __get_dma_pgprot(PAGE_KERNEL, attrs);
	unsigned long offset = handle & ~PAGE_MASK;
	struct vm_struct *area;
	unsigned long addr;

	/* Map whole pages, covering the sub-page offset of @handle. */
	size = PAGE_ALIGN(size + offset);

	/*
	 * DMA allocation can be mapped to user space, so set the
	 * VM_USERMAP flag as well.
	 */
	area = get_vm_area(size, VM_USERMAP);
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	/* Record the backing physical address on the vm area. */
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		/* vunmap() tears down any partially built mapping too. */
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr + offset;
}

/*
 * arm64_dma_unremap - tear down a mapping created by arm64_dma_remap
 * @dev:           device the buffer belongs to (unused)
 * @remapped_addr: kernel virtual address returned by arm64_dma_remap
 * @size:          size of the mapping (unused; the vm area tracks it)
 */
static void arm64_dma_unremap(struct device *dev, void *remapped_addr,
				size_t size)
{
	struct vm_struct *area;

	/* The mapping was page aligned; strip any sub-page offset. */
	remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);

	area = find_vm_area(remapped_addr);
	/*
	 * Only tear down areas we created: arm64_dma_remap() allocates
	 * them with VM_USERMAP set. Freeing an arbitrary vm area that
	 * merely happens to contain this address would corrupt someone
	 * else's mapping.
	 */
	if (!area || !(area->flags & VM_USERMAP)) {
		WARN(1, "trying to free invalid coherent area: %p\n",
			remapped_addr);
		return;
	}
	vunmap(remapped_addr);
}


struct dma_map_ops noncoherent_swiotlb_dma_ops = {
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.alloc = arm64_swiotlb_alloc_noncoherent,
	.alloc = arm64_swiotlb_alloc_noncoherent,
@@ -461,6 +506,8 @@ struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.sync_sg_for_device = arm64_swiotlb_sync_sg_for_device,
	.sync_sg_for_device = arm64_swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
	.mapping_error = swiotlb_dma_mapping_error,
	.remap = arm64_dma_remap,
	.unremap = arm64_dma_unremap,
};
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);


@@ -477,6 +524,8 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
	.mapping_error = swiotlb_dma_mapping_error,
	.remap = arm64_dma_remap,
	.unremap = arm64_dma_unremap,
};
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);


+13 −0
Original line number Original line Diff line number Diff line
@@ -144,6 +144,17 @@ void removed_sync_sg_for_device(struct device *dev,
	return;
	return;
}
}


/*
 * removed_remap - ->remap hook for the "removed" DMA ops.
 *
 * Maps @size bytes at bus address @handle with ioremap(); @cpu_addr and
 * @attrs are ignored. NOTE(review): presumably the backing memory is a
 * carveout outside the kernel linear map, hence ioremap — confirm.
 */
void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
			size_t size, struct dma_attrs *attrs)
{
	return ioremap(handle, size);
}

/*
 * removed_unremap - ->unremap hook for the "removed" DMA ops.
 *
 * Undoes removed_remap() by iounmap()ing the address it returned;
 * @dev and @size are ignored.
 */
void removed_unremap(struct device *dev, void *remapped_address, size_t size)
{
	iounmap(remapped_address);
}

struct dma_map_ops removed_dma_ops = {
struct dma_map_ops removed_dma_ops = {
	.alloc			= removed_alloc,
	.alloc			= removed_alloc,
	.free			= removed_free,
	.free			= removed_free,
@@ -156,6 +167,8 @@ struct dma_map_ops removed_dma_ops = {
	.sync_single_for_device	= removed_sync_single_for_device,
	.sync_single_for_device	= removed_sync_single_for_device,
	.sync_sg_for_cpu	= removed_sync_sg_for_cpu,
	.sync_sg_for_cpu	= removed_sync_sg_for_cpu,
	.sync_sg_for_device	= removed_sync_sg_for_device,
	.sync_sg_for_device	= removed_sync_sg_for_device,
	.remap			= removed_remap,
	.unremap		= removed_unremap,
};
};
EXPORT_SYMBOL(removed_dma_ops);
EXPORT_SYMBOL(removed_dma_ops);


+36 −0
Original line number Original line Diff line number Diff line
@@ -50,6 +50,11 @@ struct dma_map_ops {
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
	void *(*remap)(struct device *dev, void *cpu_addr,
			dma_addr_t dma_handle, size_t size,
			struct dma_attrs *attrs);
	void (*unremap)(struct device *dev, void *remapped_address,
			size_t size);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
	u64 (*get_required_mask)(struct device *dev);
#endif
#endif
@@ -78,6 +83,37 @@ static inline int is_device_dma_capable(struct device *dev)
#include <asm-generic/dma-mapping-broken.h>
#include <asm-generic/dma-mapping-broken.h>
#endif
#endif


/**
 * dma_remap - create an additional kernel mapping of a DMA buffer
 * @dev:        device the buffer belongs to
 * @cpu_addr:   existing CPU virtual address of the buffer
 * @dma_handle: DMA address of the buffer
 * @size:       number of bytes to remap
 * @attrs:      DMA attributes forwarded to the arch implementation
 *
 * Dispatches to the device's dma_map_ops->remap hook. Returns the new
 * kernel virtual address, or NULL (with a one-time warning) when the
 * ops do not implement ->remap.
 */
static inline void *dma_remap(struct device *dev, void *cpu_addr,
		dma_addr_t dma_handle, size_t size, struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (!ops->remap) {
		/*
		 * ops->remap is known to be NULL here, so print the ops
		 * table symbol instead of the missing hook.
		 */
		WARN_ONCE(1, "Remap function not implemented for %pS\n",
				ops);
		return NULL;
	}

	return ops->remap(dev, cpu_addr, dma_handle, size, attrs);
}


/**
 * dma_unremap - tear down a mapping created by dma_remap
 * @dev:           device the buffer belongs to
 * @remapped_addr: kernel virtual address returned by dma_remap
 * @size:          size of the mapping
 *
 * Dispatches to the device's dma_map_ops->unremap hook; warns once and
 * does nothing when the ops do not implement ->unremap.
 */
static inline void dma_unremap(struct device *dev, void *remapped_addr,
				size_t size)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (!ops->unremap) {
		/*
		 * ops->unremap is known to be NULL here, so print the
		 * ops table symbol instead of the missing hook.
		 */
		WARN_ONCE(1, "unremap function not implemented for %pS\n",
				ops);
		return;
	}

	/* No 'return expr;' here: the hook and this function are void. */
	ops->unremap(dev, remapped_addr, size);
}

static inline u64 dma_get_mask(struct device *dev)
static inline u64 dma_get_mask(struct device *dev)
{
{
	if (dev && dev->dma_mask && *dev->dma_mask)
	if (dev && dev->dma_mask && *dev->dma_mask)