Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d484864d authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull CMA and ARM DMA-mapping updates from Marek Szyprowski:
 "These patches contain two major updates for DMA mapping subsystem
  (mainly for ARM architecture).  First one is Contiguous Memory
  Allocator (CMA) which makes it possible for device drivers to allocate
  big contiguous chunks of memory after the system has booted.

  The main difference from the similar frameworks is the fact that CMA
  allows the memory region reserved for big chunk allocation to be
  transparently reused as system memory, so no memory is wasted when no
  big chunk is allocated.  Once the alloc request is issued, the
  framework migrates system pages to create space for the required big
  chunk of physically contiguous memory.

  For more information one can refer to nice LWN articles:

   - 'A reworked contiguous memory allocator':
		http://lwn.net/Articles/447405/

   - 'CMA and ARM':
		http://lwn.net/Articles/450286/

   - 'A deep dive into CMA':
		http://lwn.net/Articles/486301/

   - and the following thread with the patches and links to all previous
     versions:
		https://lkml.org/lkml/2012/4/3/204

  The main client for this new framework is ARM DMA-mapping subsystem.

  The second part provides a complete redesign in ARM DMA-mapping
  subsystem.  The core implementation has been changed to use common
  struct dma_map_ops based infrastructure with the recent updates for
  new dma attributes merged in v3.4-rc2.  This allows to use more than
  one implementation of dma-mapping calls and change/select them on the
  struct device basis.  The first client of this new infrastructure is
  dmabounce implementation which has been completely cut out of the
  core, common code.

  The last patch of this redesign update introduces a new, experimental
  implementation of dma-mapping calls on top of generic IOMMU framework.
  This lets ARM sub-platforms transparently use IOMMU for DMA-mapping
  calls if one provides required IOMMU hardware.

  For more information please refer to the following thread:
		http://www.spinics.net/lists/arm-kernel/msg175729.html

  The last patch merges changes from both updates and provides a
  resolution for the conflicts which cannot be avoided when patches have
  been applied on the same files (mainly arch/arm/mm/dma-mapping.c)."

Acked by Andrew Morton <akpm@linux-foundation.org>:
 "Yup, this one please.  It's had much work, plenty of review and I
  think even Russell is happy with it."

* 'for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping: (28 commits)
  ARM: dma-mapping: use PMD size for section unmap
  cma: fix migration mode
  ARM: integrate CMA with DMA-mapping subsystem
  X86: integrate CMA with DMA-mapping subsystem
  drivers: add Contiguous Memory Allocator
  mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks
  mm: extract reclaim code from __alloc_pages_direct_reclaim()
  mm: Serialize access to min_free_kbytes
  mm: page_isolation: MIGRATE_CMA isolation functions added
  mm: mmzone: MIGRATE_CMA migration type added
  mm: page_alloc: change fallbacks array handling
  mm: page_alloc: introduce alloc_contig_range()
  mm: compaction: export some of the functions
  mm: compaction: introduce isolate_freepages_range()
  mm: compaction: introduce map_pages()
  mm: compaction: introduce isolate_migratepages_range()
  mm: page_alloc: remove trailing whitespace
  ARM: dma-mapping: add support for IOMMU mapper
  ARM: dma-mapping: use alloc, mmap, free from dma_ops
  ARM: dma-mapping: remove redundant code and do the cleanup
  ...

Conflicts:
	arch/x86/include/asm/dma-mapping.h
parents be87cfb4 0f51596b
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -508,6 +508,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			Also note the kernel might malfunction if you disable
			some critical bits.

	cma=nn[MG]	[ARM,KNL]
			Sets the size of kernel global memory area for contiguous
			memory allocations. For more information, see
			include/linux/dma-contiguous.h

	cmo_free_hint=	[PPC] Format: { yes | no }
			Specify whether pages are marked as being inactive
			when they are freed.  This is used in CMO environments
@@ -515,6 +520,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			a hypervisor.
			Default: yes

	coherent_pool=nn[KMG]	[ARM,KNL]
			Sets the size of memory pool for coherent, atomic dma
			allocations if Contiguous Memory Allocator (CMA) is used.

	code_bytes	[X86] How many bytes of object code to print
			in an oops report.
			Range: 0 - 8192
+3 −0
Original line number Diff line number Diff line
@@ -159,6 +159,9 @@ config HAVE_ARCH_TRACEHOOK
config HAVE_DMA_ATTRS
	bool

config HAVE_DMA_CONTIGUOUS
	bool

config USE_GENERIC_SMP_HELPERS
	bool

+11 −0
Original line number Diff line number Diff line
@@ -5,6 +5,9 @@ config ARM
	select HAVE_AOUT
	select HAVE_DMA_API_DEBUG
	select HAVE_IDE if PCI || ISA || PCMCIA
	select HAVE_DMA_ATTRS
	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
	select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
	select HAVE_MEMBLOCK
	select RTC_LIB
	select SYS_SUPPORTS_APM_EMULATION
@@ -54,6 +57,14 @@ config ARM
config ARM_HAS_SG_CHAIN
	bool

config NEED_SG_DMA_LENGTH
	bool

config ARM_DMA_USE_IOMMU
	select NEED_SG_DMA_LENGTH
	select ARM_HAS_SG_CHAIN
	bool

config HAVE_PWM
	bool

+65 −19
Original line number Diff line number Diff line
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
		       __func__, ptr);
		return ~0;
		return DMA_ERROR_CODE;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	dma_addr_t dma_addr;
	int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return ~0;
		return DMA_ERROR_CODE;

	if (ret == 0) {
		__dma_page_cpu_to_dev(page, offset, size, dir);
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return ~0;
		return DMA_ERROR_CODE;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(__dma_map_page);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct safe_buffer *buf;

@@ -352,19 +353,18 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
			dma_addr & ~PAGE_MASK, size, dir);
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);

int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);
@@ -373,6 +373,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -388,12 +390,21 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);
@@ -402,6 +413,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.set_dma_mask(dev, dma_mask);
}

static struct dma_map_ops dmabounce_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= dmabounce_set_mask,
};

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
+4 −0
Original line number Diff line number Diff line
@@ -7,12 +7,16 @@
#define ASMARM_DEVICE_H

struct dev_archdata {
	struct dma_map_ops	*dma_ops;
#ifdef CONFIG_DMABOUNCE
	struct dmabounce_device_info *dmabounce;
#endif
#ifdef CONFIG_IOMMU_API
	void *iommu; /* private IOMMU data */
#endif
#ifdef CONFIG_ARM_DMA_USE_IOMMU
	struct dma_iommu_mapping	*mapping;
#endif
};

struct omap_device;
Loading