
Commit dd3641fc authored by Russell King

ARM: dmabounce: move decision for bouncing into __dma_map_page()



Move the decision whether to bounce into __dma_map_page(), before
the check for high pages.  This avoids triggering the high page
check for devices which aren't using dmabounce.  Fix the unmap path
to cope too.
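
For readers skimming the change, this is the new shape of __dma_map_page()
after this commit, condensed from the diff below (dev_dbg/dev_err logging
dropped; a sketch, not the verbatim kernel source):

dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	/* Decide up front whether this device needs bouncing at all. */
	int ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return ~0;		/* mapping error */

	if (ret == 0) {
		/* No bounce buffer needed: only cache maintenance. */
		__dma_page_cpu_to_dev(page, offset, size, dir);
		return dma_addr;
	}

	/* Only buffers that actually bounce hit the highmem restriction. */
	if (PageHighMem(page))
		return ~0;

	return map_single(dev, page_address(page) + offset, size, dir);
}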

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 23bc9873
arch/arm/common/dmabounce.c: +58 −66
@@ -246,21 +246,11 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	dma_addr_t dma_addr;
-	int ret;
+	struct safe_buffer *buf;
 
 	if (device_info)
 		DO_STATS ( device_info->map_op_count++ );
 
-	dma_addr = virt_to_dma(dev, ptr);
-
-	ret = needs_bounce(dev, dma_addr, size);
-	if (ret < 0)
-		return ~0;
-
-	if (ret > 0) {
-		struct safe_buffer *buf;
-
 	buf = alloc_safe_buffer(device_info, ptr, size, dir);
 	if (buf == 0) {
 		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
@@ -268,42 +258,26 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		return ~0;
 	}
 
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
 
-		if ((dir == DMA_TO_DEVICE) ||
-		    (dir == DMA_BIDIRECTIONAL)) {
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
 		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
 			__func__, ptr, buf->safe, size);
 		memcpy(buf->safe, ptr, size);
 	}
-		ptr = buf->safe;
-
-		dma_addr = buf->safe_dma_addr;
-	} else {
-		/*
-		 * We don't need to sync the DMA buffer since
-		 * it was allocated via the coherent allocators.
-		 */
-		__dma_single_cpu_to_dev(ptr, size, dir);
-	}
 
-	return dma_addr;
+	return buf->safe_dma_addr;
 }
 
-static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 		size_t size, enum dma_data_direction dir)
 {
-	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
-
-	if (buf) {
 	BUG_ON(buf->size != size);
 	BUG_ON(buf->direction != dir);
 
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
 
@@ -312,8 +286,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
 		void *ptr = buf->ptr;
 
-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
 			__func__, buf->safe, ptr, size);
 		memcpy(ptr, buf->safe, size);
 
@@ -325,9 +298,6 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 		__cpuc_flush_dcache_area(ptr, size);
 	}
 	free_safe_buffer(dev->archdata.dmabounce, buf);
-	} else {
-		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
-	}
 }
 
 /* ************************************************** */
@@ -341,12 +311,25 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t dma_addr;
+	int ret;
+
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);
 
+	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+	ret = needs_bounce(dev, dma_addr, size);
+	if (ret < 0)
+		return ~0;
+
+	if (ret == 0) {
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+		return dma_addr;
+	}
+
 	if (PageHighMem(page)) {
-		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
-			     "is not supported\n");
+		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
 		return ~0;
 	}
 
@@ -363,10 +346,19 @@ EXPORT_SYMBOL(__dma_map_page);
 void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
+	struct safe_buffer *buf;
+
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);
 
-	unmap_single(dev, dma_addr, size, dir);
+	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+	if (!buf) {
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+			dma_addr & ~PAGE_MASK, size, dir);
+		return;
+	}
+
+	unmap_single(dev, buf, size, dir);
 }
 EXPORT_SYMBOL(__dma_unmap_page);