Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d618382b authored by David S. Miller
Browse files

iommu-common: Fix error code used in iommu_tbl_range_{alloc,free}().



The value returned from iommu_tbl_range_alloc() (and the one passed
in as a fourth argument to iommu_tbl_range_free) is not a DMA address,
it is rather an index into the IOMMU page table.

Therefore using DMA_ERROR_CODE is not appropriate.

Use a more type-appropriate error code define, IOMMU_ERROR_CODE, and
update all users of this interface.

Reported-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 73958c65
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -161,7 +161,7 @@ static inline iopte_t *alloc_npages(struct device *dev,

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == DMA_ERROR_CODE))
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
@@ -253,7 +253,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

	order = get_order(size);
	if (order < 10)
@@ -426,7 +426,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -492,7 +492,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
@@ -571,7 +571,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     DMA_ERROR_CODE);
					     IOMMU_ERROR_CODE);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
@@ -648,7 +648,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     DMA_ERROR_CODE);
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

+1 −1
Original line number Diff line number Diff line
@@ -1953,7 +1953,7 @@ static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,

	entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
				      npages, NULL, (unsigned long)-1, 0);
	if (unlikely(entry < 0))
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
+9 −9
Original line number Diff line number Diff line
@@ -159,7 +159,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == DMA_ERROR_CODE))
	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -187,7 +187,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
	return ret;

iommu_map_fail:
	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
@@ -226,7 +226,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(&devhandle, entry, npages);
	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
@@ -256,7 +256,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == DMA_ERROR_CODE))
	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -288,7 +288,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
	return DMA_ERROR_CODE;

iommu_map_fail:
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_ERROR_CODE;
}

@@ -317,7 +317,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
	bus_addr &= IO_PAGE_MASK;
	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(&devhandle, entry, npages);
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -376,7 +376,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
@@ -451,7 +451,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     DMA_ERROR_CODE);
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
@@ -496,7 +496,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(&devhandle, entry, npages);
		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     DMA_ERROR_CODE);
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

+1 −0
Original line number Diff line number Diff line
@@ -7,6 +7,7 @@

#define IOMMU_POOL_HASHBITS     4
#define IOMMU_NR_POOLS          (1 << IOMMU_POOL_HASHBITS)
#define IOMMU_ERROR_CODE	(~(unsigned long) 0)

struct iommu_pool {
	unsigned long	start;
+3 −7
Original line number Diff line number Diff line
@@ -11,10 +11,6 @@
#include <linux/dma-mapping.h>
#include <linux/hash.h>

#ifndef	DMA_ERROR_CODE
#define	DMA_ERROR_CODE (~(dma_addr_t)0x0)
#endif

static unsigned long iommu_large_alloc = 15;

static	DEFINE_PER_CPU(unsigned int, iommu_hash_common);
@@ -124,7 +120,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
	/* Sanity check */
	if (unlikely(npages == 0)) {
		WARN_ON_ONCE(1);
		return DMA_ERROR_CODE;
		return IOMMU_ERROR_CODE;
	}

	if (largealloc) {
@@ -207,7 +203,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
			goto again;
		} else {
			/* give up */
			n = DMA_ERROR_CODE;
			n = IOMMU_ERROR_CODE;
			goto bail;
		}
	}
@@ -259,7 +255,7 @@ void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
	unsigned long flags;
	unsigned long shift = iommu->table_shift;

	if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */
	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
		entry = (dma_addr - iommu->table_map_base) >> shift;
	pool = get_pool(iommu, entry);