
Commit e589a440 authored by Alistair Popple, committed by Benjamin Herrenschmidt

powerpc/iommu: Update constant names to reflect their hardcoded page size



The powerpc iommu uses a hardcoded page size of 4K. This patch changes
the name of the IOMMU_PAGE_* macros to reflect the hardcoded values. A
future patch will use the existing names to support dynamic page
sizes.
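
A quick illustration of what the renamed constants do (not part of the patch; the values assume the hardcoded shift of 12 shown in the header hunk below):

	unsigned long addr  = 0x12345;
	unsigned long pfn   = addr >> IOMMU_PAGE_SHIFT_4K;   /* 0x12    - IOMMU page number     */
	unsigned long base  = addr &  IOMMU_PAGE_MASK_4K;    /* 0x12000 - 4K-aligned page base  */
	unsigned long off   = addr & ~IOMMU_PAGE_MASK_4K;    /* 0x345   - offset within page    */
	unsigned long round = IOMMU_PAGE_ALIGN_4K(addr);     /* 0x13000 - rounded up to next 4K */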

Signed-off-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent fee26f6d
+5 −5
@@ -30,10 +30,10 @@
#include <asm/machdep.h>
#include <asm/types.h>

-#define IOMMU_PAGE_SHIFT      12
-#define IOMMU_PAGE_SIZE       (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
-#define IOMMU_PAGE_MASK       (~((1 << IOMMU_PAGE_SHIFT) - 1))
-#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+#define IOMMU_PAGE_SHIFT_4K      12
+#define IOMMU_PAGE_SIZE_4K       (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
+#define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)

/* Boot time flags */
extern int iommu_is_off;
@@ -42,7 +42,7 @@ extern int iommu_force_on;
/* Pure 2^n version of get_order */
static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
{
-	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
+	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT_4K) + 1;
}
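
For reference, a few worked values of get_iommu_order() under the hardcoded 4K shift (illustration only, not part of the patch):

	/* get_iommu_order(8192)    -> __ilog2(8191 >> 12)    + 1 = __ilog2(1)   + 1 = 1  (2 pages)   */
	/* get_iommu_order(65536)   -> __ilog2(65535 >> 12)   + 1 = __ilog2(15)  + 1 = 4  (16 pages)  */
	/* get_iommu_order(1 << 20) -> __ilog2(1048575 >> 12) + 1 = __ilog2(255) + 1 = 8  (256 pages) */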


+2 −2
@@ -83,10 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
		return 0;
	}

-	if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+	if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT_4K)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
-				mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
+				mask, tbl->it_offset << IOMMU_PAGE_SHIFT_4K);
		return 0;
	} else
		return 1;
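
A quick sanity check on the comparison above (illustration only, assuming the 4K shift): a 32-bit DMA mask shifted down by IOMMU_PAGE_SHIFT_4K gives 0xfffff, so a table whose it_offset places the DMA window beyond roughly the first 4GB of bus address space fails the check and the device is reported as unsupported.

	/* e.g. mask = 0xffffffffULL: mask >> IOMMU_PAGE_SHIFT_4K == 0xfffff,
	 * so any it_offset greater than 0xfffff entries returns 0 here. */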
+39 −39
@@ -251,13 +251,13 @@ static unsigned long iommu_range_alloc(struct device *dev,

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << IOMMU_PAGE_SHIFT);
+				      1 << IOMMU_PAGE_SHIFT_4K);
	else
-		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT_4K);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
-			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+			tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT_4K,
			align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
@@ -320,11 +320,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
-	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
+	ret = entry << IOMMU_PAGE_SHIFT_4K;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
-	                              (unsigned long)page & IOMMU_PAGE_MASK,
+				(unsigned long)page & IOMMU_PAGE_MASK_4K,
				direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
@@ -352,7 +352,7 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
{
	unsigned long entry, free_entry;

-	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
@@ -401,7 +401,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
	unsigned long flags;
	struct iommu_pool *pool;

-	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);
@@ -468,13 +468,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
-		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
+		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE_4K);
		align = 0;
-		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
-					  mask >> IOMMU_PAGE_SHIFT, align);
+					  mask >> IOMMU_PAGE_SHIFT_4K, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

@@ -489,15 +489,15 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
-		dma_addr = entry << IOMMU_PAGE_SHIFT;
-		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
+		dma_addr = entry << IOMMU_PAGE_SHIFT_4K;
+		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK_4K);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
-		                              vaddr & IOMMU_PAGE_MASK,
+					vaddr & IOMMU_PAGE_MASK_4K,
					direction, attrs);
		if(unlikely(build_fail))
			goto failure;
@@ -559,9 +559,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

-			vaddr = s->dma_address & IOMMU_PAGE_MASK;
+			vaddr = s->dma_address & IOMMU_PAGE_MASK_4K;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
-						 IOMMU_PAGE_SIZE);
+						 IOMMU_PAGE_SIZE_4K);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
@@ -592,7 +592,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
-					 IOMMU_PAGE_SIZE);
+					 IOMMU_PAGE_SIZE_4K);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}
@@ -676,7 +676,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
-	if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
+	if ((tbl->it_size << IOMMU_PAGE_SHIFT_4K) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;
@@ -768,16 +768,16 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
-	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
+	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE_4K);

	if (tbl) {
		align = 0;
-		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+		if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, align,
+					 mask >> IOMMU_PAGE_SHIFT_4K, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit())  {
@@ -786,7 +786,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
					 npages);
			}
		} else
-			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
+			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK_4K);
	}

	return dma_handle;
@@ -801,7 +801,7 @@ void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
	BUG_ON(direction == DMA_NONE);

	if (tbl) {
-		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
+		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE_4K);
		iommu_free(tbl, dma_handle, npages);
	}
}
@@ -845,10 +845,10 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
-	nio_pages = size >> IOMMU_PAGE_SHIFT;
+	nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
+			      mask >> IOMMU_PAGE_SHIFT_4K, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
@@ -864,7 +864,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
-		nio_pages = size >> IOMMU_PAGE_SHIFT;
+		nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
@@ -935,10 +935,10 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
	if (tce_value)
		return -EINVAL;

-	if (ioba & ~IOMMU_PAGE_MASK)
+	if (ioba & ~IOMMU_PAGE_MASK_4K)
		return -EINVAL;

-	ioba >>= IOMMU_PAGE_SHIFT;
+	ioba >>= IOMMU_PAGE_SHIFT_4K;
	if (ioba < tbl->it_offset)
		return -EINVAL;

@@ -955,13 +955,13 @@ int iommu_tce_put_param_check(struct iommu_table *tbl,
	if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
		return -EINVAL;

-	if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
+	if (tce & ~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ))
		return -EINVAL;

-	if (ioba & ~IOMMU_PAGE_MASK)
+	if (ioba & ~IOMMU_PAGE_MASK_4K)
		return -EINVAL;

-	ioba >>= IOMMU_PAGE_SHIFT;
+	ioba >>= IOMMU_PAGE_SHIFT_4K;
	if (ioba < tbl->it_offset)
		return -EINVAL;

@@ -1037,7 +1037,7 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,

	/* if (unlikely(ret))
		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
-				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
+				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT_4K,
				hwaddr, ret); */

	return ret;
@@ -1049,14 +1049,14 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
{
	int ret;
	struct page *page = NULL;
-	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
+	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK_4K & ~PAGE_MASK;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	ret = get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page);
	if (unlikely(ret != 1)) {
		/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
-				tce, entry << IOMMU_PAGE_SHIFT, ret); */
+				tce, entry << IOMMU_PAGE_SHIFT_4K, ret); */
		return -EFAULT;
	}
	hwaddr = (unsigned long) page_address(page) + offset;
@@ -1067,7 +1067,7 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,

	if (ret < 0)
		pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
-				__func__, entry << IOMMU_PAGE_SHIFT, tce, ret);
+			__func__, entry << IOMMU_PAGE_SHIFT_4K, tce, ret);

	return ret;
}
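
Tying the hunks above together (a sketch only, not from the patch, reusing the entry/uaddr names from the diff): the DMA address handed back by the mapping paths is the TCE entry index shifted up by the 4K IOMMU page shift with the caller's intra-page offset in the low 12 bits, and the free paths recover the entry by shifting back down:

	dma_addr_t dma    = ((dma_addr_t)entry << IOMMU_PAGE_SHIFT_4K) |
			    (uaddr & ~IOMMU_PAGE_MASK_4K);   /* build: iommu_alloc() / iommu_map_page() */
	unsigned long ent = dma >> IOMMU_PAGE_SHIFT_4K;      /* free:  __iommu_free() / iommu_free_check() */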
+10 −9
@@ -520,14 +520,14 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
	struct vio_dev *viodev = to_vio_dev(dev);
	dma_addr_t ret = DMA_ERROR_CODE;

-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
	if (unlikely(dma_mapping_error(dev, ret))) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

@@ -543,7 +543,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,

	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

-	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -556,7 +556,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
	size_t alloc_size = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
+		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE_4K);

	if (vio_cmo_alloc(viodev, alloc_size)) {
		atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +572,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
	}

	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
-		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);

@@ -590,7 +590,7 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
	int count = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);

	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

@@ -736,7 +736,8 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
			return -EINVAL;
		}

-		viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
+		viodev->cmo.desired =
+			IOMMU_PAGE_ALIGN_4K(viodrv->get_desired_dma(viodev));
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;
@@ -1176,9 +1177,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in tce entries */
-	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+	tbl->it_size = size >> IOMMU_PAGE_SHIFT_4K;
	/* offset for VIO should always be 0 */
-	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
+	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT_4K;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;
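
A small illustration of the CMO accounting in this file (not part of the patch): entitlement is charged and released in whole 4K IOMMU pages, so the roundup() calls above turn, for example, a 6000-byte mapping into an 8192-byte charge:

	size_t size    = 6000;
	size_t charged = roundup(size, IOMMU_PAGE_SIZE_4K);   /* 8192: two 4K IOMMU pages */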
+6 −6
@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

-	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
+	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE_4K)
		io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);

	mb();
@@ -430,7 +430,7 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
{
	cell_iommu_setup_stab(iommu, base, size, 0, 0);
	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
-					    IOMMU_PAGE_SHIFT);
+					    IOMMU_PAGE_SHIFT_4K);
	cell_iommu_enable_hardware(iommu);
}

@@ -487,8 +487,8 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
-	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
-	window->table.it_size = size >> IOMMU_PAGE_SHIFT;
+	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT_4K) + pte_offset;
+	window->table.it_size = size >> IOMMU_PAGE_SHIFT_4K;

	iommu_init_table(&window->table, iommu->nid);

@@ -773,7 +773,7 @@ static void __init cell_iommu_init_one(struct device_node *np,

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
-				offset >> IOMMU_PAGE_SHIFT);
+				offset >> IOMMU_PAGE_SHIFT_4K);
}

static void __init cell_disable_iommus(void)
@@ -1122,7 +1122,7 @@ static int __init cell_iommu_fixed_mapping_init(void)

		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
-						    IOMMU_PAGE_SHIFT);
+						    IOMMU_PAGE_SHIFT_4K);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					     fbase, fsize);
		cell_iommu_enable_hardware(iommu);