Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da004c36 authored by Alexey Kardashevskiy, committed by Michael Ellerman
Browse files

powerpc/iommu: Move tce_xxx callbacks from ppc_md to iommu_table



This adds an iommu_table_ops struct and puts a pointer to it into
the iommu_table struct. This moves the tce_build/tce_free/tce_get/tce_flush
callbacks from ppc_md to the new struct where they really belong.

This adds the requirement for @it_ops to be initialized before calling
iommu_init_table() to make sure that we do not leave any IOMMU table
with iommu_table_ops uninitialized. This is not a parameter of
iommu_init_table() though as there will be cases when iommu_init_table()
will not be called on TCE tables, for example - VFIO.

This does s/tce_build/set/, s/tce_free/clear/ and removes "tce_"
redundant prefixes.

This removes tce_xxx_rm handlers from ppc_md but does not add
them to iommu_table_ops as this will be done later if we decide to
support TCE hypercalls in real mode. This removes _vm callbacks as
only virtual mode is supported by now so this also removes @rm parameter.

For pSeries, this always uses tce_buildmulti_pSeriesLP/
tce_freemulti_pSeriesLP. This changes the multi callbacks to fall back to
tce_build_pSeriesLP/tce_free_pSeriesLP if FW_FEATURE_MULTITCE is not
present. The reason for this is we still have to support "multitce=off"
boot parameter in disable_multitce() and we do not want to walk through
all IOMMU tables in the system and replace "multi" callbacks with single
ones.

For powernv, this defines _ops per PHB type which are P5IOC2/IODA1/IODA2.
This makes the callbacks for them public. Later patches will extend
callbacks for IODA1/2.

No change in behaviour is expected.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 10b35b2b
Loading
Loading
Loading
Loading
+17 −0
Original line number Original line Diff line number Diff line
@@ -44,6 +44,22 @@
extern int iommu_is_off;
extern int iommu_is_off;
extern int iommu_force_on;
extern int iommu_force_on;


/*
 * Per-iommu_table TCE manipulation callbacks, moved here from the global
 * ppc_md (machdep_calls) hooks so each table carries its own backend.
 * Callers check the optional members (get, flush) for NULL before use.
 */
struct iommu_table_ops {
	/*
	 * Create @npages TCE entries starting at @index, mapping @uaddr
	 * with the given DMA @direction (replaces ppc_md.tce_build).
	 * Non-zero return indicates a transient failure; the caller
	 * cleans up its bitmap and reports DMA_ERROR_CODE.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			struct dma_attrs *attrs);
	/*
	 * Clear @npages TCE entries starting at @index (replaces
	 * ppc_md.tce_free). Only clearing to zero is supported.
	 */
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* Read back the TCE at @index (replaces ppc_md.tce_get); optional. */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	/* Flush/invalidate hardware TLB caches after updates; optional. */
	void (*flush)(struct iommu_table *tbl);
};

/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;

/*
/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
@@ -78,6 +94,7 @@ struct iommu_table {
#ifdef CONFIG_IOMMU_API
#ifdef CONFIG_IOMMU_API
	struct iommu_group *it_group;
	struct iommu_group *it_group;
#endif
#endif
	struct iommu_table_ops *it_ops;
	void (*set_bypass)(struct iommu_table *tbl, bool enable);
	void (*set_bypass)(struct iommu_table *tbl, bool enable);
#ifdef CONFIG_PPC_POWERNV
#ifdef CONFIG_PPC_POWERNV
	void           *data;
	void           *data;
+0 −25
Original line number Original line Diff line number Diff line
@@ -65,31 +65,6 @@ struct machdep_calls {
	 * destroyed as well */
	 * destroyed as well */
	void		(*hpte_clear_all)(void);
	void		(*hpte_clear_all)(void);


	int		(*tce_build)(struct iommu_table *tbl,
				     long index,
				     long npages,
				     unsigned long uaddr,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs);
	void		(*tce_free)(struct iommu_table *tbl,
				    long index,
				    long npages);
	unsigned long	(*tce_get)(struct iommu_table *tbl,
				    long index);
	void		(*tce_flush)(struct iommu_table *tbl);

	/* _rm versions are for real mode use only */
	int		(*tce_build_rm)(struct iommu_table *tbl,
				     long index,
				     long npages,
				     unsigned long uaddr,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs);
	void		(*tce_free_rm)(struct iommu_table *tbl,
				    long index,
				    long npages);
	void		(*tce_flush_rm)(struct iommu_table *tbl);

	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
				   unsigned long flags, void *caller);
				   unsigned long flags, void *caller);
	void		(*iounmap)(volatile void __iomem *token);
	void		(*iounmap)(volatile void __iomem *token);
+24 −22
Original line number Original line Diff line number Diff line
@@ -322,11 +322,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */


	/* Put the TCEs in the HW table */
	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);
				      IOMMU_PAGE_MASK(tbl), direction, attrs);


	/* ppc_md.tce_build() only returns non-zero for transient errors.
	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 * not altered.
@@ -337,8 +337,8 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
	}
	}


	/* Flush/invalidate TLB caches if necessary */
	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
	if (tbl->it_ops->flush)
		ppc_md.tce_flush(tbl);
		tbl->it_ops->flush(tbl);


	/* Make sure updates are seen by hardware */
	/* Make sure updates are seen by hardware */
	mb();
	mb();
@@ -408,7 +408,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
	if (!iommu_free_check(tbl, dma_addr, npages))
	if (!iommu_free_check(tbl, dma_addr, npages))
		return;
		return;


	ppc_md.tce_free(tbl, entry, npages);
	tbl->it_ops->clear(tbl, entry, npages);


	spin_lock_irqsave(&(pool->lock), flags);
	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	bitmap_clear(tbl->it_map, free_entry, npages);
@@ -424,8 +424,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
	 * not do an mb() here on purpose, it is not needed on any of
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 * the current platforms.
	 */
	 */
	if (ppc_md.tce_flush)
	if (tbl->it_ops->flush)
		ppc_md.tce_flush(tbl);
		tbl->it_ops->flush(tbl);
}
}


int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
@@ -495,7 +495,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			    npages, entry, dma_addr);
			    npages, entry, dma_addr);


		/* Insert into HW table */
		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
					      direction, attrs);
		if(unlikely(build_fail))
		if(unlikely(build_fail))
@@ -534,8 +534,8 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
	}
	}


	/* Flush/invalidate TLB caches if necessary */
	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
	if (tbl->it_ops->flush)
		ppc_md.tce_flush(tbl);
		tbl->it_ops->flush(tbl);


	DBG("mapped %d elements:\n", outcount);
	DBG("mapped %d elements:\n", outcount);


@@ -600,8 +600,8 @@ void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
	 * do not do an mb() here, the affected platforms do not need it
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 * when freeing.
	 */
	 */
	if (ppc_md.tce_flush)
	if (tbl->it_ops->flush)
		ppc_md.tce_flush(tbl);
		tbl->it_ops->flush(tbl);
}
}


static void iommu_table_clear(struct iommu_table *tbl)
static void iommu_table_clear(struct iommu_table *tbl)
@@ -613,17 +613,17 @@ static void iommu_table_clear(struct iommu_table *tbl)
	 */
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
		return;
	}
	}


#ifdef CONFIG_CRASH_DUMP
#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;
		unsigned long index, tceval, tcecount = 0;


		/* Reserve the existing mappings left by the first kernel. */
		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			 */
@@ -657,6 +657,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
	unsigned int i;
	unsigned int i;
	struct iommu_pool *p;
	struct iommu_pool *p;


	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);


@@ -929,8 +931,8 @@ EXPORT_SYMBOL_GPL(iommu_tce_direction);
void iommu_flush_tce(struct iommu_table *tbl)
void iommu_flush_tce(struct iommu_table *tbl)
{
{
	/* Flush/invalidate TLB caches if necessary */
	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
	if (tbl->it_ops->flush)
		ppc_md.tce_flush(tbl);
		tbl->it_ops->flush(tbl);


	/* Make sure updates are seen by hardware */
	/* Make sure updates are seen by hardware */
	mb();
	mb();
@@ -941,7 +943,7 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce_value,
		unsigned long ioba, unsigned long tce_value,
		unsigned long npages)
		unsigned long npages)
{
{
	/* ppc_md.tce_free() does not support any value but 0 */
	/* tbl->it_ops->clear() does not support any value but 0 */
	if (tce_value)
	if (tce_value)
		return -EINVAL;
		return -EINVAL;


@@ -989,9 +991,9 @@ unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry)


	spin_lock(&(pool->lock));
	spin_lock(&(pool->lock));


	oldtce = ppc_md.tce_get(tbl, entry);
	oldtce = tbl->it_ops->get(tbl, entry);
	if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
	if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
		ppc_md.tce_free(tbl, entry, 1);
		tbl->it_ops->clear(tbl, entry, 1);
	else
	else
		oldtce = 0;
		oldtce = 0;


@@ -1014,10 +1016,10 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,


	spin_lock(&(pool->lock));
	spin_lock(&(pool->lock));


	oldtce = ppc_md.tce_get(tbl, entry);
	oldtce = tbl->it_ops->get(tbl, entry);
	/* Add new entry if it is not busy */
	/* Add new entry if it is not busy */
	if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
	if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
		ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);
		ret = tbl->it_ops->set(tbl, entry, 1, hwaddr, direction, NULL);


	spin_unlock(&(pool->lock));
	spin_unlock(&(pool->lock));


+5 −0
Original line number Original line Diff line number Diff line
@@ -1196,6 +1196,11 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
	tbl->it_type = TCE_VB;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;
	tbl->it_blocksize = 16;


	if (firmware_has_feature(FW_FEATURE_LPAR))
		tbl->it_ops = &iommu_table_lpar_multi_ops;
	else
		tbl->it_ops = &iommu_table_pseries_ops;

	return iommu_init_table(tbl, -1);
	return iommu_init_table(tbl, -1);
}
}


+6 −2
Original line number Original line Diff line number Diff line
@@ -466,6 +466,11 @@ static inline u32 cell_iommu_get_ioid(struct device_node *np)
	return *ioid;
	return *ioid;
}
}


/*
 * TCE callbacks for the Cell IOMMU, installed into each window's table
 * via window->table.it_ops (replaces the former ppc_md.tce_build/
 * ppc_md.tce_free assignments). No .get/.flush: generic iommu code
 * NULL-checks those members before calling them.
 */
static struct iommu_table_ops cell_iommu_ops = {
	.set = tce_build_cell,
	.clear = tce_free_cell
};

static struct iommu_window * __init
static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long offset, unsigned long size,
@@ -492,6 +497,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
	window->table.it_offset =
	window->table.it_offset =
		(offset >> window->table.it_page_shift) + pte_offset;
		(offset >> window->table.it_page_shift) + pte_offset;
	window->table.it_size = size >> window->table.it_page_shift;
	window->table.it_size = size >> window->table.it_page_shift;
	window->table.it_ops = &cell_iommu_ops;


	iommu_init_table(&window->table, iommu->nid);
	iommu_init_table(&window->table, iommu->nid);


@@ -1201,8 +1207,6 @@ static int __init cell_iommu_init(void)
	/* Setup various callbacks */
	/* Setup various callbacks */
	cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
	cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
	ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;


	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;
		goto bail;
Loading