Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b348aa65 authored by Alexey Kardashevskiy, committed by Michael Ellerman
Browse files

powerpc/spapr: vfio: Replace iommu_table with iommu_table_group



Modern IBM POWERPC systems support multiple (currently two) TCE tables
per IOMMU group (a.k.a. PE). This adds an iommu_table_group container
for TCE tables. Right now just one table is supported.

This defines iommu_table_group struct which stores pointers to
iommu_group and iommu_table(s). This replaces iommu_table with
iommu_table_group where iommu_table was used to identify a group:
- iommu_register_group();
- iommudata of generic iommu_group;

This removes @data from iommu_table as it_table_group provides the
same access to pnv_ioda_pe.

For IODA, instead of embedding iommu_table, the new iommu_table_group
keeps pointers to those. The iommu_table structs are allocated
dynamically.

For P5IOC2, both iommu_table_group and iommu_table are embedded into
PE struct. As there is no EEH and SRIOV support for P5IOC2,
iommu_free_table() should not be called on iommu_table struct pointers
so we can keep it embedded in pnv_phb::p5ioc2.

For pSeries, this replaces multiple calls of kzalloc_node() with a new
iommu_pseries_alloc_group() helper and stores the table group struct
pointer into the pci_dn struct. For release, an iommu_table_free_group()
helper is added.

This moves iommu_table struct allocation from SR-IOV code to
the generic DMA initialization code in pnv_pci_ioda_setup_dma_pe and
pnv_pci_ioda2_setup_dma_pe as this is where DMA is actually initialized.
This change is here because those lines had to be changed anyway.

This should cause no behavioural change.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[aw: for the vfio related changes]
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent decbda25
Loading
Loading
Loading
Loading
+11 −8
Original line number Diff line number Diff line
@@ -91,14 +91,9 @@ struct iommu_table {
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;       /* A simple allocation bitmap for now */
	unsigned long  it_page_shift;/* table iommu page size */
#ifdef CONFIG_IOMMU_API
	struct iommu_group *it_group;
#endif
	struct iommu_table_group *it_table_group;
	struct iommu_table_ops *it_ops;
	void (*set_bypass)(struct iommu_table *tbl, bool enable);
#ifdef CONFIG_PPC_POWERNV
	void           *data;
#endif
};

/* Pure 2^n version of get_order */
@@ -129,14 +124,22 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
 */
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
					    int nid);
#define IOMMU_TABLE_GROUP_MAX_TABLES	1

struct iommu_table_group {
	struct iommu_group *group;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
};

#ifdef CONFIG_IOMMU_API
extern void iommu_register_group(struct iommu_table *tbl,

extern void iommu_register_group(struct iommu_table_group *table_group,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct device *dev);
extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void);
#else
static inline void iommu_register_group(struct iommu_table *tbl,
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
+1 −1
Original line number Diff line number Diff line
@@ -199,7 +199,7 @@ struct pci_dn {

	struct  pci_dn *parent;
	struct  pci_controller *phb;	/* for pci devices */
	struct	iommu_table *iommu_table;	/* for phb's or bridges */
	struct	iommu_table_group *table_group;	/* for phb's or bridges */
	struct	device_node *node;	/* back-pointer to the device_node */

	int	pci_ext_config_space;	/* for pci devices */
+9 −8
Original line number Diff line number Diff line
@@ -889,11 +889,12 @@ EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
 */
static void group_release(void *iommu_data)
{
	struct iommu_table *tbl = iommu_data;
	tbl->it_group = NULL;
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table *tbl,
void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
@@ -905,8 +906,8 @@ void iommu_register_group(struct iommu_table *tbl,
				PTR_ERR(grp));
		return;
	}
	tbl->it_group = grp;
	iommu_group_set_iommudata(grp, tbl, group_release);
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
@@ -1094,7 +1095,7 @@ int iommu_add_device(struct device *dev)
	}

	tbl = get_iommu_table_base(dev);
	if (!tbl || !tbl->it_group) {
	if (!tbl || !tbl->it_table_group || !tbl->it_table_group->group) {
		pr_debug("%s: Skipping device %s with no tbl\n",
			 __func__, dev_name(dev));
		return 0;
@@ -1102,7 +1103,7 @@ int iommu_add_device(struct device *dev)

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev),
		 iommu_group_id(tbl->it_group));
		 iommu_group_id(tbl->it_table_group->group));

	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
		pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
@@ -1111,7 +1112,7 @@ int iommu_add_device(struct device *dev)
		return -EINVAL;
	}

	return iommu_group_add_device(tbl->it_group, dev);
	return iommu_group_add_device(tbl->it_table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

+30 −25
Original line number Diff line number Diff line
@@ -1087,10 +1087,6 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
		return;
	}

	pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
			GFP_KERNEL, hose->node);
	pe->tce32_table->data = pe;

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

@@ -1292,11 +1288,12 @@ static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe
	struct iommu_table    *tbl;
	unsigned long         addr;
	int64_t               rc;
	struct iommu_table_group *table_group;

	bus = dev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	tbl = pe->tce32_table;
	tbl = pe->table_group.tables[0];
	addr = tbl->it_base;

	opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
@@ -1311,13 +1308,14 @@ static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe
	if (rc)
		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);

	if (tbl->it_group) {
		iommu_group_put(tbl->it_group);
		BUG_ON(tbl->it_group);
	table_group = tbl->it_table_group;
	if (table_group->group) {
		iommu_group_put(table_group->group);
		BUG_ON(table_group->group);
	}
	iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
	free_pages(addr, get_order(TCE32_TABLE_SIZE));
	pe->tce32_table = NULL;
	pe->table_group.tables[0] = NULL;
}

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
@@ -1465,10 +1463,6 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
			continue;
		}

		pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
				GFP_KERNEL, hose->node);
		pe->tce32_table->data = pe;

		/* Put PE to the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
@@ -1603,7 +1597,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_iommu_table_base(&pdev->dev, pe->tce32_table);
	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	/*
	 * Note: iommu_add_device() will fail here as
	 * for physical PE: the device is already added by now;
@@ -1637,7 +1631,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
	} else {
		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
		set_dma_ops(&pdev->dev, &dma_iommu_ops);
		set_iommu_table_base(&pdev->dev, pe->tce32_table);
		set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	}
	*pdev->dev.dma_mask = dma_mask;
	return 0;
@@ -1671,7 +1665,7 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, pe->tce32_table);
		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
		iommu_add_device(&dev->dev);

		if (dev->subordinate)
@@ -1682,7 +1676,8 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct pnv_ioda_pe *pe = tbl->data;
	struct pnv_ioda_pe *pe = container_of(tbl->it_table_group,
			struct pnv_ioda_pe, table_group);
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
@@ -1759,7 +1754,8 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct pnv_ioda_pe *pe = tbl->data;
	struct pnv_ioda_pe *pe = container_of(tbl->it_table_group,
			struct pnv_ioda_pe, table_group);
	unsigned long start, end, inc;
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
@@ -1835,8 +1831,12 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	tbl = pe->tce32_table;
	iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
			phb->hose->node);
	tbl->it_table_group = &pe->table_group;
	pe->table_group.tables[0] = tbl;
	iommu_register_group(&pe->table_group, phb->hose->global_number,
			pe->pe_number);

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
@@ -1915,7 +1915,8 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,

static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
{
	struct pnv_ioda_pe *pe = tbl->data;
	struct pnv_ioda_pe *pe = container_of(tbl->it_table_group,
			struct pnv_ioda_pe, table_group);
	uint16_t window_id = (pe->pe_number << 1 ) + 1;
	int64_t rc;

@@ -1949,10 +1950,10 @@ static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
	pe->tce_bypass_base = 1ull << 59;

	/* Install set_bypass callback for VFIO */
	pe->tce32_table->set_bypass = pnv_pci_ioda2_set_bypass;
	pe->table_group.tables[0]->set_bypass = pnv_pci_ioda2_set_bypass;

	/* Enable bypass by default */
	pnv_pci_ioda2_set_bypass(pe->tce32_table, true);
	pnv_pci_ioda2_set_bypass(pe->table_group.tables[0], true);
}

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
@@ -1969,8 +1970,12 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	tbl = pe->tce32_table;
	iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
			phb->hose->node);
	tbl->it_table_group = &pe->table_group;
	pe->table_group.tables[0] = tbl;
	iommu_register_group(&pe->table_group, phb->hose->global_number,
			pe->pe_number);

	/* The PE will reserve all possible 32-bits space */
	pe->tce32_seg = 0;
+13 −5
Original line number Diff line number Diff line
@@ -92,14 +92,16 @@ static struct iommu_table_ops pnv_p5ioc2_iommu_ops = {
static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
					 struct pci_dev *pdev)
{
	if (phb->p5ioc2.iommu_table.it_map == NULL) {
		phb->p5ioc2.iommu_table.it_ops = &pnv_p5ioc2_iommu_ops;
		iommu_init_table(&phb->p5ioc2.iommu_table, phb->hose->node);
		iommu_register_group(&phb->p5ioc2.iommu_table,
	struct iommu_table *tbl = phb->p5ioc2.table_group.tables[0];

	if (!tbl->it_map) {
		tbl->it_ops = &pnv_p5ioc2_iommu_ops;
		iommu_init_table(tbl, phb->hose->node);
		iommu_register_group(&phb->p5ioc2.table_group,
				pci_domain_nr(phb->hose->bus), phb->opal_id);
	}

	set_iommu_table_base(&pdev->dev, &phb->p5ioc2.iommu_table);
	set_iommu_table_base(&pdev->dev, tbl);
	iommu_add_device(&pdev->dev);
}

@@ -188,6 +190,12 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
	pnv_pci_setup_iommu_table(&phb->p5ioc2.iommu_table,
				  tce_mem, tce_size, 0,
				  IOMMU_PAGE_SHIFT_4K);
	/*
	 * We do not allocate iommu_table as we do not support
	 * hotplug or SRIOV on P5IOC2 and therefore iommu_free_table()
	 * should not be called for phb->p5ioc2.table_group.tables[0] ever.
	 */
	phb->p5ioc2.table_group.tables[0] = &phb->p5ioc2.iommu_table;
}

void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
Loading