Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a5934725 authored by Joerg Roedel
Browse files

Merge branches 'iommu/fixes', 'arm/omap', 'arm/exynos', 'x86/amd', 'x86/vt-d' and 'core' into next

Loading
Loading
Loading
Loading
+1 −2
Original line number Original line Diff line number Diff line
@@ -155,8 +155,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)


		order = __ffs(tegra->domain->pgsize_bitmap);
		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order,
				 carveout_start >> order);
				 carveout_end >> order);


		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+1 −2
Original line number Original line Diff line number Diff line
@@ -198,8 +198,7 @@ static int host1x_probe(struct platform_device *pdev)


		order = __ffs(host->domain->pgsize_bitmap);
		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order,
				 geometry->aperture_start >> order);
				 geometry->aperture_end >> order);
		host->iova_end = geometry->aperture_end;
		host->iova_end = geometry->aperture_end;
	}
	}


+21 −15
Original line number Original line Diff line number Diff line
@@ -63,7 +63,6 @@
/* IO virtual address start page frame number */
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)
#define IOVA_START_PFN		(1)
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))


/* Reserved IOVA ranges */
/* Reserved IOVA ranges */
#define MSI_RANGE_START		(0xfee00000)
#define MSI_RANGE_START		(0xfee00000)
@@ -1547,10 +1546,11 @@ static unsigned long dma_ops_alloc_iova(struct device *dev,


	if (dma_mask > DMA_BIT_MASK(32))
	if (dma_mask > DMA_BIT_MASK(32))
		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
				      IOVA_PFN(DMA_BIT_MASK(32)));
				      IOVA_PFN(DMA_BIT_MASK(32)), false);


	if (!pfn)
	if (!pfn)
		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
				      IOVA_PFN(dma_mask), true);


	return (pfn << PAGE_SHIFT);
	return (pfn << PAGE_SHIFT);
}
}
@@ -1788,8 +1788,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
	if (!dma_dom->domain.pt_root)
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;
		goto free_dma_dom;


	init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
	init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
			 IOVA_START_PFN, DMA_32BIT_PFN);


	if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
	if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
		goto free_dma_dom;
		goto free_dma_dom;
@@ -2696,8 +2695,7 @@ static int init_reserved_iova_ranges(void)
	struct pci_dev *pdev = NULL;
	struct pci_dev *pdev = NULL;
	struct iova *val;
	struct iova *val;


	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
			 IOVA_START_PFN, DMA_32BIT_PFN);


	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
			  &reserved_rbtree_key);
			  &reserved_rbtree_key);
@@ -3663,11 +3661,11 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
	return table;
	return table;
}
}


static int alloc_irq_index(u16 devid, int count)
static int alloc_irq_index(u16 devid, int count, bool align)
{
{
	struct irq_remap_table *table;
	struct irq_remap_table *table;
	int index, c, alignment = 1;
	unsigned long flags;
	unsigned long flags;
	int index, c;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];


	if (!iommu)
	if (!iommu)
@@ -3677,16 +3675,22 @@ static int alloc_irq_index(u16 devid, int count)
	if (!table)
	if (!table)
		return -ENODEV;
		return -ENODEV;


	if (align)
		alignment = roundup_pow_of_two(count);

	spin_lock_irqsave(&table->lock, flags);
	spin_lock_irqsave(&table->lock, flags);


	/* Scan table for free entries */
	/* Scan table for free entries */
	for (c = 0, index = table->min_index;
	for (index = ALIGN(table->min_index, alignment), c = 0;
	     index < MAX_IRQS_PER_TABLE;
	     index < MAX_IRQS_PER_TABLE;
	     ++index) {
	     index++) {
		if (!iommu->irte_ops->is_allocated(table, index))
		if (!iommu->irte_ops->is_allocated(table, index)) {
			c += 1;
			c += 1;
		else
		} else {
			c     = 0;
			c     = 0;
			index = ALIGN(index, alignment);
			continue;
		}


		if (c == count)	{
		if (c == count)	{
			for (; c != 0; --c)
			for (; c != 0; --c)
@@ -4099,7 +4103,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
		else
		else
			ret = -ENOMEM;
			ret = -ENOMEM;
	} else {
	} else {
		index = alloc_irq_index(devid, nr_irqs);
		bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);

		index = alloc_irq_index(devid, nr_irqs, align);
	}
	}
	if (index < 0) {
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		pr_warn("Failed to allocate IRTE\n");
+10 −0
Original line number Original line Diff line number Diff line
@@ -1743,6 +1743,14 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
	return ops->unmap(ops, iova, size);
	return ops->unmap(ops, iova, size);
}
}


static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;

	if (smmu)
		__arm_smmu_tlb_sync(smmu);
}

static phys_addr_t
static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
{
@@ -1963,6 +1971,8 @@ static struct iommu_ops arm_smmu_ops = {
	.map			= arm_smmu_map,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.map_sg			= default_iommu_map_sg,
	.flush_iotlb_all	= arm_smmu_iotlb_sync,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.remove_device		= arm_smmu_remove_device,
+15 −5
Original line number Original line Diff line number Diff line
@@ -250,6 +250,7 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct mutex			init_mutex; /* Protects smmu pointer */
@@ -735,7 +736,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
	enum io_pgtable_fmt fmt;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	const struct iommu_gather_ops *tlb_ops;


	mutex_lock(&smmu_domain->init_mutex);
	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
	if (smmu_domain->smmu)
@@ -813,7 +813,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
			ias = min(ias, 32UL);
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
			oas = min(oas, 32UL);
		}
		}
		tlb_ops = &arm_smmu_s1_tlb_ops;
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		/*
@@ -833,9 +833,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
			oas = min(oas, 40UL);
			oas = min(oas, 40UL);
		}
		}
		if (smmu->version == ARM_SMMU_V2)
		if (smmu->version == ARM_SMMU_V2)
			tlb_ops = &arm_smmu_s2_tlb_ops_v2;
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
		else
			tlb_ops = &arm_smmu_s2_tlb_ops_v1;
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
		break;
	default:
	default:
		ret = -EINVAL;
		ret = -EINVAL;
@@ -863,7 +863,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.ias		= ias,
		.oas		= oas,
		.oas		= oas,
		.tlb		= tlb_ops,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
		.iommu_dev	= smmu->dev,
	};
	};


@@ -1259,6 +1259,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
	return ops->unmap(ops, iova, size);
	return ops->unmap(ops, iova, size);
}
}


static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->tlb_ops)
		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
					      dma_addr_t iova)
{
{
@@ -1562,6 +1570,8 @@ static struct iommu_ops arm_smmu_ops = {
	.map			= arm_smmu_map,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.map_sg			= default_iommu_map_sg,
	.flush_iotlb_all	= arm_smmu_iotlb_sync,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.remove_device		= arm_smmu_remove_device,
Loading