Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3684b03d authored by Linus Torvalds
Browse files

Merge tag 'iommu-fixes-v4.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:

 - Some functions defined in a header file for the mediatek driver were
   not marked inline.  Fix that oversight.

 - Fix a potential crash in the ARM64 dma-mapping code when freeing a
   partially initialized domain.

 - Another fix for ARM64 dma-mapping to respect IOMMU mapping
   constraints when allocating IOVA addresses.

* tag 'iommu-fixes-v4.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/dma: Respect IOMMU aperture when allocating
  iommu/dma: Don't put uninitialised IOVA domains
  iommu/mediatek: Mark static functions in headers inline
parents f2fa30a8 c987ff0d
Loading
Loading
Loading
Loading
+9 −5
Original line number Diff line number Diff line
@@ -68,6 +68,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
	if (!iovad)
		return;

	if (iovad->granule)
		put_iova_domain(iovad);
	kfree(iovad);
	domain->iova_cookie = NULL;
@@ -151,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
	}
}

static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
		dma_addr_t dma_limit)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);
	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
@@ -314,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
	if (!pages)
		return NULL;

	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

@@ -386,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;
@@ -538,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		prev = s;
	}

	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

+3 −3
Original line number Diff line number Diff line
@@ -55,19 +55,19 @@ struct mtk_iommu_data {
	bool                            enable_4GB;
};

/*
 * Component-framework match callback: returns nonzero when @dev's
 * device-tree node equals the node passed via @data.
 * NOTE(review): this span is a rendered diff hunk — the first signature
 * line is the removed (non-inline) version and the second, marked
 * 'inline', is its replacement; the fix adds 'inline' because this
 * function is defined in a header (see commit message above).
 */
static int compare_of(struct device *dev, void *data)
static inline int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Bind all component devices attached to the mediatek IOMMU master.
 * Fetches the driver-private mtk_iommu_data from @dev and passes its
 * smi_imu component-match data to component_bind_all(); returns its
 * result (0 on success, negative errno on failure — per the
 * component-framework contract; TODO confirm against the caller).
 * NOTE(review): rendered diff hunk — the first signature line is the
 * removed (non-inline) version; the 'inline' line replaces it because
 * this function lives in a header file.
 */
static int mtk_iommu_bind(struct device *dev)
static inline int mtk_iommu_bind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->smi_imu);
}

static void mtk_iommu_unbind(struct device *dev)
static inline void mtk_iommu_unbind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);