
Commit bb9e6d65 authored by FUJITA Tomonori, committed by David Woodhouse

intel-iommu: use coherent_dma_mask in alloc_coherent



This patch fixes intel-iommu to use dev->coherent_dma_mask in
alloc_coherent. Currently, intel-iommu uses dev->dma_mask in
alloc_coherent, but alloc_coherent is supposed to use
coherent_dma_mask. The old behaviour could break drivers that use a
smaller coherent_dma_mask than dma_mask (though it works for the
majority of drivers, which use the same mask for both).

[dwmw2: dma_mask can be bigger than 'unsigned long']
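
For illustration, a minimal, hypothetical probe sketch (function name and
error handling assumed; not part of this patch) of a driver for which the
two masks legitimately differ, as described above:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical device: streaming DMA can use 64-bit addresses,
 * but coherent buffers must stay below 4GB. */
static int example_probe(struct pci_dev *pdev)
{
	/* Sets dev->dma_mask, consulted for map_single/map_sg. */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		return -EIO;

	/* Sets dev->coherent_dma_mask, consulted for alloc_coherent. */
	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;

	return 0;
}

Before this patch, intel_alloc_coherent() on such a device allocated
against the 64-bit dma_mask and could hand back a buffer above 4GB that
the device cannot reach coherently.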
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent f609891f
+19 −10
@@ -1762,14 +1762,14 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
 
 static struct iova *
 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-		size_t size)
+		   size_t size, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iova *iova = NULL;
 
-	if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
-		iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
-	} else  {
+	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
+		iova = iommu_alloc_iova(domain, size, dma_mask);
+	else {
 		/*
 		 * First try to allocate an io virtual address in
 		 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1777,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
 		 */
 		iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
 		if (!iova)
-			iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+			iova = iommu_alloc_iova(domain, size, dma_mask);
 	}
 
 	if (!iova) {
@@ -1816,8 +1816,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
-dma_addr_t
-intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
+static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+				     size_t size, int dir, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
@@ -1836,7 +1836,7 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 
 	size = aligned_size((u64)paddr, size);
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -1878,6 +1878,13 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 	return 0;
 }
 
+dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
+			    size_t size, int dir)
+{
+	return __intel_map_single(hwdev, paddr, size, dir,
+				  to_pci_dev(hwdev)->dma_mask);
+}
+
 static void flush_unmaps(void)
 {
 	int i, j;
@@ -1993,7 +2000,9 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size,
 		return NULL;
 	memset(vaddr, 0, size);
 
-	*dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL);
+	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+					 DMA_BIDIRECTIONAL,
+					 hwdev->coherent_dma_mask);
 	if (*dma_handle)
 		return vaddr;
 	free_pages((unsigned long)vaddr, order);
@@ -2097,7 +2106,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 		size += aligned_size((u64)addr, sg->length);
 	}
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
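
A note on the allocation strategy the patch preserves: __intel_alloc_iova()
still tries to place the IOVA below DMA_32BIT_MASK first and only falls back
to the device's full mask. Addresses below 4GB let PCI devices use single
address cycles (SAC) rather than the costlier dual address cycles (DAC);
the dmar_forcedac boot option disables this optimization. A simplified,
standalone restatement of that pattern (hypothetical function name, with
the forcedac check omitted):

static struct iova *alloc_iova_sac_first(struct dmar_domain *domain,
					 size_t size, u64 dma_mask)
{
	struct iova *iova;

	/* A mask at or below 4GB leaves no choice of range. */
	if (dma_mask <= DMA_32BIT_MASK)
		return iommu_alloc_iova(domain, size, dma_mask);

	/* Prefer a SAC-friendly IOVA below 4GB... */
	iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
	if (!iova)
		/* ...and use the full mask only if that range is exhausted. */
		iova = iommu_alloc_iova(domain, size, dma_mask);
	return iova;
}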