
Commit 6b04014f authored by Linus Torvalds
Pull iommu updates from Joerg Roedel:

 - Make the dma-iommu code more generic so that it can be used outside
   of the ARM context with other IOMMU drivers. The goal is to make use
   of it on x86 too (see the sketch after this list).

 - Generic IOMMU domain support for the Intel VT-d driver. This driver
   now makes more use of common IOMMU code to allocate default domains
   for the devices it handles.

 - An IOMMU fault reporting API to userspace. With this, IOMMU fault
   handling can be done in userspace, for example to forward the faults
   to a VM.

 - Better handling for reserved regions requested by the firmware. These
   can now be 'relaxed', meaning they no longer prevent a device from
   being attached to a VM.

 - Suspend/Resume support for the Renesas IOMMU driver.

 - Support for dumping SVA-related fields of the DMAR table in the
   Intel VT-d driver via debugfs.

 - A pile of smaller fixes and cleanups.
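
   As a rough sketch of the first point (mirroring the arm64 change in the
   diff below; the body is simplified and the cache-alignment and Xen
   handling from the real code are omitted), an architecture now only hands
   the device's DMA window to the common dma-iommu layer:

	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				const struct iommu_ops *iommu, bool coherent)
	{
		dev->dma_coherent = coherent;

		/*
		 * The generic dma-iommu code installs its own dma_map_ops if
		 * the device ended up in an IOMMU_DOMAIN_DMA default domain.
		 */
		if (iommu)
			iommu_setup_dma_ops(dev, dma_base, size);
	}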

* tag 'iommu-updates-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (90 commits)
  iommu/omap: No need to check return value of debugfs_create functions
  iommu/arm-smmu-v3: Invalidate ATC when detaching a device
  iommu/arm-smmu-v3: Fix compilation when CONFIG_CMA=n
  iommu/vt-d: Cleanup unused variable
  iommu/amd: Flush not present cache in iommu_map_page
  iommu/amd: Only free resources once on init error
  iommu/amd: Move gart fallback to amd_iommu_init
  iommu/amd: Make iommu_disable safer
  iommu/io-pgtable: Support non-coherent page tables
  iommu/io-pgtable: Replace IO_PGTABLE_QUIRK_NO_DMA with specific flag
  iommu/io-pgtable-arm: Add support to use system cache
  iommu/arm-smmu-v3: Increase maximum size of queues
  iommu/vt-d: Silence a variable set but not used
  iommu/vt-d: Remove an unused variable "length"
  iommu: Fix integer truncation
  iommu: Add padding to struct iommu_fault
  iommu/vt-d: Consolidate domain_init() to avoid duplication
  iommu/vt-d: Cleanup after delegating DMA domain to generic iommu
  iommu/vt-d: Fix suspicious RCU usage in probe_acpi_namespace_devices()
  iommu/vt-d: Allow DMA domain attaching to rmrr locked device
  ...
parents c6b6cebb d95c3885
Documentation/ABI/testing/sysfs-kernel-iommu_groups  +9 −0
@@ -24,3 +24,12 @@ Description: /sys/kernel/iommu_groups/reserved_regions list IOVA
		region is described on a single line: the 1st field is
		the base IOVA, the second is the end IOVA and the third
		field describes the type of the region.

What:		/sys/kernel/iommu_groups/reserved_regions
Date: 		June 2019
KernelVersion:  v5.3
Contact: 	Eric Auger <eric.auger@redhat.com>
Description:    In case an RMRR is used only by graphics or USB devices,
		it is now exposed as "direct-relaxable" instead of "direct".
		In the device assignment use case, for instance, such RMRRs
		are considered relaxable and safe.
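
		For illustration, a listing could look like this (the group
		number and the direct-relaxable addresses are made up; the
		msi entry is a typical x86 MSI window):

		# cat /sys/kernel/iommu_groups/1/reserved_regions
		0x00000000fee00000 0x00000000feefffff msi
		0x0000000079000000 0x000000007b7fffff direct-relaxable
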
arch/arm64/mm/dma-mapping.c  +4 −408
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>
#include <linux/dma-iommu.h>

#include <asm/cacheflush.h>

@@ -47,37 +36,6 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
	__dma_flush_area(page_address(page), size);
}

#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}
#endif /* CONFIG_IOMMU_DMA */

static int __init arm64_dma_init(void)
{
	return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
@@ -85,374 +43,11 @@ static int __init arm64_dma_init(void)
arch_initcall(arm64_dma_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = dma_alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				dma_free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
					get_order(size), gfp & __GFP_NOWARN);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (addr) {
			if (!coherent)
				__dma_flush_area(page_to_virt(page), iosize);
			memset(addr, 0, size);
		} else {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (dma_in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		dma_free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)){
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (!is_vmalloc_addr(cpu_addr)) {
		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!is_vmalloc_addr(cpu_addr)) {
		struct page *page = virt_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dev_addr != DMA_MAPPING_ERROR)
		__dma_map_area(page_address(page) + offset, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		 dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */
#endif

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
@@ -466,7 +61,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		   ARCH_DMA_MINALIGN, cls);

	dev->dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);

#ifdef CONFIG_XEN
	if (xen_initial_domain())
drivers/iommu/amd_iommu.c  +19 −7
@@ -619,9 +619,9 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
		pasid = ((event[0] >> 16) & 0xFFFF)
			| ((event[1] << 6) & 0xF0000);
		tag = event[1] & 0x03FF;
		dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
		dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
			pasid, address, flags, tag);
		break;
	default:
		dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
@@ -1295,6 +1295,16 @@ static void domain_flush_complete(struct protection_domain *domain)
	}
}

/* Flush the not present cache if it exists */
static void domain_flush_np_cache(struct protection_domain *domain,
		dma_addr_t iova, size_t size)
{
	if (unlikely(amd_iommu_np_cache)) {
		domain_flush_pages(domain, iova, size);
		domain_flush_complete(domain);
	}
}


/*
 * This function flushes the DTEs for all devices in domain
@@ -2377,10 +2387,7 @@ static dma_addr_t __map_single(struct device *dev,
	}
	address += offset;

	if (unlikely(amd_iommu_np_cache)) {
		domain_flush_pages(&dma_dom->domain, address, size);
		domain_flush_complete(&dma_dom->domain);
	}
	domain_flush_np_cache(&dma_dom->domain, address, size);

out:
	return address;
@@ -2559,6 +2566,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
		s->dma_length   = s->length;
	}

	if (s)
		domain_flush_np_cache(domain, s->dma_address, s->dma_length);

	return nelems;

out_unmap:
@@ -2597,7 +2607,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;
	unsigned long startaddr;
	int npages = 2;
	int npages;

	domain = get_domain(dev);
	if (IS_ERR(domain))
@@ -3039,6 +3049,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
	ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
	mutex_unlock(&domain->api_lock);

	domain_flush_np_cache(domain, iova, page_size);

	return ret;
}

drivers/iommu/amd_iommu_init.c  +24 −21
@@ -406,6 +406,9 @@ static void iommu_enable(struct amd_iommu *iommu)

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

@@ -2325,15 +2328,6 @@ static void __init free_iommu_resources(void)
	amd_iommu_dev_table = NULL;

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif
}

/* SB IOAPIC is always on this device in AMD systems */
@@ -2625,8 +2619,6 @@ static int __init state_next(void)
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD IOMMU disabled on kernel command-line\n");
			free_dma_resources();
			free_iommu_resources();
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
@@ -2667,6 +2659,19 @@ static int __init state_next(void)
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}

@@ -2740,17 +2745,15 @@ static int __init amd_iommu_init(void)
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);
drivers/iommu/arm-smmu-v3.c  +49 −20
@@ -192,6 +192,13 @@
#define Q_BASE_ADDR_MASK		GENMASK_ULL(51, 5)
#define Q_BASE_LOG2SIZE			GENMASK(4, 0)

/* Ensure DMA allocations are naturally aligned */
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT			(PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
#define Q_MAX_SZ_SHIFT			(PAGE_SHIFT + MAX_ORDER - 1)
#endif
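
/*
 * Worked example: with 4K pages and CONFIG_CMA_ALIGNMENT at its default of
 * 8, Q_MAX_SZ_SHIFT = 12 + 8 = 20, i.e. each queue is capped at 1MiB.  A
 * 16-byte command entry (CMDQ_ENT_SZ_SHIFT = 4, below) then allows up to
 * 1 << (20 - 4) = 65536 command queue entries, compared with the previous
 * hard cap of 1 << 8 = 256.
 */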

/*
 * Stream table.
 *
@@ -289,8 +296,9 @@
					FIELD_GET(ARM64_TCR_##fld, tcr))

/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8
#define CMDQ_ENT_SZ_SHIFT		4
#define CMDQ_ENT_DWORDS			((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
#define CMDQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)

#define CMDQ_CONS_ERR			GENMASK(30, 24)
#define CMDQ_ERR_CERROR_NONE_IDX	0
@@ -336,14 +344,16 @@
#define CMDQ_SYNC_1_MSIADDR_MASK	GENMASK_ULL(51, 2)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7
#define EVTQ_ENT_SZ_SHIFT		5
#define EVTQ_ENT_DWORDS			((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
#define EVTQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)

#define EVTQ_0_ID			GENMASK_ULL(7, 0)

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8
#define PRIQ_ENT_SZ_SHIFT		4
#define PRIQ_ENT_DWORDS			((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
#define PRIQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)

#define PRIQ_0_SID			GENMASK_ULL(31, 0)
#define PRIQ_0_SSID			GENMASK_ULL(51, 32)
@@ -798,7 +808,7 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
	cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);

	switch (ent->opcode) {
@@ -1785,13 +1795,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

@@ -1884,9 +1892,13 @@ static int arm_smmu_enable_ats(struct arm_smmu_master *master)

static void arm_smmu_disable_ats(struct arm_smmu_master *master)
{
	struct arm_smmu_cmdq_ent cmd;

	if (!master->ats_enabled || !dev_is_pci(master->dev))
		return;

	arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
	arm_smmu_atc_inv_master(master, &cmd);
	pci_disable_ats(to_pci_dev(master->dev));
	master->ats_enabled = false;
}
@@ -1906,7 +1918,6 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
	master->domain = NULL;
	arm_smmu_install_ste_for_dev(master);

	/* Disabling ATS invalidates all ATC entries */
	arm_smmu_disable_ats(master);
}

@@ -2270,17 +2281,32 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
				   size_t dwords, const char *name)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
	size_t qsz;

	do {
		qsz = ((1 << q->max_n_shift) * dwords) << 3;
		q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
					      GFP_KERNEL);
		if (q->base || qsz < PAGE_SIZE)
			break;

		q->max_n_shift--;
	} while (1);
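	/*
	 * The retry loop above trades queue depth for allocation success:
	 * if no contiguous DMA buffer of the requested size is available,
	 * max_n_shift is reduced one step at a time (halving the number of
	 * entries) until dmam_alloc_coherent() succeeds or the queue would
	 * shrink below one page.
	 */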

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		dev_err(smmu->dev,
			"failed to allocate queue (0x%zx bytes) for %s\n",
			qsz, name);
		return -ENOMEM;
	}

	if (!WARN_ON(q->base_dma & (qsz - 1))) {
		dev_info(smmu->dev, "allocated %u entries for %s\n",
			 1 << q->max_n_shift, name);
	}

	q->prod_reg	= arm_smmu_page1_fixup(prod_off, smmu);
	q->cons_reg	= arm_smmu_page1_fixup(cons_off, smmu);
	q->ent_dwords	= dwords;
@@ -2300,13 +2326,15 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
				      "cmdq");
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
				      "evtq");
	if (ret)
		return ret;

@@ -2315,7 +2343,8 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
				       "priq");
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
@@ -2879,7 +2908,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	/* Queue sizes, capped to ensure natural alignment */
	smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
					 FIELD_GET(IDR1_CMDQS, reg));
	if (!smmu->cmdq.q.max_n_shift) {