Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ebb4949e authored by Linus Torvalds
Browse files
Pull IOMMU updates from Joerg Roedel:

 - KVM PCIe/MSI passthrough support on ARM/ARM64

 - introduction of a core representation for individual hardware iommus

 - support for IOMMU privileged mappings as supported by some ARM IOMMUs

 - 16-bit SID support for ARM-SMMUv2

 - stream table optimization for ARM-SMMUv3

 - various fixes and other small improvements

* tag 'iommu-updates-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (61 commits)
  vfio/type1: Fix error return code in vfio_iommu_type1_attach_group()
  iommu: Remove iommu_register_instance interface
  iommu/exynos: Make use of iommu_device_register interface
  iommu/mediatek: Make use of iommu_device_register interface
  iommu/msm: Make use of iommu_device_register interface
  iommu/arm-smmu: Make use of the iommu_register interface
  iommu: Add iommu_device_set_fwnode() interface
  iommu: Make iommu_device_link/unlink take a struct iommu_device
  iommu: Add sysfs bindings for struct iommu_device
  iommu: Introduce new 'struct iommu_device'
  iommu: Rename struct iommu_device
  iommu: Rename iommu_get_instance()
  iommu: Fix static checker warning in iommu_insert_device_resv_regions
  iommu: Avoid unnecessary assignment of dev->iommu_fwspec
  iommu/mediatek: Remove bogus 'select' statements
  iommu/dma: Remove bogus dma_supported() implementation
  iommu/ipmmu-vmsa: Restrict IOMMU Domain Geometry to 32-bit address space
  iommu/vt-d: Don't over-free page table directories
  iommu/vt-d: Tylersburg isoch identity map check is done too late.
  iommu/vt-d: Fix some macros that are incorrectly specified in intel-iommu
  ...
parents 937b5b5d 8d2932dd
Loading
Loading
Loading
Loading
+12 −0
Original line number Original line Diff line number Diff line
@@ -12,3 +12,15 @@ Description: /sys/kernel/iommu_groups/ contains a number of sub-
		file if the IOMMU driver has chosen to register a more
		file if the IOMMU driver has chosen to register a more
		common name for the group.
		common name for the group.
Users:
Users:

What:		/sys/kernel/iommu_groups/reserved_regions
Date: 		January 2017
KernelVersion:  v4.11
Contact: 	Eric Auger <eric.auger@redhat.com>
Description:    /sys/kernel/iommu_groups/reserved_regions list IOVA
		regions that are reserved. Not necessarily all
		reserved regions are listed. This is typically used to
		output direct-mapped, MSI, non mappable regions. Each
		region is described on a single line: the 1st field is
		the base IOVA, the second is the end IOVA and the third
		field describes the type of the region.
+10 −0
Original line number Original line Diff line number Diff line
@@ -143,3 +143,13 @@ So, this provides a way for drivers to avoid those error messages on calls
where allocation failures are not a problem, and shouldn't bother the logs.
where allocation failures are not a problem, and shouldn't bother the logs.


NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.

DMA_ATTR_PRIVILEGED
-------------------

Some advanced peripherals such as remote processors and GPUs perform
accesses to DMA buffers in both privileged "supervisor" and unprivileged
"user" modes.  This attribute is used to indicate to the DMA-mapping
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).
+30 −30
Original line number Original line Diff line number Diff line
@@ -1171,6 +1171,25 @@ core_initcall(dma_debug_do_init);


#ifdef CONFIG_ARM_DMA_USE_IOMMU
#ifdef CONFIG_ARM_DMA_USE_IOMMU


/*
 * Translate a DMA transfer direction plus mapping attributes into the
 * IOMMU page-protection flags used by iommu_map().
 *
 * DMA_ATTR_PRIVILEGED requests a mapping only accessible at the device's
 * elevated privilege level, expressed here as IOMMU_PRIV; the direction
 * then adds the read and/or write permission bits.  An unrecognised
 * direction yields no access bits beyond the privilege flag.
 */
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = (attrs & DMA_ATTR_PRIVILEGED) ? IOMMU_PRIV : 0;

	switch (dir) {
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	default:
		return prot;
	}
}

/* IOMMU */
/* IOMMU */


static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
@@ -1394,7 +1413,8 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 * Create a mapping in device IO address space for specified pages
 * Create a mapping in device IO address space for specified pages
 */
 */
static dma_addr_t
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -1419,7 +1439,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)


		len = (j - i) << PAGE_SHIFT;
		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
		ret = iommu_map(mapping->domain, iova, phys, len,
				IOMMU_READ|IOMMU_WRITE);
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		if (ret < 0)
		if (ret < 0)
			goto fail;
			goto fail;
		iova += len;
		iova += len;
@@ -1476,7 +1496,8 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
}
}


static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag)
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
{
	struct page *page;
	struct page *page;
	void *addr;
	void *addr;
@@ -1488,7 +1509,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
	if (!addr)
	if (!addr)
		return NULL;
		return NULL;


	*handle = __iommu_create_mapping(dev, &page, size);
	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == DMA_ERROR_CODE)
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;
		goto err_mapping;


@@ -1522,7 +1543,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,


	if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
	if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag);
					    coherent_flag, attrs);


	/*
	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * Following is a work-around (a.k.a. hack) to prevent pages
@@ -1537,7 +1558,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
	if (!pages)
	if (!pages)
		return NULL;
		return NULL;


	*handle = __iommu_create_mapping(dev, pages, size);
	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_ERROR_CODE)
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;
		goto err_buffer;


@@ -1672,27 +1693,6 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
					 GFP_KERNEL);
					 GFP_KERNEL);
}
}


/*
 * Map a DMA transfer direction onto the corresponding IOMMU
 * read/write permission flags; unknown directions grant no access.
 */
static int __dma_direction_to_prot(enum dma_data_direction dir)
{
	if (dir == DMA_BIDIRECTIONAL)
		return IOMMU_READ | IOMMU_WRITE;
	if (dir == DMA_TO_DEVICE)
		return IOMMU_READ;
	if (dir == DMA_FROM_DEVICE)
		return IOMMU_WRITE;
	return 0;
}

/*
/*
 * Map a part of the scatter-gather list into contiguous io address space
 * Map a part of the scatter-gather list into contiguous io address space
 */
 */
@@ -1722,7 +1722,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);


		prot = __dma_direction_to_prot(dir);
		prot = __dma_info_to_prot(dir, attrs);


		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
		if (ret < 0)
@@ -1930,7 +1930,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
	if (dma_addr == DMA_ERROR_CODE)
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;
		return dma_addr;


	prot = __dma_direction_to_prot(dir);
	prot = __dma_info_to_prot(dir, attrs);


	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
	if (ret < 0)
@@ -2036,7 +2036,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
	if (dma_addr == DMA_ERROR_CODE)
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;
		return dma_addr;


	prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;


	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
	if (ret < 0)
	if (ret < 0)
+3 −4
Original line number Original line Diff line number Diff line
@@ -558,7 +558,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 unsigned long attrs)
				 unsigned long attrs)
{
{
	bool coherent = is_device_dma_coherent(dev);
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	size_t iosize = size;
	void *addr;
	void *addr;


@@ -712,7 +712,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long attrs)
				   unsigned long attrs)
{
{
	bool coherent = is_device_dma_coherent(dev);
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_direction_to_prot(dir, coherent);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);


	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	if (!iommu_dma_mapping_error(dev, dev_addr) &&
@@ -770,7 +770,7 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);


	return iommu_dma_map_sg(dev, sgl, nelems,
	return iommu_dma_map_sg(dev, sgl, nelems,
			dma_direction_to_prot(dir, coherent));
				dma_info_to_prot(dir, coherent, attrs));
}
}


static void __iommu_unmap_sg_attrs(struct device *dev,
static void __iommu_unmap_sg_attrs(struct device *dev,
@@ -799,7 +799,6 @@ static struct dma_map_ops iommu_dma_ops = {
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.dma_supported = iommu_dma_supported,
	.mapping_error = iommu_dma_mapping_error,
	.mapping_error = iommu_dma_mapping_error,
};
};


+1 −1
Original line number Original line Diff line number Diff line
@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
		if (!iort_fwnode)
		if (!iort_fwnode)
			return NULL;
			return NULL;


		ops = iommu_get_instance(iort_fwnode);
		ops = iommu_ops_from_fwnode(iort_fwnode);
		if (!ops)
		if (!ops)
			return NULL;
			return NULL;


Loading