Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b3a4bcaa authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull IOMMU Updates from Joerg Roedel:
 "A few patches have been queued up for this merge window:

   - improvements for the ARM-SMMU driver (IOMMU_EXEC support, IOMMU
     group support)
   - updates and fixes for the shmobile IOMMU driver
   - various fixes to generic IOMMU code and the Intel IOMMU driver
   - some cleanups in IOMMU drivers (dev_is_pci() usage)"

* tag 'iommu-updates-v3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (36 commits)
  iommu/vt-d: Fix signedness bug in alloc_irte()
  iommu/vt-d: free all resources if failed to initialize DMARs
  iommu/vt-d, trivial: clean sparse warnings
  iommu/vt-d: fix wrong return value of dmar_table_init()
  iommu/vt-d: release invalidation queue when destroying IOMMU unit
  iommu/vt-d: fix access after free issue in function free_dmar_iommu()
  iommu/vt-d: keep shared resources when failed to initialize iommu devices
  iommu/vt-d: fix invalid memory access when freeing DMAR irq
  iommu/vt-d, trivial: simplify code with existing macros
  iommu/vt-d, trivial: use defined macro instead of hardcoding
  iommu/vt-d: mark internal functions as static
  iommu/vt-d, trivial: clean up unused code
  iommu/vt-d, trivial: check suitable flag in function detect_intel_iommu()
  iommu/vt-d, trivial: print correct domain id of static identity domain
  iommu/vt-d, trivial: refine support of 64bit guest address
  iommu/vt-d: fix resource leakage on error recovery path in iommu_init_domains()
  iommu/vt-d: fix a race window in allocating domain ID for virtual machines
  iommu/vt-d: fix PCI device reference leakage on error recovery path
  drm/msm: Fix link error with !MSM_IOMMU
  iommu/vt-d: use dedicated bitmap to track remapping entry allocation status
  ...
parents 17c7f854 dd1a1756
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@ config DRM_MSM
	depends on DRM
	depends on ARCH_MSM
	depends on ARCH_MSM8960
	depends on MSM_IOMMU
	select DRM_KMS_HELPER
	select SHMEM
	select TMPFS
+1 −0
Original line number Diff line number Diff line
@@ -207,6 +207,7 @@ config SHMOBILE_IOMMU
	bool "IOMMU for Renesas IPMMU/IPMMUI"
	default n
	depends on ARM
	depends on SH_MOBILE || COMPILE_TEST
	select IOMMU_API
	select ARM_DMA_USE_IOMMU
	select SHMOBILE_IPMMU
+2 −2
Original line number Diff line number Diff line
@@ -248,8 +248,8 @@ static bool check_device(struct device *dev)
	if (!dev || !dev->dma_mask)
		return false;

	/* No device or no PCI device */
	if (dev->bus != &pci_bus_type)
	/* No PCI device */
	if (!dev_is_pci(dev))
		return false;

	devid = get_device_id(dev);
+26 −7
Original line number Diff line number Diff line
@@ -24,7 +24,7 @@
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- 4k and 64k pages, with contiguous pte hints.
 *	- Up to 39-bit addressing
 *	- Up to 42-bit addressing (dependent on VA_BITS)
 *	- Context fault reporting
 */

@@ -61,12 +61,13 @@
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)

/* Page table bits */
#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
#define ARM_SMMU_PTE_XN			(((pteval_t)3) << 53)
#define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
#define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
#define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
#define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)
#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)

#if PAGE_SIZE == SZ_4K
#define ARM_SMMU_PTE_CONT_ENTRIES	16
@@ -1205,7 +1206,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
				   unsigned long pfn, int flags, int stage)
{
	pte_t *pte, *start;
	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;

	if (pmd_none(*pmd)) {
		/* Allocate a new set of tables */
@@ -1244,7 +1245,9 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
	}

	/* If no access, create a faulting entry to avoid TLB fills */
	if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
	if (flags & IOMMU_EXEC)
		pteval &= ~ARM_SMMU_PTE_XN;
	else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
		pteval &= ~ARM_SMMU_PTE_PAGE;

	pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1494,6 +1497,13 @@ static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *child, *parent, *smmu;
	struct arm_smmu_master *master = NULL;
	struct iommu_group *group;
	int ret;

	if (dev->archdata.iommu) {
		dev_warn(dev, "IOMMU driver already assigned to device\n");
		return -EINVAL;
	}

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(parent, &arm_smmu_devices, list) {
@@ -1526,13 +1536,23 @@ static int arm_smmu_add_device(struct device *dev)
	if (!master)
		return -ENODEV;

	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);
	dev->archdata.iommu = smmu;
	return 0;

	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
	iommu_group_remove_device(dev);
}

static struct iommu_ops arm_smmu_ops = {
@@ -1730,7 +1750,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
	 * allocation (PTRS_PER_PGD).
	 */
#ifdef CONFIG_64BIT
	/* Current maximum output size of 39 bits */
	smmu->s1_output_size = min(39UL, size);
#else
	smmu->s1_output_size = min(32UL, size);
@@ -1745,7 +1764,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
	} else {
#ifdef CONFIG_64BIT
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		size = min(39, arm_smmu_id_size_to_bits(size));
		size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
#else
		size = 32;
#endif
+82 −53
Original line number Diff line number Diff line
@@ -52,6 +52,9 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
@@ -100,7 +103,6 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
@@ -151,7 +153,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index ++;
@@ -162,6 +164,17 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
	return 0;
}

/*
 * dmar_free_dev_scope - release a DMAR device-scope array.
 * @devices: in/out pointer to the array of pci_dev pointers; reset to
 *           NULL after the array is freed.
 * @cnt:     in/out element count; driven down to 0 as entries are dropped.
 *
 * Drops the reference held on each pci_dev in the array (pci_dev_put),
 * frees the array itself, and clears both out-parameters so the caller
 * cannot reuse stale state. A NULL/empty array is a no-op.
 */
void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represent one DMA remapping hardware unit
@@ -193,25 +206,28 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
	return 0;
}

/*
 * dmar_free_drhd - tear down one DRHD (DMA remapping hardware) unit.
 * @dmaru: unit to free; the caller is expected to have unlinked it from
 *         dmar_drhd_units first (see dmar_free_unused_resources).
 *
 * Releases the device-scope array and the attached IOMMU instance, if
 * present, then frees the unit structure itself.
 */
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
	return dmar_parse_dev_scope((void *)(drhd + 1),
				    ((void *)drhd) + drhd->header.length,
				    &dmaru->devices_cnt, &dmaru->devices,
				    drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
@@ -423,7 +439,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
@@ -432,7 +448,7 @@ int __init dmar_dev_scope_init(void)
	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
@@ -456,24 +472,23 @@ int __init dmar_table_init(void)
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
	if (ret) {
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("parse DMAR table failure.\n");
		return ret;
		} else  if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return 0;
	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
@@ -488,7 +503,7 @@ static void warn_invalid_dmar(u64 addr, const char *message)
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

int __init check_zero_address(void)
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
@@ -546,14 +561,6 @@ int __init detect_intel_iommu(void)
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *) dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
@@ -565,7 +572,7 @@ int __init detect_intel_iommu(void)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
@@ -647,7 +654,7 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
	return err;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
@@ -721,12 +728,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
	return err;
}

void free_iommu(struct intel_iommu *iommu)
static void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

	free_dmar_iommu(iommu);
	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);
@@ -1050,7 +1064,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = 0;
		iommu->qi = NULL;
		return -ENOMEM;
	}

@@ -1060,7 +1074,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = 0;
		iommu->qi = NULL;
		return -ENOMEM;
	}

@@ -1111,9 +1125,7 @@ static const char *irq_remap_fault_reasons[] =
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX 	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1303,15 +1315,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		ret = dmar_set_interrupt(iommu);
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1366,4 +1377,22 @@ int __init dmar_ir_support(void)
		return 0;
	return dmar->flags & 0x1;
}

/*
 * dmar_free_unused_resources - reclaim DMAR bookkeeping when unused.
 *
 * Registered as a late_initcall: if neither interrupt remapping nor the
 * Intel IOMMU ended up enabled by the time late initcalls run, none of
 * the parsed DRHD state will ever be used, so unlink and free every unit.
 * Uses the _safe list iterator because entries are deleted mid-walk.
 * Always returns 0 per initcall convention.
 */
static int __init dmar_free_unused_resources(void)
{
	/* DMAR units are in use; keep all parsed state allocated */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
Loading