
Commit 00948725 authored by Joerg Roedel

Merge branches 'dma-debug', 'iommu/fixes', 'arm/tegra', 'arm/exynos', 'x86/amd', 'x86/vt-d' and 'x86/amd-irq-remapping' into next

Conflicts:
	drivers/iommu/amd_iommu_init.c
+0 −63
/*
 * IOMMU API for SMMU in Tegra30
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef	MACH_SMMU_H
#define	MACH_SMMU_H

enum smmu_hwgrp {
	HWGRP_AFI,
	HWGRP_AVPC,
	HWGRP_DC,
	HWGRP_DCB,
	HWGRP_EPP,
	HWGRP_G2,
	HWGRP_HC,
	HWGRP_HDA,
	HWGRP_ISP,
	HWGRP_MPE,
	HWGRP_NV,
	HWGRP_NV2,
	HWGRP_PPCS,
	HWGRP_SATA,
	HWGRP_VDE,
	HWGRP_VI,

	HWGRP_COUNT,

	HWGRP_END = ~0,
};

#define HWG_AFI		(1 << HWGRP_AFI)
#define HWG_AVPC	(1 << HWGRP_AVPC)
#define HWG_DC		(1 << HWGRP_DC)
#define HWG_DCB		(1 << HWGRP_DCB)
#define HWG_EPP		(1 << HWGRP_EPP)
#define HWG_G2		(1 << HWGRP_G2)
#define HWG_HC		(1 << HWGRP_HC)
#define HWG_HDA		(1 << HWGRP_HDA)
#define HWG_ISP		(1 << HWGRP_ISP)
#define HWG_MPE		(1 << HWGRP_MPE)
#define HWG_NV		(1 << HWGRP_NV)
#define HWG_NV2		(1 << HWGRP_NV2)
#define HWG_PPCS	(1 << HWGRP_PPCS)
#define HWG_SATA	(1 << HWGRP_SATA)
#define HWG_VDE		(1 << HWGRP_VDE)
#define HWG_VI		(1 << HWGRP_VI)

#endif	/* MACH_SMMU_H */
+1 −1
@@ -42,7 +42,7 @@ config AMD_IOMMU
	select PCI_PRI
	select PCI_PASID
	select IOMMU_API
-	depends on X86_64 && PCI && ACPI
+	depends on X86_64 && PCI && ACPI && X86_IO_APIC
	---help---
	  With this option you can enable support for AMD IOMMU hardware in
	  your system. An IOMMU is a hardware component which provides
+509 −5
@@ -31,6 +31,12 @@
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
@@ -39,6 +45,7 @@

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

@@ -72,6 +79,9 @@ static DEFINE_SPINLOCK(iommu_pd_list_lock);
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);

LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
@@ -92,6 +102,8 @@ struct iommu_cmd {
	u32 data[4];
};

struct kmem_cache *amd_iommu_irq_cache;

static void update_domain(struct protection_domain *domain);
static int __init alloc_passthrough_domain(void);

@@ -686,7 +698,7 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)

		/*
		 * Release iommu->lock because ppr-handling might need to
-		 * re-aquire it
+		 * re-acquire it
		 */
		spin_unlock_irqrestore(&iommu->lock, flags);

@@ -804,7 +816,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-	if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
+	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

@@ -899,6 +911,13 @@ static void build_inv_all(struct iommu_cmd *cmd)
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_IRT);
}

/*
 * Writes the command to the IOMMUs command buffer and informs the
 * hardware about the new command.
@@ -1020,12 +1039,32 @@ static void iommu_flush_all(struct amd_iommu *iommu)
	iommu_completion_wait(iommu);
}

static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_irt(&cmd, devid);

	iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_irt_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
		iommu_flush_irt(iommu, devid);

	iommu_completion_wait(iommu);
}

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (iommu_feature(iommu, FEATURE_IA)) {
		iommu_flush_all(iommu);
	} else {
		iommu_flush_dte_all(iommu);
		iommu_flush_irt_all(iommu);
		iommu_flush_tlb_all(iommu);
	}
}
@@ -2155,7 +2194,7 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
}

/*
- * If a device is not yet associated with a domain, this function does
+ * If a device is not yet associated with a domain, this function
 * assigns it visible for the hardware
 */
static int attach_device(struct device *dev,
@@ -2405,7 +2444,7 @@ static struct protection_domain *get_domain(struct device *dev)
	if (domain != NULL)
		return domain;

-	/* Device not bount yet - bind it */
+	/* Device not bound yet - bind it */
	dma_dom = find_protection_domain(devid);
	if (!dma_dom)
		dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
@@ -2944,7 +2983,7 @@ static void __init prealloc_protection_domains(void)
			alloc_passthrough_domain();
			dev_data->passthrough = true;
			attach_device(&dev->dev, pt_domain);
			pr_info("AMD-Vi: Using passthough domain for device %s\n",
			pr_info("AMD-Vi: Using passthrough domain for device %s\n",
				dev_name(&dev->dev));
		}

@@ -3316,6 +3355,8 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return 1;
	case IOMMU_CAP_INTR_REMAP:
		return irq_remapping_enabled;
	}

	return 0;
@@ -3743,3 +3784,466 @@ int amd_iommu_device_info(struct pci_dev *pdev,
	return 0;
}
EXPORT_SYMBOL(amd_iommu_device_info);

#ifdef CONFIG_IRQ_REMAP

/*****************************************************************************
 *
 * Interrupt Remapping Implementation
 *
 *****************************************************************************/

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
#define DTE_IRQ_REMAP_ENABLE    1ULL

static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
{
	u64 dte;

	dte	= amd_iommu_dev_table[devid].data[2];
	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
	dte	|= virt_to_phys(table->table);
	dte	|= DTE_IRQ_REMAP_INTCTL;
	dte	|= DTE_IRQ_TABLE_LEN;
	dte	|= DTE_IRQ_REMAP_ENABLE;

	amd_iommu_dev_table[devid].data[2] = dte;
}

#define IRTE_ALLOCATED (~1U)

static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
{
	struct irq_remap_table *table = NULL;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 alias;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		goto out_unlock;

	table = irq_lookup_table[devid];
	if (table)
		goto out;

	alias = amd_iommu_alias_table[devid];
	table = irq_lookup_table[alias];
	if (table) {
		irq_lookup_table[devid] = table;
		set_dte_irq_entry(devid, table);
		iommu_flush_dte(iommu, devid);
		goto out;
	}

	/* Nothing there yet, allocate new irq remapping table */
	table = kzalloc(sizeof(*table), GFP_ATOMIC);
	if (!table)
		goto out;

	if (ioapic)
		/* Keep the first 32 indexes free for IOAPIC interrupts */
		table->min_index = 32;

	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
	if (!table->table) {
		kfree(table);
		table = NULL;
		goto out;
	}

	memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));

	if (ioapic) {
		int i;

		for (i = 0; i < 32; ++i)
			table->table[i] = IRTE_ALLOCATED;
	}

	irq_lookup_table[devid] = table;
	set_dte_irq_entry(devid, table);
	iommu_flush_dte(iommu, devid);
	if (devid != alias) {
		irq_lookup_table[alias] = table;
		set_dte_irq_entry(devid, table);
		iommu_flush_dte(iommu, alias);
	}

out:
	iommu_completion_wait(iommu);

out_unlock:
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return table;
}

static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
{
	struct irq_remap_table *table;
	unsigned long flags;
	int index, c;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENODEV;

	spin_lock_irqsave(&table->lock, flags);

	/* Scan table for free entries */
	for (c = 0, index = table->min_index;
	     index < MAX_IRQS_PER_TABLE;
	     ++index) {
		if (table->table[index] == 0)
			c += 1;
		else
			c = 0;

		if (c == count)	{
			struct irq_2_iommu *irte_info;

			for (; c != 0; --c)
				table->table[index - c + 1] = IRTE_ALLOCATED;

			index -= count - 1;

			irte_info             = &cfg->irq_2_iommu;
			irte_info->sub_handle = devid;
			irte_info->irte_index = index;
			irte_info->iommu      = (void *)cfg;

			goto out;
		}
	}

	index = -ENOSPC;

out:
	spin_unlock_irqrestore(&table->lock, flags);

	return index;
}

static int get_irte(u16 devid, int index, union irte *irte)
{
	struct irq_remap_table *table;
	unsigned long flags;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);
	irte->val = table->table[index];
	spin_unlock_irqrestore(&table->lock, flags);

	return 0;
}

static int modify_irte(u16 devid, int index, union irte irte)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return -EINVAL;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);
	table->table[index] = irte.val;
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);

	return 0;
}

static void free_irte(u16 devid, int index)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return;

	table = get_irq_table(devid, false);
	if (!table)
		return;

	spin_lock_irqsave(&table->lock, flags);
	table->table[index] = 0;
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
}

static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
			      unsigned int destination, int vector,
			      struct io_apic_irq_attr *attr)
{
	struct irq_remap_table *table;
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;
	union irte irte;
	int ioapic_id;
	int index;
	int devid;
	int ret;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_iommu;
	ioapic_id = mpc_ioapic_id(attr->ioapic);
	devid     = get_ioapic_devid(ioapic_id);

	if (devid < 0)
		return devid;

	table = get_irq_table(devid, true);
	if (table == NULL)
		return -ENOMEM;

	index = attr->ioapic_pin;

	/* Setup IRQ remapping info */
	irte_info->sub_handle = devid;
	irte_info->irte_index = index;
	irte_info->iommu      = (void *)cfg;

	/* Setup IRTE for IOMMU */
	irte.val		= 0;
	irte.fields.vector      = vector;
	irte.fields.int_type    = apic->irq_delivery_mode;
	irte.fields.destination = destination;
	irte.fields.dm          = apic->irq_dest_mode;
	irte.fields.valid       = 1;

	ret = modify_irte(devid, index, irte);
	if (ret)
		return ret;

	/* Setup IOAPIC entry */
	memset(entry, 0, sizeof(*entry));

	entry->vector        = index;
	entry->mask          = 0;
	entry->trigger       = attr->trigger;
	entry->polarity      = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

static int set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_2_iommu *irte_info;
	unsigned int dest, irq;
	struct irq_cfg *cfg;
	union irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -1;

	cfg       = data->chip_data;
	irq       = data->irq;
	irte_info = &cfg->irq_2_iommu;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
		return -EBUSY;

	if (assign_irq_vector(irq, cfg, mask))
		return -EBUSY;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.fields.vector      = cfg->vector;
	irte.fields.destination = dest;

	modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);

	return 0;
}

static int free_irq(int irq)
{
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_iommu;

	free_irte(irte_info->sub_handle, irte_info->irte_index);

	return 0;
}

static void compose_msi_msg(struct pci_dev *pdev,
			    unsigned int irq, unsigned int dest,
			    struct msi_msg *msg, u8 hpet_id)
{
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;
	union irte irte;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return;

	irte_info = &cfg->irq_2_iommu;

	irte.val		= 0;
	irte.fields.vector	= cfg->vector;
	irte.fields.int_type    = apic->irq_delivery_mode;
	irte.fields.destination	= dest;
	irte.fields.dm		= apic->irq_dest_mode;
	irte.fields.valid	= 1;

	modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO;
	msg->data       = irte_info->irte_index;
}

static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
{
	struct irq_cfg *cfg;
	int index;
	u16 devid;

	if (!pdev)
		return -EINVAL;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	devid = get_device_id(&pdev->dev);
	index = alloc_irq_index(cfg, devid, nvec);

	return index < 0 ? MAX_IRQS_PER_TABLE : index;
}

static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			 int index, int offset)
{
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;
	u16 devid;

	if (!pdev)
		return -EINVAL;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	if (index >= MAX_IRQS_PER_TABLE)
		return 0;

	devid		= get_device_id(&pdev->dev);
	irte_info	= &cfg->irq_2_iommu;

	irte_info->sub_handle = devid;
	irte_info->irte_index = index + offset;
	irte_info->iommu      = (void *)cfg;

	return 0;
}

static int setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;
	int index, devid;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_iommu;
	devid     = get_hpet_devid(id);
	if (devid < 0)
		return devid;

	index = alloc_irq_index(cfg, devid, 1);
	if (index < 0)
		return index;

	irte_info->sub_handle = devid;
	irte_info->irte_index = index;
	irte_info->iommu      = (void *)cfg;

	return 0;
}

struct irq_remap_ops amd_iommu_irq_ops = {
	.supported		= amd_iommu_supported,
	.prepare		= amd_iommu_prepare,
	.enable			= amd_iommu_enable,
	.disable		= amd_iommu_disable,
	.reenable		= amd_iommu_reenable,
	.enable_faulting	= amd_iommu_enable_faulting,
	.setup_ioapic_entry	= setup_ioapic_entry,
	.set_affinity		= set_affinity,
	.free_irq		= free_irq,
	.compose_msi_msg	= compose_msi_msg,
	.msi_alloc_irq		= msi_alloc_irq,
	.msi_setup_irq		= msi_setup_irq,
	.setup_hpet_msi		= setup_hpet_msi,
};
#endif
+233 −20
@@ -26,16 +26,18 @@
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

/*
 * definitions for the ACPI scanning code
@@ -55,6 +57,10 @@
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL		0x48

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
@@ -123,6 +129,7 @@ struct ivmd_header {
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
@@ -178,7 +185,13 @@ u16 *amd_iommu_alias_table;
struct amd_iommu **amd_iommu_rlookup_table;

/*
- * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
+ * This table is used to find the irq remapping table for a given device id
+ * quickly.
+ */
+struct irq_remap_table **irq_lookup_table;
+
+/*
+ * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;
@@ -478,7 +491,7 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)

/****************************************************************************
 *
- * The following functions belong the the code path which parses the ACPI table
+ * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
@@ -690,8 +703,33 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
	set_iommu_for_device(iommu, devid);
}

static int add_special_device(u8 type, u8 id, u16 devid)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
		return -EINVAL;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id    = id;
	entry->devid = devid;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else
		list = &hpet_map;

	list_add_tail(&entry->list, list);

	return 0;
}

/*
- * Reads the device exclusion range from ACPI and initialize IOMMU with
+ * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
@@ -717,7 +755,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
-static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
+static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
@@ -867,12 +905,43 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >>  8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			ret = add_special_device(type, handle, devid);
			if (ret)
				return ret;
			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

/* Initializes the device->iommu mapping for the driver */
@@ -912,6 +981,8 @@ static void __init free_iommu_all(void)
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
@@ -947,7 +1018,16 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)

	iommu->int_enabled = false;

-	init_iommu_from_acpi(iommu, h);
+	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	init_iommu_devices(iommu);

	return 0;
@@ -1115,10 +1195,12 @@ static void print_iommu_info(void)
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}
		}
		pr_cont("\n");
		}
	}
	if (irq_remapping_enabled)
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
}

static int __init amd_iommu_init_pci(void)
{
@@ -1141,7 +1223,7 @@ static int __init amd_iommu_init_pci(void)
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
- * in the system. Its a bit challenging because there could be multiple
+ * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
@@ -1199,7 +1281,7 @@ enable_faults:
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
- * gathered (like exclusion and unity mapping reanges).
+ * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

@@ -1308,7 +1390,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
-static void init_device_table(void)
+static void init_device_table_dma(void)
{
	u32 devid;

@@ -1318,6 +1400,27 @@ static void init_device_table(void)
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
@@ -1466,10 +1569,14 @@ static struct syscore_ops amd_iommu_syscore_ops = {

static void __init free_on_init_error(void)
{
-	amd_iommu_uninit_devices();
+	free_pages((unsigned long)irq_lookup_table,
+		   get_order(rlookup_table_size));

-	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-		   get_order(MAX_DOMAIN_ID/8));
+	if (amd_iommu_irq_cache) {
+		kmem_cache_destroy(amd_iommu_irq_cache);
+		amd_iommu_irq_cache = NULL;
+
+	}

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
@@ -1482,8 +1589,6 @@ static void __init free_on_init_error(void)

	free_iommu_all();

-	free_unity_maps();
-
#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
@@ -1494,6 +1599,33 @@ static void __init free_on_init_error(void)
#endif
}

static bool __init check_ioapic_information(void)
{
	int idx;

	for (idx = 0; idx < nr_ioapics; idx++) {
		int id = mpc_ioapic_id(idx);

		if (get_ioapic_devid(id) < 0) {
			pr_err(FW_BUG "AMD-Vi: IO-APIC[%d] not in IVRS table\n", id);
			pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug\n");
			return false;
		}
	}

	return true;
}

static void __init free_dma_resources(void)
{
	amd_iommu_uninit_devices();

	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_unity_maps();
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
@@ -1580,9 +1712,6 @@ static int __init early_amd_iommu_init(void)
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

-	/* init the device table */
-	init_device_table();
-
	/*
	 * let all alias entries point to itself
	 */
@@ -1605,10 +1734,35 @@ static int __init early_amd_iommu_init(void)
	if (ret)
		goto out;

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
				MAX_IRQS_PER_TABLE * sizeof(u32),
				IRQ_TABLE_ALIGNMENT,
				0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
@@ -1652,13 +1806,22 @@ static bool detect_ivrs(void)
	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	if (!disable_irq_remap)
		amd_iommu_irq_remap = true;

	return true;
}

static int amd_iommu_init_dma(void)
{
	struct amd_iommu *iommu;
	int ret;

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
@@ -1749,7 +1912,48 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	return iommu_go_to_state(IOMMU_ACPI_FINISHED);
}

int __init amd_iommu_supported(void)
{
	return amd_iommu_irq_remap ? 1 : 0;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;

	return 0;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
@@ -1762,8 +1966,17 @@ static int __init amd_iommu_init(void)

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
-		disable_iommus();
-		free_on_init_error();
+		free_dma_resources();
+		if (!irq_remapping_enabled) {
+			disable_iommus();
+			free_on_init_error();
+		} else {
+			struct amd_iommu *iommu;
+
+			uninit_device_table_dma();
+			for_each_iommu(iommu)
+				iommu_flush_all_caches(iommu);
+		}
	}

	return ret;
+8 −0

File changed. Preview size limit exceeded, changes collapsed.