Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b364776a authored by David Woodhouse
Browse files

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:

	drivers/pci/intel-iommu.c
parents 6da0b38f 6c8909b4
Loading
Loading
Loading
Loading
+7 −0
Original line number Original line Diff line number Diff line
@@ -2176,6 +2176,13 @@ M: maciej.sosnowski@intel.com
L:	linux-kernel@vger.kernel.org
L:	linux-kernel@vger.kernel.org
S:	Supported
S:	Supported


INTEL IOMMU (VT-d)
P:	David Woodhouse
M:	dwmw2@infradead.org
L:	iommu@lists.linux-foundation.org
T:	git://git.infradead.org/iommu-2.6.git
S:	Supported

INTEL IOP-ADMA DMA DRIVER
INTEL IOP-ADMA DMA DRIVER
P:	Dan Williams
P:	Dan Williams
M:	dan.j.williams@intel.com
M:	dan.j.williams@intel.com
+1 −1
Original line number Original line Diff line number Diff line
@@ -212,7 +212,7 @@ static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
/* Programs the physical address of the device table into the IOMMU hardware */
/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
{
	u32 entry;
	u64 entry;


	BUG_ON(iommu->mmio_base == NULL);
	BUG_ON(iommu->mmio_base == NULL);


+0 −16
Original line number Original line Diff line number Diff line
@@ -9,8 +9,6 @@
#include <asm/calgary.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
#include <asm/amd_iommu.h>


static int forbid_dac __read_mostly;

struct dma_mapping_ops *dma_ops;
struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
EXPORT_SYMBOL(dma_ops);


@@ -293,17 +291,3 @@ void pci_iommu_shutdown(void)
}
}
/* Must execute after PCI subsystem */
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected."
				 "Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif
+87 −32
Original line number Original line Diff line number Diff line
@@ -188,12 +188,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
	return 0;
	return 0;
}
}


static int __init
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_hardware_unit *drhd;
	static int include_all;
	static int include_all;
	int ret;
	int ret = 0;


	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;


@@ -277,14 +276,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
		drhd = (struct acpi_dmar_hardware_unit *)header;
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk (KERN_INFO PREFIX
		printk (KERN_INFO PREFIX
			"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
			"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
			drhd->flags, drhd->address);
			drhd->flags, (unsigned long long)drhd->address);
		break;
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;
		rmrr = (struct acpi_dmar_reserved_memory *)header;


		printk (KERN_INFO PREFIX
		printk (KERN_INFO PREFIX
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			rmrr->base_address, rmrr->end_address);
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
		break;
	}
	}
}
}
@@ -304,7 +304,7 @@ parse_dmar_table(void)
	if (!dmar)
	if (!dmar)
		return -ENODEV;
		return -ENODEV;


	if (dmar->width < PAGE_SHIFT_4K - 1) {
	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
		return -EINVAL;
	}
	}
@@ -455,8 +455,8 @@ void __init detect_intel_iommu(void)


	ret = early_dmar_detect();
	ret = early_dmar_detect();


#ifdef CONFIG_DMAR
	{
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		struct acpi_table_dmar *dmar;
		/*
		/*
		 * for now we will disable dma-remapping when interrupt
		 * for now we will disable dma-remapping when interrupt
@@ -465,29 +465,19 @@ void __init detect_intel_iommu(void)
		 * is added, we will not need this any more.
		 * is added, we will not need this any more.
		 */
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1) {
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
			       "x2apic and Intr-remapping.\n");
			printk(KERN_INFO
#endif
			       "Disabling IOMMU detection, because of missing "
			       "queued invalidation support for IOTLB "
			       "invalidation\n");
			printk(KERN_INFO
			       "Use \"nox2apic\", if you want to use Intel "
			       " IOMMU for DMA-remapping and don't care about "
			       " x2apic support\n");

			dmar_disabled = 1;
			return;
		}


#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
		    !dmar_disabled)
			iommu_detected = 1;
			iommu_detected = 1;
	}
#endif
#endif
	}
	}
}




int alloc_iommu(struct dmar_drhd_unit *drhd)
int alloc_iommu(struct dmar_drhd_unit *drhd)
@@ -503,7 +493,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)


	iommu->seq_id = iommu_allocated++;
	iommu->seq_id = iommu_allocated++;


	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
		goto error;
@@ -514,8 +504,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
	/* the registers might be more than one page */
	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
		cap_max_fault_reg_offset(iommu->cap));
	map_size = PAGE_ALIGN_4K(map_size);
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > PAGE_SIZE_4K) {
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
		if (!iommu->reg) {
@@ -526,8 +516,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)


	ver = readl(iommu->reg + DMAR_VER_REG);
	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)drhd->reg_base_addr,
		iommu->cap, iommu->ecap);
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);


	spin_lock_init(&iommu->register_lock);
	spin_lock_init(&iommu->register_lock);


@@ -580,11 +572,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)


	hw = qi->desc;
	hw = qi->desc;


	spin_lock(&qi->q_lock);
	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
	while (qi->free_cnt < 3) {
		spin_unlock(&qi->q_lock);
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		cpu_relax();
		spin_lock(&qi->q_lock);
		spin_lock_irqsave(&qi->q_lock, flags);
	}
	}


	index = qi->free_head;
	index = qi->free_head;
@@ -605,15 +597,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;
	qi->free_cnt -= 2;


	spin_lock_irqsave(&iommu->register_lock, flags);
	spin_lock(&iommu->register_lock);
	/*
	/*
	 * update the HW tail register indicating the presence of
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 * new descriptors.
	 */
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flags);
	spin_unlock(&iommu->register_lock);


	while (qi->desc_status[wait_index] != QI_DONE) {
	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		spin_unlock(&qi->q_lock);
		spin_unlock(&qi->q_lock);
		cpu_relax();
		cpu_relax();
		spin_lock(&qi->q_lock);
		spin_lock(&qi->q_lock);
@@ -622,7 +621,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
	qi->desc_status[index] = QI_DONE;
	qi->desc_status[index] = QI_DONE;


	reclaim_free_desc(qi);
	reclaim_free_desc(qi);
	spin_unlock(&qi->q_lock);
	spin_unlock_irqrestore(&qi->q_lock, flags);
}
}


/*
/*
@@ -638,6 +637,62 @@ void qi_global_iec(struct intel_iommu *iommu)
	qi_submit_sync(&desc, iommu);
	qi_submit_sync(&desc, iommu);
}
}


/*
 * Submit a context-cache invalidation request through the queued
 * invalidation interface and wait for it to complete.
 *
 * @did:  domain id the invalidation applies to
 * @sid:  source id (device) for device-selective granularity
 * @fm:   function mask used with the source id
 * @type: invalidation granularity (QI_CC_GRAN encoding)
 * @non_present_entry_flush: nonzero when flushing non-present entries
 *
 * Returns 1 when the flush can be skipped (hardware without caching
 * mode never caches non-present entries), 0 after the invalidation
 * has been submitted synchronously.
 */
int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		/* Only caching-mode hardware caches non-present entries. */
		if (!cap_caching_mode(iommu->cap))
			return 1;
		did = 0;
	}

	desc.high = 0;
	desc.low = QI_CC_TYPE | QI_CC_GRAN(type) | QI_CC_DID(did)
			| QI_CC_SID(sid) | QI_CC_FM(fm);

	qi_submit_sync(&desc, iommu);

	return 0;
}

/*
 * Submit an IOTLB invalidation request through the queued invalidation
 * interface and wait for it to complete.
 *
 * @did:        domain id the invalidation applies to
 * @addr:       base address for page-selective invalidation
 * @size_order: 2^size_order pages are invalidated starting at @addr
 * @type:       invalidation granularity (QI_IOTLB_GRAN encoding)
 * @non_present_entry_flush: nonzero when flushing non-present entries
 *
 * Returns 1 when the flush can be skipped (hardware without caching
 * mode never caches non-present entries), 0 after the invalidation
 * has been submitted synchronously.
 */
int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	struct qi_desc desc;
	int ih = 0;	/* invalidation hint: leaf entries included */
	u8 dr, dw;

	if (non_present_entry_flush) {
		/* Only caching-mode hardware caches non-present entries. */
		if (!cap_caching_mode(iommu->cap))
			return 1;
		did = 0;
	}

	/* Request read/write draining when the hardware supports it. */
	dr = cap_read_drain(iommu->cap) ? 1 : 0;
	dw = cap_write_drain(iommu->cap) ? 1 : 0;

	desc.low = QI_IOTLB_TYPE | QI_IOTLB_GRAN(type) | QI_IOTLB_DW(dw)
		| QI_IOTLB_DR(dr) | QI_IOTLB_DID(did);
	desc.high = QI_IOTLB_AM(size_order) | QI_IOTLB_IH(ih)
		| QI_IOTLB_ADDR(addr);

	qi_submit_sync(&desc, iommu);

	return 0;
}

/*
/*
 * Enable Queued Invalidation interface. This is a must to support
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * interrupt-remapping. Also used by DMA-remapping, which replaces
+129 −121

File changed.

Preview size limit exceeded, changes collapsed.

Loading