Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e948990f authored by Linus Torvalds
Browse files

Merge branch 'x86-fixes-for-linus' of...

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: fix early panic with boot option "nosmp"
  x86/oprofile: fix Intel cpu family 6 detection
  oprofile: fix CPU unplug panic in ppro_stop()
  AMD IOMMU: fix possible race while accessing iommu->need_sync
  AMD IOMMU: set device table entry for aliased devices
  AMD IOMMU: struct amd_iommu remove padding on 64 bit
  x86: fix broken flushing in GART nofullflush path
  x86: fix dma_mapping_error for 32bit x86
parents 2b218aea 9adc1386
Loading
Loading
Loading
Loading
+12 −12
Original line number Diff line number Diff line
@@ -251,13 +251,6 @@ struct amd_iommu {
	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there are more than one AMD IOMMU capability
	 * pointers.
	 */
	u16 cap_ptr;

	/* physical address of MMIO space */
	u64 mmio_phys;
	/* virtual address of MMIO space */
@@ -266,6 +259,13 @@ struct amd_iommu {
	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there are more than one AMD IOMMU capability
	 * pointers.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

@@ -284,19 +284,19 @@ struct amd_iommu {
	/* size of command buffer */
	u32 cmd_buf_size;

	/* event buffer virtual address */
	u8 *evt_buf;
	/* size of event buffer */
	u32 evt_buf_size;
	/* event buffer virtual address */
	u8 *evt_buf;
	/* MSI number for event interrupt */
	u16 evt_msi_num;

	/* if one, we need to send a completion wait command */
	int need_sync;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if one, we need to send a completion wait command */
	int need_sync;

	/* default dma_ops domain for that IOMMU */
	struct dma_ops_domain *default_dom;
};
+2 −4
Original line number Diff line number Diff line
@@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
/* Make sure we keep the same behaviour */
/*
 * Test whether a DMA handle returned by a mapping function indicates
 * failure.  Returns non-zero if the mapping failed.
 *
 * NOTE(review): the rendered diff had lost its +/- markers, fusing the
 * removed 32-bit branch ("#ifdef CONFIG_X86_32 / return 0; / #else") with
 * the added 64-bit one, leaving an unbalanced #ifdef and a duplicated
 * return.  Reconstructed below is the post-commit form: 32-bit now also
 * performs the bad_dma_address check instead of always reporting success.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_64
	/* 64-bit: defer to the per-device dma_mapping_ops hook when the
	 * implementation provides one. */
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
#endif
	/* Common path (and the whole of 32-bit): a mapping failed iff the
	 * returned handle equals the designated bad address. */
	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+16 −20
Original line number Diff line number Diff line
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	if (!ret)
		iommu->need_sync = 1;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	iommu->need_sync = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	if (!iommu->need_sync)
		goto out;

	iommu->need_sync = 0;

	ret = __iommu_queue_command(iommu, &cmd);

	if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)

	ret = iommu_queue_command(iommu, &cmd);

	iommu->need_sync = 1;

	return ret;
}

@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,

	ret = iommu_queue_command(iommu, &cmd);

	iommu->need_sync = 1;

	return ret;
}

@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	iommu_queue_inv_dev_entry(iommu, devid);

	iommu->need_sync = 1;
}

/*****************************************************************************
@@ -858,6 +857,9 @@ static int get_device_resources(struct device *dev,
		print_devid(_bdf, 1);
	}

	if (domain_for_device(_bdf) == NULL)
		set_device_domain(*iommu, *domain, _bdf);

	return 1;
}

@@ -1031,7 +1033,6 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
	if (addr == bad_dma_address)
		goto out;

	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);

out:
@@ -1060,7 +1061,6 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1127,7 +1127,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
			goto unmap;
	}

	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);

out:
@@ -1173,7 +1172,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		s->dma_address = s->dma_length = 0;
	}

	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1225,7 +1223,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
		goto out;
	}

	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);

out:
@@ -1257,7 +1254,6 @@ static void free_coherent(struct device *dev, size_t size,

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
+3 −0
Original line number Diff line number Diff line
@@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
		       "configuration information\n");

	if (!mpf)
		return;

	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
	       mpf->mpf_specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
+2 −0
Original line number Diff line number Diff line
@@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

Loading