Commit 8fd524b3 authored by FUJITA Tomonori, committed by Ingo Molnar

x86: Kill bad_dma_address variable



This kills the bad_dma_address variable, the old mechanism that let
IOMMU drivers make dma_mapping_error() work in an IOMMU-specific
way.

The bad_dma_address variable was introduced so that IOMMU drivers
could make dma_mapping_error() work in an IOMMU-specific way.
However, it can't handle systems that use both swiotlb and a
hardware IOMMU, so dma_map_ops->mapping_error was introduced to
solve that case.
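
A minimal sketch of that hook (names are hypothetical, not part of
this patch): an IOMMU driver with its own error convention supplies a
->mapping_error callback in its dma_map_ops instead of relying on the
global bad_dma_address:

  #include <linux/dma-mapping.h>

  /* Hypothetical driver-private error value, for illustration only. */
  #define MY_IOMMU_BAD_ADDR	(~(dma_addr_t)0)

  static int my_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
  {
  	return dma_addr == MY_IOMMU_BAD_ADDR;
  }

  static struct dma_map_ops my_iommu_dma_ops = {
  	/* .map_page, .map_sg, ... omitted for brevity */
  	.mapping_error	= my_iommu_mapping_error,
  };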

Intel VT-d, GART, and swiotlb already use
dma_map_ops->mapping_error. Calgary, AMD IOMMU, and nommu use zero
as the error DMA address. This adds DMA_ERROR_CODE and converts
them to use it (as SPARC and POWER do).
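
As a consumer-side sketch (again illustrative, not from this patch),
a driver that maps a page does not care which convention is in
effect: dma_mapping_error() uses ->mapping_error when the ops provide
it and otherwise compares against DMA_ERROR_CODE:

  /* "dev" and "page" are assumed to come from the surrounding driver code. */
  dma_addr_t handle;

  handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
  if (dma_mapping_error(dev, handle)) {
  	/* Calgary, AMD IOMMU and nommu now signal failure via DMA_ERROR_CODE (0). */
  	return -ENOMEM;
  }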

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: muli@il.ibm.com
Cc: joerg.roedel@amd.com
LKML-Reference: <1258287594-8777-3-git-send-email-fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 42109197
arch/x86/include/asm/dma-mapping.h  +3 −2
@@ -20,7 +20,8 @@
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

-extern dma_addr_t bad_dma_address;
+#define DMA_ERROR_CODE	0
+
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

-	return (dma_addr == bad_dma_address);
+	return (dma_addr == DMA_ERROR_CODE);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
arch/x86/kernel/amd_iommu.c  +10 −11
@@ -928,7 +928,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
	}

	if (unlikely(address == -1))
-		address = bad_dma_address;
+		address = DMA_ERROR_CODE;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

@@ -1544,7 +1544,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,

	pte  = dma_ops_get_pte(dom, address);
	if (!pte)
-		return bad_dma_address;
+		return DMA_ERROR_CODE;

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

@@ -1625,7 +1625,7 @@ static dma_addr_t __map_single(struct device *dev,
retry:
	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
-	if (unlikely(address == bad_dma_address)) {
+	if (unlikely(address == DMA_ERROR_CODE)) {
		/*
		 * setting next_address here will let the address
		 * allocator only scan the new allocated range in the
@@ -1646,7 +1646,7 @@ static dma_addr_t __map_single(struct device *dev,
	start = address;
	for (i = 0; i < pages; ++i) {
		ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
-		if (ret == bad_dma_address)
+		if (ret == DMA_ERROR_CODE)
			goto out_unmap;

		paddr += PAGE_SIZE;
@@ -1674,7 +1674,7 @@ static dma_addr_t __map_single(struct device *dev,

	dma_ops_free_addresses(dma_dom, address, pages);

-	return bad_dma_address;
+	return DMA_ERROR_CODE;
}

/*
@@ -1690,7 +1690,7 @@ static void __unmap_single(struct amd_iommu *iommu,
	dma_addr_t i, start;
	unsigned int pages;

-	if ((dma_addr == bad_dma_address) ||
+	if ((dma_addr == DMA_ERROR_CODE) ||
	    (dma_addr + size > dma_dom->aperture_size))
		return;

@@ -1732,7 +1732,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
	INC_STATS_COUNTER(cnt_map_single);

	if (!check_device(dev))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;

	dma_mask = *dev->dma_mask;

@@ -1743,12 +1743,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
		return (dma_addr_t)paddr;

	if (!dma_ops_domain(domain))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;

	spin_lock_irqsave(&domain->lock, flags);
	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
			    dma_mask);
-	if (addr == bad_dma_address)
+	if (addr == DMA_ERROR_CODE)
		goto out;

	iommu_completion_wait(iommu);
@@ -1957,7 +1957,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

-	if (*dma_addr == bad_dma_address) {
+	if (*dma_addr == DMA_ERROR_CODE) {
		spin_unlock_irqrestore(&domain->lock, flags);
		goto out_free;
	}
@@ -2110,7 +2110,6 @@ int __init amd_iommu_init_dma_ops(void)
		prealloc_protection_domains();

	iommu_detected = 1;
-	bad_dma_address = 0;
	swiotlb = 0;
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
arch/x86/kernel/pci-calgary_64.c  +10 −12
@@ -245,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
			if (panic_on_overflow)
				panic("Calgary: fix the allocator.\n");
			else
-				return bad_dma_address;
+				return DMA_ERROR_CODE;
		}
	}

@@ -261,11 +261,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *vaddr, unsigned int npages, int direction)
{
	unsigned long entry;
-	dma_addr_t ret = bad_dma_address;
+	dma_addr_t ret = DMA_ERROR_CODE;

	entry = iommu_range_alloc(dev, tbl, npages);

-	if (unlikely(entry == bad_dma_address))
+	if (unlikely(entry == DMA_ERROR_CODE))
		goto error;

	/* set the return dma address */
@@ -280,7 +280,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
error:
	printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
	       "iommu %p\n", npages, tbl);
-	return bad_dma_address;
+	return DMA_ERROR_CODE;
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -291,8 +291,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
	unsigned long flags;

	/* were we called with bad_dma_address? */
-	badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
-	if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
+	badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+	if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
		WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
		       "address 0x%Lx\n", dma_addr);
		return;
@@ -374,7 +374,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
		npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);

		entry = iommu_range_alloc(dev, tbl, npages);
-		if (entry == bad_dma_address) {
+		if (entry == DMA_ERROR_CODE) {
			/* makes sure unmap knows to stop */
			s->dma_length = 0;
			goto error;
@@ -392,7 +392,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
error:
	calgary_unmap_sg(dev, sg, nelems, dir, NULL);
	for_each_sg(sg, s, nelems, i) {
-		sg->dma_address = bad_dma_address;
+		sg->dma_address = DMA_ERROR_CODE;
		sg->dma_length = 0;
	}
	return 0;
@@ -447,7 +447,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,

	/* set up tces to cover the allocated range */
	mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
-	if (mapping == bad_dma_address)
+	if (mapping == DMA_ERROR_CODE)
		goto free;
	*dma_handle = mapping;
	return ret;
@@ -728,7 +728,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
	struct iommu_table *tbl = pci_iommu(dev->bus);

	/* reserve EMERGENCY_PAGES from bad_dma_address and up */
-	iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
+	iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);

	/* avoid the BIOS/VGA first 640KB-1MB region */
	/* for CalIOC2 - avoid the entire first MB */
@@ -1359,8 +1359,6 @@ static int __init calgary_iommu_init(void)
		return ret;
	}

-	bad_dma_address = 0x0;
-
	return 0;
}

arch/x86/kernel/pci-dma.c  +0 −3
@@ -43,9 +43,6 @@ int iommu_detected __read_mostly = 0;
 */
int iommu_pass_through __read_mostly;

-dma_addr_t bad_dma_address __read_mostly = 0;
-EXPORT_SYMBOL(bad_dma_address);
-
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
arch/x86/kernel/pci-nommu.c  +1 −1
@@ -33,7 +33,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
	dma_addr_t bus = page_to_phys(page) + offset;
	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
	flush_write_buffers();
	return bus;
}