
Commit 7feee249 authored by Kristoffer Glembo, committed by David S. Miller

sparc: Make mmu_inval_dma_area take void * instead of unsigned long to minimize casts.

parent 1b192740
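
The motivation is a common C idiom: a helper that takes its address argument as `void *` accepts any object pointer without a cast, whereas an `unsigned long` parameter forces an `(unsigned long)` cast at nearly every call site. A minimal standalone sketch of the before/after pattern (the `flush_range_ulong`, `flush_range_voidp`, and `buf` names below are illustrative, not kernel APIs):

#include <stdio.h>

/* Old style: address passed as an integer, so every pointer
 * argument needs an explicit (unsigned long) cast. */
static void flush_range_ulong(unsigned long va, unsigned long len)
{
	printf("flush 0x%lx..0x%lx\n", va, va + len);
}

/* New style: address passed as void *, so any object pointer
 * converts implicitly and call sites stay cast-free. */
static void flush_range_voidp(void *va, unsigned long len)
{
	printf("flush %p (+%lu bytes)\n", va, len);
}

int main(void)
{
	char buf[64];

	flush_range_ulong((unsigned long)buf, sizeof(buf)); /* cast required */
	flush_range_voidp(buf, sizeof(buf));                /* no cast */
	return 0;
}

The diff below applies exactly this change to mmu_inval_dma_area and its callers.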
+27 −26
@@ -53,7 +53,7 @@
#ifndef CONFIG_SPARC_LEON
#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
#else
-static inline void mmu_inval_dma_area(unsigned long va, unsigned long len)
+static inline void mmu_inval_dma_area(void *va, unsigned long len)
{
	if (!sparc_leon3_snooping_enabled())
		leon_flush_dcache_all();
@@ -284,7 +284,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
-	mmu_inval_dma_area(va, len_total);
+	mmu_inval_dma_area((void *)va, len_total);

	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
@@ -435,7 +435,7 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
				  dma_addr_t *pba, gfp_t gfp)
{
	unsigned long len_total = PAGE_ALIGN(len);
-	unsigned long va;
+	void *va;
	struct resource *res;
	int order;

@@ -447,30 +447,34 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
	}

	order = get_order(len_total);
-	va = __get_free_pages(GFP_KERNEL, order);
-	if (va == 0) {
+	va = (void *) __get_free_pages(GFP_KERNEL, order);
+	if (va == NULL) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
-		return NULL;
+		goto err_nopages;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
-		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
-		return NULL;
+		goto err_nomem;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
-		free_pages(va, order);
-		kfree(res);
-		return NULL;
+		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;

+err_nova:
+	kfree(res);
+err_nomem:
+	free_pages((unsigned long)va, order);
+err_nopages:
+	return NULL;
}

/* Free and unmap a consistent DMA buffer.
@@ -485,7 +489,7 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
				dma_addr_t ba)
{
	struct resource *res;
-	unsigned long pgp;
+	void *pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
@@ -505,14 +509,14 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
		return;
	}

-	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
+	pgp = phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

-	free_pages(pgp, get_order(n));
+	free_pages((unsigned long)pgp, get_order(n));
}

/*
@@ -531,7 +535,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
			     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (dir != PCI_DMA_TODEVICE)
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba), PAGE_ALIGN(size));
+		mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));
}

/* Map a set of buffers described by scatterlist in streaming
@@ -579,8 +583,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
+			mmu_inval_dma_area(page_address(sg_page(sg)),
					   PAGE_ALIGN(sg->length));
		}
	}
@@ -600,7 +603,7 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
				      size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
+		mmu_inval_dma_area(phys_to_virt(ba),
				   PAGE_ALIGN(size));
	}
}
@@ -609,7 +612,7 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
					 size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
+		mmu_inval_dma_area(phys_to_virt(ba),
				   PAGE_ALIGN(size));
	}
}
@@ -629,8 +632,7 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
+			mmu_inval_dma_area(page_address(sg_page(sg)),
					   PAGE_ALIGN(sg->length));
		}
	}
@@ -645,8 +647,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
+			mmu_inval_dma_area(page_address(sg_page(sg)),
					   PAGE_ALIGN(sg->length));
		}
	}
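
Beyond the type change, the pci32_alloc_coherent hunk also replaces duplicated cleanup (a `free_pages`/`kfree`/`return NULL` sequence repeated at each failure point) with the kernel's usual goto-based unwinding, where each label releases only what was allocated before the failure, in reverse order. A minimal user-space sketch of that pattern (all names here are illustrative, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buffer {
	void *pages;
	char *name;
};

/* Acquire three things in sequence; on failure, jump to the label
 * that undoes exactly the allocations that already succeeded. */
static struct buffer *buffer_create(size_t len, const char *name)
{
	struct buffer *buf;

	buf = malloc(sizeof(*buf));
	if (buf == NULL)
		goto err_nobuf;

	buf->pages = malloc(len);
	if (buf->pages == NULL)
		goto err_nopages;

	buf->name = strdup(name);
	if (buf->name == NULL)
		goto err_noname;

	return buf;

err_noname:			/* unwind in reverse order of allocation */
	free(buf->pages);
err_nopages:
	free(buf);
err_nobuf:
	return NULL;
}

int main(void)
{
	struct buffer *buf = buffer_create(4096, "demo");

	if (buf) {
		printf("allocated %s\n", buf->name);
		free(buf->name);
		free(buf->pages);
		free(buf);
	}
	return 0;
}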