Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit be20902b authored by Russell King
Browse files

ARM: use ARM_DMA_ZONE_SIZE to adjust the zone sizes



Rather than each platform providing its own function to adjust the
zone sizes, use the new ARM_DMA_ZONE_SIZE definition to perform this
adjustment.  This ensures that the actual DMA zone size and the
ISA_DMA_THRESHOLD/MAX_DMA_ADDRESS definitions are consistent with
each other, and moves this complexity out of the platform code.

Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 2fb3ec5c
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -185,14 +185,6 @@ static struct sa1111_dev_info sa1111_devices[] = {
	},
};

/*
 * Carve the bottom 1MB of bank 0 into zone 0 (presumably ZONE_DMA for
 * the SA-1111's limited DMA reach — the commit message confirms this is
 * a DMA-zone adjustment); the remainder of the bank becomes zone 1.
 * NOTE(review): quoted from a commit diff — code REMOVED by be20902b in
 * favor of ARM_DMA_ZONE_SIZE; kept byte-identical apart from comments.
 */
void __init sa1111_adjust_zones(unsigned long *size, unsigned long *holes)
{
	unsigned int sz = SZ_1M >> PAGE_SHIFT;	/* 1MB expressed in page frames */

	/* zone 1 gets everything above the first 1MB; zone 0 is capped at 1MB */
	size[1] = size[0] - sz;
	size[0] = sz;
}

/*
 * SA1111 interrupt support.  Since clearing an IRQ while there are
 * active IRQs causes the interrupt output to pulse, the upper levels
+0 −6
Original line number Diff line number Diff line
@@ -215,12 +215,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#define ISA_DMA_THRESHOLD	(PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
#endif

/*
 * Provide a no-op default when the platform supplies no
 * arch_adjust_zones() of its own.  A custom implementation only makes
 * sense when ZONE_DMA exists, so reject that combination at build time.
 * NOTE(review): quoted from a commit diff — this fallback was REMOVED
 * by be20902b along with the arch_adjust_zones() mechanism itself.
 */
#ifndef arch_adjust_zones
#define arch_adjust_zones(size,holes) do { } while (0)
#elif !defined(CONFIG_ZONE_DMA)
#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
#endif

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
+0 −15
Original line number Diff line number Diff line
@@ -41,26 +41,11 @@
 */
/* Size of the consistent (coherent) DMA mapping region: 14MB. */
#define CONSISTENT_DMA_SIZE (14<<20)

#ifndef __ASSEMBLY__
/*
 * Restrict DMA-able region to workaround silicon bug.  The bug
 * restricts buffers available for DMA to video hardware to be
 * below 128M
 */
/*
 * Split bank 0 at the 128MB mark: pages below it stay in zone 0
 * (reachable by the video hardware per the bug note above), the rest
 * move to zone 1.
 * NOTE(review): this hunk interleaves diff lines — the function and the
 * arch_adjust_zones macro were REMOVED by commit be20902b and replaced
 * by the ARM_DMA_ZONE_SIZE definition further down.
 */
static inline void
__arch_adjust_zones(unsigned long *size, unsigned long *holes)
{
	unsigned int sz = (128<<20) >> PAGE_SHIFT;	/* 128MB in page frames */

	size[1] = size[0] - sz;
	size[0] = sz;
}

/* Only bother adjusting when bank 0 actually exceeds 128MB. */
#define arch_adjust_zones(zone_size, holes) \
        if ((meminfo.bank[0].size >> 20) > 128) __arch_adjust_zones(zone_size, holes)

/* Replacement added by this commit: generic code derives the DMA zone
 * split from this single definition. */
#define ARM_DMA_ZONE_SIZE	SZ_128M

#endif

#endif /* __ASM_ARCH_MEMORY_H */
+0 −23
Original line number Diff line number Diff line
@@ -342,29 +342,6 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
	return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
}

/*
 * Only first 64MB of memory can be accessed via PCI.
 * We use GFP_DMA to allocate safe buffers to do map/unmap.
 * This is really ugly and we need a better way of specifying
 * DMA-capable regions of memory.
 */
/*
 * NOTE(review): quoted from a commit diff — this function was REMOVED
 * by be20902b; the platform now defines ARM_DMA_ZONE_SIZE (SZ_64M)
 * instead.  Kept byte-identical apart from comments.
 */
void __init ixp4xx_adjust_zones(unsigned long *zone_size,
	unsigned long *zhole_size)
{
	unsigned int sz = SZ_64M >> PAGE_SHIFT;	/* 64MB in page frames */

	/*
	 * Only adjust if > 64M on current system
	 */
	if (zone_size[0] <= sz)
		return;

	/* Cap zone 0 at 64MB; the excess pages, and any hole previously
	 * accounted to zone 0, move to zone 1. */
	zone_size[1] = zone_size[0] - sz;
	zone_size[0] = sz;
	zhole_size[1] = zhole_size[0];
	zhole_size[0] = 0;
}

void __init ixp4xx_pci_preinit(void)
{
	unsigned long cpuid = read_cpuid_id();
+1 −8
Original line number Diff line number Diff line
@@ -14,15 +14,8 @@
 */
#define PLAT_PHYS_OFFSET	UL(0x00000000)

/*
 * NOTE(review): this hunk interleaves removed and added diff lines.
 * The ixp4xx_adjust_zones() declaration and macro below were REMOVED
 * by commit be20902b; the ARM_DMA_ZONE_SIZE definition is what remains.
 */
#if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)

void ixp4xx_adjust_zones(unsigned long *size, unsigned long *holes);

#define arch_adjust_zones(size, holes) \
	ixp4xx_adjust_zones(size, holes)

/* Only the first 64MB is PCI-reachable (see ixp4xx pci code), so size
 * the DMA zone accordingly. */
#ifdef CONFIG_PCI
#define ARM_DMA_ZONE_SIZE	SZ_64M

#endif

#endif
Loading