Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fbf2b1f9 authored by Russell King's avatar Russell King Committed by Russell King
Browse files

Merge branch 'highmem' into devel

parents 9a38e989 053a96ca
Loading
Loading
Loading
Loading
+8 −1
Original line number Original line Diff line number Diff line
@@ -29,7 +29,14 @@ ffff0000 ffff0fff CPU vector page.
				CPU supports vector relocation (control
				CPU supports vector relocation (control
				register V bit.)
				register V bit.)


ffc00000	fffeffff	DMA memory mapping region.  Memory returned
fffe0000	fffeffff	XScale cache flush area.  This is used
				in proc-xscale.S to flush the whole data
				cache.  Free for other usage on non-XScale.

fff00000	fffdffff	Fixmap mapping region.  Addresses provided
				by fix_to_virt() will be located here.

ffc00000	ffefffff	DMA memory mapping region.  Memory returned
				by the dma_alloc_xxx functions will be
				by the dma_alloc_xxx functions will be
				dynamically mapped here.
				dynamically mapped here.


+17 −0
Original line number Original line Diff line number Diff line
@@ -939,6 +939,23 @@ config NODES_SHIFT
	default "2"
	default "2"
	depends on NEED_MULTIPLE_NODES
	depends on NEED_MULTIPLE_NODES


config HIGHMEM
	bool "High Memory Support (EXPERIMENTAL)"
	depends on MMU && EXPERIMENTAL
	help
	  The address space of ARM processors is only 4 Gigabytes large
	  and it has to accommodate user address space, kernel address
	  space as well as some memory mapped IO. That means that, if you
	  have a large amount of physical memory and/or IO, not all of the
	  memory can be "permanently mapped" by the kernel. The physical
	  memory that is not permanently mapped is called "high memory".

	  Depending on the selected kernel/user memory split, minimum
	  vmalloc space and actual amount of RAM, you may not need this
	  option which should result in a slightly faster kernel.

	  If unsure, say n.

source "mm/Kconfig"
source "mm/Kconfig"


config LEDS
config LEDS
+7 −0
Original line number Original line Diff line number Diff line
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmapool.h>
@@ -349,6 +350,12 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,


	BUG_ON(!valid_dma_direction(dir));
	BUG_ON(!valid_dma_direction(dir));


	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			     "is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
	return map_single(dev, page_address(page) + offset, size, dir);
}
}
EXPORT_SYMBOL(dma_map_page);
EXPORT_SYMBOL(dma_map_page);
+13 −1
Original line number Original line Diff line number Diff line
@@ -15,10 +15,20 @@
 * must not be used by drivers.
 * must not be used by drivers.
 */
 */
#ifndef __arch_page_to_dma
#ifndef __arch_page_to_dma

#if !defined(CONFIG_HIGHMEM)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
{
	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
#else
#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
#endif


static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
{
@@ -57,6 +67,8 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
				 size_t size, int rw);


/*
/*
 * Return whether the given device DMA address mask can be supported
 * Return whether the given device DMA address mask can be supported
@@ -316,7 +328,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	BUG_ON(!valid_dma_direction(dir));
	BUG_ON(!valid_dma_direction(dir));


	if (!arch_is_coherent())
	if (!arch_is_coherent())
		dma_cache_maint(page_address(page) + offset, size, dir);
		dma_cache_maint_page(page, offset, size, dir);


	return page_to_dma(dev, page) + offset;
	return page_to_dma(dev, page) + offset;
}
}
+41 −0
Original line number Original line Diff line number Diff line
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H

/*
 * Nothing too fancy for now.
 *
 * On ARM we already have well known fixed virtual addresses imposed by
 * the architecture such as the vector page which is located at 0xffff0000,
 * therefore a second level page table is already allocated covering
 * 0xfff00000 upwards.
 *
 * The cache flushing code in proc-xscale.S uses the virtual area between
 * 0xfffe0000 and 0xfffeffff.
 */

/* Fixmap window: 0xfff00000 up to, but not including, 0xfffe0000. */
#define FIXADDR_START		0xfff00000UL
#define FIXADDR_TOP		0xfffe0000UL
#define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)

/*
 * Index range for fixmap slots.  FIX_KMAP_END is the number of
 * page-sized slots that fit in the window, i.e. an EXCLUSIVE upper
 * bound on valid indices (checked with >= in fix_to_virt() below).
 */
#define FIX_KMAP_BEGIN		0
#define FIX_KMAP_END		(FIXADDR_SIZE >> PAGE_SHIFT)

/* Raw index <-> virtual address conversions; no range checking. */
#define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)

/*
 * Deliberately declared but given no definition anywhere: when
 * fix_to_virt() is called with a compile-time-constant index that is
 * in range, the compiler removes the call; an out-of-range constant
 * index leaves the call in and produces a link-time error instead of
 * a silent bad mapping.
 */
extern void __this_fixmap_does_not_exist(void);

/*
 * Convert a fixmap index to its virtual address.  idx should be a
 * compile-time constant so the range check above can resolve at
 * build/link time rather than at run time.
 */
static inline unsigned long fix_to_virt(const unsigned int idx)
{
	if (idx >= FIX_KMAP_END)
		__this_fixmap_does_not_exist();
	return __fix_to_virt(idx);
}

/*
 * Convert a fixmap virtual address back to its slot index.
 * BUG_ON() fires at run time if vaddr lies outside
 * [FIXADDR_START, FIXADDR_TOP).
 */
static inline unsigned int virt_to_fix(const unsigned long vaddr)
{
	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
	return __virt_to_fix(vaddr);
}

#endif
Loading