
Commit d6ac4ffc authored by Linus Torvalds

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "These are late by a week; they should have been merged during the
  merge window, but unfortunately, the ARM kernel build/boot farms were
  indicating random failures, and it wasn't clear whether the cause was
  something in these changes or something during the merge window.

  This is a set of merge window fixes with some documentation additions"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: avoid unwanted GCC memset()/memcpy() optimisations for IO variants
  ARM: pgtable: document mapping types
  ARM: io: convert ioremap*() to functions
  ARM: io: fix ioremap_wt() implementation
  ARM: io: document ARM specific behaviour of ioremap*() implementations
  ARM: fix lockdep unannotated irqs-off warning
  ARM: 8397/1: fix vdsomunge not to depend on glibc specific error.h
  ARM: add helpful message when truncating physical memory
  ARM: add help text for HIGHPTE configuration entry
  ARM: fix DEBUG_SET_MODULE_RONX build dependencies
  ARM: 8396/1: use phys_addr_t in pfn_to_kaddr()
  ARM: 8394/1: update memblock limit after mapping lowmem
  ARM: 8393/1: smp: Fix suspicious RCU usage with ipi tracepoints
parents 4f273959 06be5eef
+6 −0
@@ -1693,6 +1693,12 @@ config HIGHMEM
config HIGHPTE
	bool "Allocate 2nd-level pagetables from highmem"
	depends on HIGHMEM
	help
	  The VM uses one page of physical memory for each page table.
	  For systems with a lot of processes, this can use a lot of
	  precious low memory, eventually leading to low memory being
	  consumed by page tables.  Setting this option will allow
	  user-space 2nd level page tables to reside in high memory.

config HW_PERF_EVENTS
	bool "Enable hardware performance counter support for perf events"
+1 −1
@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR

config DEBUG_SET_MODULE_RONX
	bool "Set loadable kernel module data as NX and text as RO"
	depends on MODULES
	depends on MODULES && MMU
	---help---
	  This option helps catch unintended modifications to loadable
	  kernel module's text and read-only data. It also prevents execution
+58 −17
@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 * The _caller variety takes a __builtin_return_address(0) value for
 * /proc/vmalloc to use - and should only be used in non-inline functions.
 */
extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
	size_t, unsigned int, void *);
extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
	void *);

extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
extern void __arm_iounmap(volatile void __iomem *addr);

extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
	unsigned int, void *);
@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
static inline void memset_io(volatile void __iomem *dst, unsigned c,
	size_t count)
{
	memset((void __force *)dst, c, count);
	extern void mmioset(void *, unsigned int, size_t);
	mmioset((void __force *)dst, c, count);
}
#define memset_io(dst,c,count) memset_io(dst,c,count)

static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
	size_t count)
{
	memcpy(to, (const void __force *)from, count);
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy(to, (const void __force *)from, count);
}
#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)

static inline void memcpy_toio(volatile void __iomem *to, const void *from,
	size_t count)
{
	memcpy((void __force *)to, from, count);
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy((void __force *)to, from, count);
}
#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)

@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
#endif	/* readl */

/*
 * ioremap and friends.
 * ioremap() and friends.
 *
 * ioremap() takes a resource address, and size.  Due to the ARM memory
 * types, it is important to use the correct ioremap() function as each
 * mapping has specific properties.
 *
 * Function		Memory type	Cacheability	Cache hint
 * ioremap()		Device		n/a		n/a
 * ioremap_nocache()	Device		n/a		n/a
 * ioremap_cache()	Normal		Writeback	Read allocate
 * ioremap_wc()		Normal		Non-cacheable	n/a
 * ioremap_wt()		Normal		Non-cacheable	n/a
 *
 * All device mappings have the following properties:
 * - no access speculation
 * - no repetition (eg, on return from an exception)
 * - number, order and size of accesses are maintained
 * - unaligned accesses are "unpredictable"
 * - writes may be delayed before they hit the endpoint device
 *
 * ioremap takes a PCI memory address, as specified in
 * Documentation/io-mapping.txt.
 * ioremap_nocache() is the same as ioremap() as there are too many device
 * drivers using this for device registers, and documentation which tells
 * people to use it for such for this to be any different.  This is not a
 * safe fallback for memory-like mappings, or memory regions where the
 * compiler may generate unaligned accesses - eg, via inlining its own
 * memcpy.
 *
 * All normal memory mappings have the following properties:
 * - reads can be repeated with no side effects
 * - repeated reads return the last value written
 * - reads can fetch additional locations without side effects
 * - writes can be repeated (in certain cases) with no side effects
 * - writes can be merged before accessing the target
 * - unaligned accesses can be supported
 * - ordering is not guaranteed without explicit dependencies or barrier
 *   instructions
 * - writes may be delayed before they hit the endpoint memory
 *
 * The cache hint is only a performance hint: CPUs may alias these hints.
 * Eg, a CPU not implementing read allocate but implementing write allocate
 * will provide a write allocate mapping instead.
 */
#define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
#define ioremap_wt(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
#define iounmap				__arm_iounmap
void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap
#define ioremap_nocache ioremap

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc

void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap

/*
 * io{read,write}{16,32}be() macros
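Two of the merged changes meet in this header: the ioremap*() macros become real function declarations (giving callers type checking, and letting ioremap_wt() fall back to ioremap_wc() rather than a Device mapping), and the new comment block spells out which memory type each variant returns. A hedged sketch of the usage pattern the table describes; the register offsets and return-value handling are invented for illustration, not taken from the commit:

#include <linux/errno.h>
#include <linux/io.h>

static int example_map_regs(phys_addr_t base, size_t size)
{
	void __iomem *regs = ioremap(base, size);	/* Device mapping: no speculation, access order preserved */
	u32 id;

	if (!regs)
		return -ENOMEM;

	id = readl(regs);		/* hypothetical ID register at offset 0 */
	writel(1, regs + 0x4);		/* hypothetical enable bit */

	iounmap(regs);
	return id ? 0 : -ENODEV;
}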
+1 −1
@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)

extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);

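The cast added above fixes a truncation: with LPAE enabled, phys_addr_t is 64-bit while unsigned long is 32-bit, so shifting the pfn before widening it loses the upper bits of any physical address beyond 4GiB. A stand-alone illustration in ordinary userspace C (the values are arbitrary, chosen only to cross the 4GiB boundary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pfn = 0x110000;	/* page frame sitting just above 4GiB */
	unsigned int shift = 12;	/* PAGE_SHIFT */

	uint32_t truncated = pfn << shift;		/* old macro: shift done in 32 bits, top bits lost */
	uint64_t widened = (uint64_t)pfn << shift;	/* new macro: widen to phys_addr_t first */

	printf("truncated: %#x  widened: %#llx\n",
	       truncated, (unsigned long long)widened);
	return 0;
}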
+30 −1
@@ -129,7 +129,36 @@

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
 * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
 * ARMv6+ without TEX remapping, they are a table index.
 * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
 *
 * MT type		Pre-ARMv6	ARMv6+ type / cacheable status
 * UNCACHED		Uncached	Strongly ordered
 * BUFFERABLE		Bufferable	Normal memory / non-cacheable
 * WRITETHROUGH		Writethrough	Normal memory / write through
 * WRITEBACK		Writeback	Normal memory / write back, read alloc
 * MINICACHE		Minicache	N/A
 * WRITEALLOC		Writeback	Normal memory / write back, write alloc
 * DEV_SHARED		Uncached	Device memory (shared)
 * DEV_NONSHARED	Uncached	Device memory (non-shared)
 * DEV_WC		Bufferable	Normal memory / non-cacheable
 * DEV_CACHED		Writeback	Normal memory / write back, read alloc
 * VECTORS		Variable	Normal memory / variable
 *
 * All normal memory mappings have the following properties:
 * - reads can be repeated with no side effects
 * - repeated reads return the last value written
 * - reads can fetch additional locations without side effects
 * - writes can be repeated (in certain cases) with no side effects
 * - writes can be merged before accessing the target
 * - unaligned accesses can be supported
 *
 * All device mappings have the following properties:
 * - no access speculation
 * - no repetition (eg, on return from an exception)
 * - number, order and size of accesses are maintained
 * - unaligned accesses are "unpredictable"
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
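As a reading aid for the new table (my own note, not part of the patch): the low two bits of each L_PTE_MT_* value are the pre-ARMv6 C and B bits, and the << 2 places them in bits 3:2 of the Linux PTE. Decoding the two entries visible above:

L_PTE_MT_UNCACHED	= 0x00 << 2	/* C=0, B=0 -> "Uncached", ARMv6+ Strongly ordered */
L_PTE_MT_BUFFERABLE	= 0x01 << 2	/* C=0, B=1 -> "Bufferable", ARMv6+ Normal memory, non-cacheable */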