
Commit 1f0090a1 authored by Russell King

Merge branch 'misc' into devel

Conflicts:
	arch/arm/Kconfig
parents 2472f3c8 10a8c383
arch/arm/Kconfig  +48 −8
@@ -63,6 +63,10 @@ config GENERIC_CLOCKEVENTS_BROADCAST
	depends on GENERIC_CLOCKEVENTS
	default y if SMP

config KTIME_SCALAR
	bool
	default y

config HAVE_TCM
	bool
	select GENERIC_ALLOCATOR
@@ -178,11 +182,6 @@ config FIQ
config ARCH_MTD_XIP
	bool

config ARM_L1_CACHE_SHIFT_6
	bool
	help
	  Setting ARM L1 cache line size to 64 Bytes.

config VECTORS_BASE
	hex
	default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -1152,7 +1151,7 @@ config ARM_ERRATA_742231

config PL310_ERRATA_588369
	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
	depends on CACHE_L2X0 && ARCH_OMAP4
	depends on CACHE_L2X0
	help
	   The PL310 L2 cache controller implements three types of Clean &
	   Invalidate maintenance operations: by Physical Address
@@ -1161,8 +1160,7 @@ config PL310_ERRATA_588369
	   clean operation followed immediately by an invalidate operation,
	   both performing to the same memory location. This functionality
	   is not correctly implemented in PL310 as clean lines are not
	   invalidated as a result of these operations. Note that this errata
	   uses Texas Instrument's secure monitor api.
	   invalidated as a result of these operations.

config ARM_ERRATA_720789
	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
@@ -1176,6 +1174,17 @@ config ARM_ERRATA_720789
	  tables. The workaround changes the TLB flushing routines to invalidate
	  entries regardless of the ASID.

config PL310_ERRATA_727915
	bool "Background Clean & Invalidate by Way operation can cause data corruption"
	depends on CACHE_L2X0
	help
	  PL310 implements the Clean & Invalidate by Way L2 cache maintenance
	  operation (offset 0x7FC). This operation runs in background so that
	  PL310 can handle normal accesses while it is in progress. Under very
	  rare circumstances, due to this erratum, write data can be lost when
	  PL310 treats a cacheable write transaction during a Clean &
	  Invalidate by Way operation.

config ARM_ERRATA_743622
	bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
	depends on CPU_V7
@@ -1430,6 +1439,37 @@ config THUMB2_KERNEL

	  If unsure, say N.

config THUMB2_AVOID_R_ARM_THM_JUMP11
	bool "Work around buggy Thumb-2 short branch relocations in gas"
	depends on THUMB2_KERNEL && MODULES
	default y
	help
	  Various binutils versions can resolve Thumb-2 branches to
	  locally-defined, preemptible global symbols as short-range "b.n"
	  branch instructions.

	  This is a problem, because there's no guarantee the final
	  destination of the symbol, or any candidate locations for a
	  trampoline, are within range of the branch.  For this reason, the
	  kernel does not support fixing up the R_ARM_THM_JUMP11 (102)
	  relocation in modules at all, and it makes little sense to add
	  support.

	  The symptom is that the kernel fails with an "unsupported
	  relocation" error when loading some modules.

	  Until fixed tools are available, passing
	  -fno-optimize-sibling-calls to gcc should prevent gcc generating
	  code which hits this problem, at the cost of a bit of extra runtime
	  stack usage in some cases.

	  The problem is described in more detail at:
	      https://bugs.launchpad.net/binutils-linaro/+bug/725126

	  Only Thumb-2 kernels are affected.

	  Unless you are sure your tools don't have this problem, say Y.

config ARM_ASM_UNIFIED
	bool

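As context for the THUMB2_AVOID_R_ARM_THM_JUMP11 help text above, here is a hypothetical C sketch (not part of this commit; the function names are invented) of the kind of code that trips the bug. With sibling-call optimization enabled, gcc can compile the tail call below into a single Thumb-2 "b.n" branch; when the target is a preemptible global symbol in a module, affected gas versions emit a short-range R_ARM_THM_JUMP11 relocation for it, which the module loader rejects.

	/*
	 * Hypothetical example: a tail call that gcc may emit as a single
	 * Thumb-2 "b.n" branch when sibling-call optimization is on.
	 * Building with -fno-optimize-sibling-calls forces an ordinary
	 * bl/return sequence instead, at the cost of some stack usage.
	 */
	int do_work(int arg);		/* preemptible global symbol */

	int wrapper(int arg)
	{
		return do_work(arg);	/* candidate sibling (tail) call */
	}
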
arch/arm/Makefile  +6 −1
@@ -105,6 +105,10 @@ AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mau
AFLAGS_NOWARN	:=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
CFLAGS_THUMB2	:=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
AFLAGS_THUMB2	:=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
CFLAGS_MODULE	+=-fno-optimize-sibling-calls
endif
endif

# Need -Uarm for gcc < 3.x
@@ -281,7 +285,7 @@ bzImage: zImage
zImage Image xipImage bootpImage uImage: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@

zinstall install: vmlinux
zinstall uinstall install: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@

# We use MRPROPER_FILES and CLEAN_FILES now
@@ -302,6 +306,7 @@ define archhelp
  echo  '                  (supply initrd image via make variable INITRD=<path>)'
  echo  '  install       - Install uncompressed kernel'
  echo  '  zinstall      - Install compressed kernel'
  echo  '  uinstall      - Install U-Boot wrapped compressed kernel'
  echo  '                  Install using (your) ~/bin/$(INSTALLKERNEL) or'
  echo  '                  (distribution) /sbin/$(INSTALLKERNEL) or'
  echo  '                  install to $$(INSTALL_PATH) and run lilo'
arch/arm/boot/Makefile  +4 −0
@@ -99,6 +99,10 @@ zinstall: $(obj)/zImage
	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
	$(obj)/zImage System.map "$(INSTALL_PATH)"

uinstall: $(obj)/uImage
	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
	$(obj)/uImage System.map "$(INSTALL_PATH)"

zi:
	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
	$(obj)/zImage System.map "$(INSTALL_PATH)"
arch/arm/boot/compressed/Makefile  +3 −1
@@ -98,9 +98,11 @@ endif
EXTRA_CFLAGS  := -fpic -fno-builtin
EXTRA_AFLAGS  := -Wa,-march=all

# Provide size of uncompressed kernel to the decompressor via a linker symbol.
LDFLAGS_vmlinux = --defsym _image_size=$(shell stat -c "%s" $(obj)/../Image)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
endif
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
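A note on the two --defsym changes above: they inject values into the decompressor as linker symbols (_image_size, the size of the uncompressed Image, and zreladdr from $(ZRELADDR)). A symbol defined this way has no storage; its address *is* the value. head.S reads them with "ldr r4, =zreladdr" and an LC0 table entry; the C sketch below shows the same mechanism and is an illustrative assumption, not code from the commit.

	/*
	 * Reading values injected at link time with "ld --defsym name=value".
	 * The symbol's address IS the value, so take the address rather
	 * than dereferencing it.
	 */
	extern char zreladdr[];		/* --defsym zreladdr=$(ZRELADDR) */
	extern char _image_size[];	/* --defsym _image_size=<Image size> */

	static unsigned long get_zreladdr(void)
	{
		return (unsigned long)zreladdr;
	}

	static unsigned long get_image_size(void)
	{
		return (unsigned long)_image_size;
	}
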
arch/arm/boot/compressed/head.S  +115 −134
@@ -128,14 +128,14 @@ wait: mrc p14, 0, pc, c0, c1, 0
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)
 THUMB(		.rept	6		)
 ARM(		.rept	8		)
		.rept	7
		mov	r0, r0
		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		b	1f
		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
@@ -174,9 +174,7 @@ not_angel:
		 */

		.text
		adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r5, r6, r11, ip}
		ldr	sp, [r0, #28]

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
@@ -185,35 +183,108 @@ not_angel:
#else
		ldr	r4, =zreladdr
#endif
		subs	r0, r0, r1		@ calculate the delta offset

						@ if delta is zero, we are
		beq	not_relocated		@ running at the address we
						@ were linked at.
		bl	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r5, r6, r9, r11, r12}
		ldr	sp, [r0, #32]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r5, r5, r0		@ _start
		add	r6, r6, r0		@ _edata

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address
 *   r5  = start of this image
 *   r9  = size of decompressed image
 *   r10 = end of this image, including  bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 >= r10 -> OK
 *   r4 + image length <= r5 -> OK
 */
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		cmp	r10, r5
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r5  = start of this image
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/* Round up to next 256-byte boundary. */
		add	r10, r10, #256
		bic	r10, r10, #255

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
		 * We're running at a different address.  We need to fix
		 * up various pointers:
		 *   r5 - zImage base address (_start)
		 *   r6 - size of decompressed image
		 *   r11 - GOT start
		 *   ip - GOT end
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		add	r5, r5, r0
		teq	r0, #0
		beq	not_relocated
		add	r11, r11, r0
		add	ip, ip, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 *   r2 - BSS start
		 *   r3 - BSS end
		 *   sp - stack pointer
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0
		add	sp, sp, r0

		/*
		 * Relocate all entries in the GOT table.
@@ -221,7 +292,7 @@ not_angel:
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, ip
		cmp	r11, r12
		blo	1b
#else

@@ -234,7 +305,7 @@ not_angel:
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, ip
		cmp	r11, r12
		blo	1b
#endif

@@ -247,75 +318,23 @@ not_relocated: mov r0, #0
		blo	1b

/*
		 * The C runtime environment should now be setup
		 * sufficiently.  Turn the cache on, set up some
		 * pointers, and start decompressing.
		 */
		bl	cache_on

		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max

/*
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r6 = size of decompressed image
 *   r2 = end of malloc space (and therefore this image)
 * We basically want:
 *   r4 >= r2 -> OK
 *   r4 + image length <= r5 -> OK
 */
		cmp	r4, r2
		bhs	wont_overwrite
		add	r0, r4, r6
		cmp	r0, r5
		bls	wont_overwrite

		mov	r5, r2			@ decompress after malloc space
		mov	r0, r5
		mov	r3, r7
		bl	decompress_kernel

		add	r0, r0, #127 + 128	@ alignment + stack
		bic	r0, r0, #127		@ align the kernel length
/*
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * The C runtime environment should now be setup sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 * r5     = decompressed kernel start
 *   r7  = architecture ID
 *   r8  = atags pointer
 * r9-r12,r14 = corrupted
 */
		add	r1, r5, r0		@ end of decompressed kernel
		adr	r2, reloc_start
		ldr	r3, LC1
		add	r3, r2, r3
1:		ldmia	r2!, {r9 - r12, r14}	@ copy relocation code
		stmia	r1!, {r9 - r12, r14}
		ldmia	r2!, {r9 - r12, r14}
		stmia	r1!, {r9 - r12, r14}
		cmp	r2, r3
		blo	1b
		mov	sp, r1
		add	sp, sp, #128		@ relocate the stack

		bl	cache_clean_flush
 ARM(		add	pc, r5, r0		) @ call relocation code
 THUMB(		add	r12, r5, r0		)
 THUMB(		mov	pc, r12			) @ call relocation code

/*
 * We're not in danger of overwriting ourselves.  Do this the simple way.
 *
 * r4     = kernel execution address
 * r7     = architecture ID
 */
wont_overwrite:	mov	r0, r4
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		b	call_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r0, #0			@ must be zero
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
		mov	pc, r4			@ call kernel

		.align	2
		.type	LC0, #object
@@ -323,11 +342,11 @@ LC0: .word LC0 @ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_start			@ r5
		.word	_image_size		@ r6
		.word	_edata			@ r6
		.word	_image_size		@ r9
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	user_stack_end		@ sp
LC1:		.word	reloc_end - reloc_start
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
@@ -353,7 +372,7 @@ params: ldr r0, =0x10000100 @ params_phys for RPC
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r5, r6, r7, r8
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
@@ -550,43 +569,6 @@ __common_mmu_cache_on:
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

/*
 * All code following this line is relocatable.  It is relocated by
 * the above code to the end of the decompressed kernel image and
 * executed there.  During this time, we have no stacks.
 *
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r12,r14 = corrupted
 */
		.align	5
reloc_start:	add	r9, r5, r0
		sub	r9, r9, #128		@ do not copy the stack
		debug_reloc_start
		mov	r1, r4
1:
		.rept	4
		ldmia	r5!, {r0, r2, r3, r10 - r12, r14}	@ relocate kernel
		stmia	r1!, {r0, r2, r3, r10 - r12, r14}
		.endr

		cmp	r5, r9
		blo	1b
		mov	sp, r1
		add	sp, sp, #128		@ relocate the stack
		debug_reloc_end

call_kernel:	bl	cache_clean_flush
		bl	cache_off
		mov	r0, #0			@ must be zero
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
		mov	pc, r4			@ call kernel

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
@@ -791,7 +773,7 @@ proc_types:
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
@@ -866,7 +848,7 @@ __armv3_mmu_cache_off:
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r0, r4, r5, r6, r7
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
@@ -1088,7 +1070,6 @@ memdump: mov r12, r0
#endif

		.ltorg
reloc_end:

		.align
		.section ".stack", "aw", %nobits
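
Summarizing the head.S rework above: the old scheme copied a small stub (reloc_start..reloc_end) past the decompressed kernel and let it move the kernel into place; the new scheme checks up front whether decompressing at the final address would overwrite the running zImage and, if so, copies the whole image past the end of the future kernel (backwards, since the ranges may overlap) and restarts at "restart:". A loose C rendering of that logic follows, with names taken from the register comments in the diff; it is an illustrative assumption, not code from the commit.

	/*
	 * Loose C rendering of the overlap check and copy-ahead relocation.
	 * Roles per the diff comments: r4 = final kernel address, r5 = start
	 * of this image, r6 = _edata, r9 = size of decompressed image,
	 * r10 = end of this image (incl. bss/stack/malloc space).
	 */
	static void relocate_if_needed(unsigned long r4, unsigned long r5,
				       unsigned long r6, unsigned long r9,
				       unsigned long r10)
	{
		unsigned long dst, size;

		/* "r4 >= r10 -> OK" / "r4 + image length <= r5 -> OK" */
		if (r4 >= r10 || r4 + r9 <= r5)
			return;			/* wont_overwrite */

		/* Round up past the decompressed kernel to a 256-byte boundary. */
		dst = (r4 + r9 + 256) & ~255UL;

		/*
		 * Copy from the end, backwards, in case source and destination
		 * overlap (the asm moves 32 bytes per ldmdb/stmdb iteration).
		 */
		size = (r6 - r5 + 31) & ~31UL;
		while (size) {
			size -= 4;
			*(unsigned long *)(dst + size) =
				*(unsigned long *)(r5 + size);
		}
		/* ...then flush caches and jump to "restart" at its new home. */
	}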
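The GOT fixup loop retained in the diff (now bounded by r11/r12 instead of r11/ip) amounts to adding the load delta to every entry between _got_start and _got_end so that C references work at the run address. A minimal C equivalent, again only an illustration:

	static void relocate_got(unsigned long *got_start,
				 unsigned long *got_end, unsigned long delta)
	{
		unsigned long *entry;

		/* Each GOT entry holds a link-time address; add the delta
		 * between run and link address to fix up the C references. */
		for (entry = got_start; entry < got_end; entry++)
			*entry += delta;
	}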