
Commit 01acd3ef authored by Linus Torvalds
Pull ARM fixes from Russell King:
 "A number of fixes:

  Patrik found a problem with preempt counting in the VFP assembly
  functions which can cause the preempt count to be upset.

  Nicolas fixed a problem with the parsing of the DT when it straddles a
  1MB boundary.

  Subhash Jadavani reported a problem with sparsemem and our highmem
  support for cache maintenance for DMA areas, and TI found a bug in
  their strongly ordered memory mapping type.

  Also, three fixes by way of Will Deacon's tree from Dave Martin for
  instruction compatibility and Marc Zyngier to fix hypervisor boot mode
  issues."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7629/1: mm: Fix missing XN flag for MT_MEMORY_SO
  ARM: DMA: Fix struct page iterator in dma_cache_maint() to work with sparsemem
  ARM: 7628/1: head.S: map one extra section for the ATAG/DTB area
  ARM: 7627/1: Predicate preempt logic on PREEMPT_COUNT not PREEMPT alone
  ARM: virt: simplify __hyp_stub_install epilog
  ARM: virt: boot secondary CPUs through the right entry point
  ARM: virt: Avoid bx instruction for compatibility with <=ARMv4
parents 1496ec13 210b1847
arch/arm/kernel/head.S +4 −1
@@ -246,6 +246,7 @@ __create_page_tables:
 
 	/*
 	 * Then map boot params address in r2 if specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
 	addne	r3, r3, #PAGE_OFFSET
 	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
 	orrne	r6, r7, r0
-	strne	r6, [r3]
+	strne	r6, [r3], #1 << PMD_ORDER
+	addne	r6, r6, #1 << SECTION_SHIFT
+	strne	r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@ ENTRY(secondary_startup)
 	 * as it has already been validated by the primary processor.
	 */
 #ifdef CONFIG_ARM_VIRT_EXT
-	bl	__hyp_stub_install
+	bl	__hyp_stub_install_secondary
 #endif
	safe_svcmode_maskall r9
 
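Why two sections: with the ARM short-descriptor format a first-level entry maps 1MB, so a DTB that starts near the end of one section can extend into the next, and mapping only the section containing r2 makes the DT parser fault partway through the blob. A minimal userspace sketch of the arithmetic (SECTION_SHIFT matches the 1MB section size; the sample address and size are made up for illustration):

/* Round the DTB address down to its 1MB section and also map the
 * following section, so a blob straddling the boundary stays covered. */
#include <stdio.h>

#define SECTION_SHIFT 20                        /* 1MB sections */
#define SECTION_SIZE  (1UL << SECTION_SHIFT)

int main(void)
{
	unsigned long dtb = 0x4FFFF800UL;       /* hypothetical DTB address */
	unsigned long first = dtb & ~(SECTION_SIZE - 1);
	unsigned long second = first + SECTION_SIZE;

	/* Mapping only 'first' covers up to 0x4FFFFFFF; a 16KB blob at
	 * this address runs past that, so the parser would fault. */
	printf("map %#lx and %#lx\n", first, second);
	return 0;
}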
arch/arm/kernel/hyp-stub.S +6 −12
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 * immediately.
 	 */
 	compare_cpu_mode_with_primary	r4, r5, r6, r7
-	bxne	lr
+	movne	pc, lr
 
 	/*
 	 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 */
 
 	cmp	r4, #HYP_MODE
-	bxne	lr			@ give up if the CPU is not in HYP mode
+	movne	pc, lr			@ give up if the CPU is not in HYP mode
 
 /*
  * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
  * Eventually, CPU-specific code might be needed -- assume not for now
  *
  * This code relies on the "eret" instruction to synchronize the
- * various coprocessor accesses.
+ * various coprocessor accesses. This is done when we switch to SVC
+ * (see safe_svcmode_maskall).
  */
 	@ Now install the hypervisor stub:
 	adr	r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB(	orr	r7, #(1 << 30) )	@ HSCTLR.TE
 1:
 #endif
 
-	bic	r7, r4, #MODE_MASK
-	orr	r7, r7, #SVC_MODE
-THUMB(	orr	r7, r7, #PSR_T_BIT	)
-	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
-
-	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
-	__ERET				@ return, switching to SVC mode
-					@ The boot CPU mode is left in r4.
+	bx	lr			@ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
@@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)
 	@ fall through
 ENTRY(__hyp_set_vectors)
 	__HVC(0)
-	bx	lr
+	mov	pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
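The "give up" comment above describes a deliberate policy: the first mismatch modifies the saved boot mode, so every later secondary fails the comparison too. A hedged userspace model of that policy follows; the names and the poison flag are illustrative, not the kernel's actual __boot_cpu_mode bookkeeping:

#include <stdio.h>

enum { SVC_MODE = 0x13, HYP_MODE = 0x1a };
#define MODE_MISMATCH 0x20              /* illustrative poison bit */

static int boot_cpu_mode = -1;          /* recorded by the primary CPU */

static void secondary(int cpu, int mode)
{
	if (mode != boot_cpu_mode) {
		/* Poison the record: once one CPU disagrees, the saved
		 * value can never compare equal to a mode field again. */
		boot_cpu_mode |= MODE_MISMATCH;
		printf("cpu%d: mode %#x != primary, giving up\n", cpu, mode);
		return;
	}
	printf("cpu%d: mode matches, installing hyp stub\n", cpu);
}

int main(void)
{
	boot_cpu_mode = HYP_MODE;       /* primary booted in HYP */
	secondary(1, HYP_MODE);         /* matches */
	secondary(2, SVC_MODE);         /* mismatch poisons the record */
	secondary(3, HYP_MODE);         /* now fails too, by design */
	return 0;
}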
arch/arm/mm/dma-mapping.c +10 −8
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages.  But we still need to process highmem pages individually.
 	 * If highmem is not configured then the bulk of this loop gets
 	 * optimized out.
 	 */
-	size_t left = size;
 	do {
 		size_t len = left;
 		void *vaddr;
 
+		page = pfn_to_page(pfn);
+
 		if (PageHighMem(page)) {
-			if (len + offset > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset / PAGE_SIZE;
-					offset %= PAGE_SIZE;
-				}
+			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			}
 			vaddr = kmap_high_get(page);
 			if (vaddr) {
 				vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			op(vaddr, len, dir);
 		}
 		offset = 0;
-		page++;
+		pfn++;
 		left -= len;
 	} while (left);
 }
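The reason page++ is unsafe here: with sparsemem, struct page arrays are allocated per memory section, so the struct pages for consecutive pfns need not be adjacent in memory, and pointer arithmetic across a section boundary walks off the end of one array instead of into the next. Iterating by pfn and translating with pfn_to_page() on every pass, as the fix does, is always valid. A toy userspace model with assumed tiny sections (the types and helper mirror the kernel's names but are simplified stand-ins):

#include <stdio.h>

#define PAGES_PER_SECTION 4             /* tiny, for illustration */
#define NSECTIONS 2

struct page { unsigned long pfn; };

/* Two separately allocated "mem_map" chunks, one per section; unlike
 * flatmem's single array, they are not adjacent in memory. */
static struct page section_a[PAGES_PER_SECTION];
static struct page section_b[PAGES_PER_SECTION];
static struct page *sections[NSECTIONS] = { section_a, section_b };

static struct page *pfn_to_page(unsigned long pfn)
{
	return &sections[pfn / PAGES_PER_SECTION][pfn % PAGES_PER_SECTION];
}

int main(void)
{
	unsigned long pfn;

	for (pfn = 0; pfn < NSECTIONS * PAGES_PER_SECTION; pfn++)
		pfn_to_page(pfn)->pfn = pfn;

	/* Crossing from pfn 3 to pfn 4 crosses a section boundary:
	 * pfn_to_page(3) + 1 points one past the end of section_a,
	 * not at pfn 4's struct page in section_b. */
	printf("page++ across boundary: %p vs correct %p\n",
	       (void *)(pfn_to_page(3) + 1), (void *)pfn_to_page(4));
	return 0;
}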
arch/arm/mm/mmu.c +1 −1
@@ -283,7 +283,7 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_SO] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_MT_UNCACHED,
+				L_PTE_MT_UNCACHED | L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
 				PMD_SECT_UNCACHED | PMD_SECT_XN,
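The one-liner restores consistency: prot_sect for MT_MEMORY_SO already carried PMD_SECT_XN, but prot_pte lacked L_PTE_XN, so whether a strongly-ordered region was execute-never depended on whether it happened to be mapped with 1MB sections or with pages. A sketch of the mismatch, using made-up bit positions rather than the kernel's real PTE layout:

#include <stdio.h>

#define L_PTE_MT_UNCACHED  (1u << 2)    /* illustrative values only, */
#define L_PTE_XN           (1u << 9)    /* not the kernel's layout   */
#define PMD_SECT_UNCACHED  (1u << 2)
#define PMD_SECT_XN        (1u << 4)

int main(void)
{
	unsigned int prot_pte_old = L_PTE_MT_UNCACHED;
	unsigned int prot_pte_new = L_PTE_MT_UNCACHED | L_PTE_XN;
	unsigned int prot_sect    = PMD_SECT_UNCACHED | PMD_SECT_XN;

	/* Before the fix, section and page mappings disagreed on XN. */
	printf("section XN: %d, page XN before: %d, page XN after: %d\n",
	       !!(prot_sect & PMD_SECT_XN),
	       !!(prot_pte_old & L_PTE_XN),
	       !!(prot_pte_new & L_PTE_XN));
	return 0;
}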
arch/arm/vfp/entry.S +3 −3
@@ -22,7 +22,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	add	r11, r4, #1		@ increment it
 	str	r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
 
 	__INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
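The underlying bug: CONFIG_PREEMPT selects CONFIG_PREEMPT_COUNT, but PREEMPT_COUNT can also be enabled on its own (debug options such as lockdep select it). In that configuration the C side still maintains and checks the preempt count, so assembly that conditionalises its half of the accounting on PREEMPT alone leaves the count unbalanced. A hedged userspace model of the mismatch; the function names and config macros here are illustrative stand-ins:

#include <stdio.h>

#define CONFIG_PREEMPT_COUNT 1          /* count-only build, e.g. lockdep */
/* CONFIG_PREEMPT deliberately not defined */

static int preempt_count;

static void vfp_asm_entry_buggy(void)
{
#ifdef CONFIG_PREEMPT                   /* old code keyed on the wrong    */
	preempt_count++;                /* symbol: compiled out here      */
#endif
}

static void preempt_enable(void)        /* simplified model of the core  */
{
#ifdef CONFIG_PREEMPT_COUNT             /* helper, which always balances */
	preempt_count--;                /* the count in this config      */
#endif
}

int main(void)
{
	vfp_asm_entry_buggy();          /* skips the ++ in this build */
	preempt_enable();               /* does the -- regardless     */
	printf("preempt_count = %d (expected 0)\n", preempt_count);
	return 0;
}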