Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0861fd1c authored by Linus Torvalds
Browse files
Pull arm64 fixes from Catalin Marinas:

 - fix EFI stub cache maintenance causing aborts during boot on certain
   platforms

 - handle byte stores in __clear_user without panicking

 - fix race condition in aarch64_insn_patch_text_sync() (instruction
   patching)

 - a couple of type fixes

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: ARCH_PFN_OFFSET should be unsigned long
  Correct the race condition in aarch64_insn_patch_text_sync()
  arm64: __clear_user: handle exceptions on strb
  arm64: Fix data type for physical address
  arm64: efi: Fix stub cache maintenance
parents 5ae93760 5fd6690c
Loading
Loading
Loading
Loading
+1 −1  arch/arm64/include/asm/memory.h
Original line number Diff line number Diff line
@@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define	virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+21 −6  arch/arm64/kernel/efi-entry.S
Original line number Diff line number Diff line
@@ -54,18 +54,17 @@ ENTRY(efi_stub_entry)
	b.eq	efi_load_fail

	/*
	 * efi_entry() will have relocated the kernel image if necessary
	 * and we return here with device tree address in x0 and the kernel
	 * entry point stored at *image_addr. Save those values in registers
	 * which are callee preserved.
	 * efi_entry() will have copied the kernel image if necessary and we
	 * return here with device tree address in x0 and the kernel entry
	 * point stored at *image_addr. Save those values in registers which
	 * are callee preserved.
	 */
	mov	x20, x0		// DTB address
	ldr	x0, [sp, #16]	// relocated _text address
	mov	x21, x0

	/*
	 * Flush dcache covering current runtime addresses
	 * of kernel text/data. Then flush all of icache.
	 * Calculate size of the kernel Image (same for original and copy).
	 */
	adrp	x1, _text
	add	x1, x1, #:lo12:_text
@@ -73,9 +72,24 @@ ENTRY(efi_stub_entry)
	add	x2, x2, #:lo12:_edata
	sub	x1, x2, x1

	/*
	 * Flush the copied Image to the PoC, and ensure it is not shadowed by
	 * stale icache entries from before relocation.
	 */
	bl	__flush_dcache_area
	ic	ialluis

	/*
	 * Ensure that the rest of this function (in the original Image) is
	 * visible when the caches are disabled. The I-cache can't have stale
	 * entries for the VA range of the current image, so no maintenance is
	 * necessary.
	 */
	adr	x0, efi_stub_entry
	adr	x1, efi_stub_entry_end
	sub	x1, x1, x0
	bl	__flush_dcache_area

	/* Turn off Dcache and MMU */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
@@ -105,4 +119,5 @@ efi_load_fail:
	ldp	x29, x30, [sp], #32
	ret

efi_stub_entry_end:
ENDPROC(efi_stub_entry)
+3 −2  arch/arm64/kernel/insn.c
Original line number Diff line number Diff line
@@ -163,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		atomic_set(&pp->cpu_count, -1);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) != -1)
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}
+1 −1  arch/arm64/lib/clear_user.S
Original line number Diff line number Diff line
@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 )
	sub	x1, x1, #2
4:	adds	x1, x1, #1
	b.mi	5f
	strb	wzr, [x0]
USER(9f, strb	wzr, [x0]	)
5:	mov	x0, #0
	ret
ENDPROC(__clear_user)
+1 −1  arch/arm64/mm/mmu.c
Original line number Diff line number Diff line
@@ -202,7 +202,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys,
				  unsigned long end, phys_addr_t phys,
				  int map_io)
{
	pud_t *pud;