Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 70b99eff authored by Linus Torvalds
Browse files
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Update a BKL related comment
  powerpc/mm: Fix module instruction tlb fault handling on Book-E 64
  powerpc: Fix call to subpage_protection()
  powerpc: Set CONFIG_32BIT on ppc32
  powerpc/mm: Fix build error in setup_initial_memory_limit
  powerpc/pseries: Don't override CONFIG_PPC_PSERIES_DEBUG
  powerpc: Fix div64 in bootloader
parents 0a5b871e 0f6b77ca
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -4,6 +4,10 @@ config PPC32
	bool
	default y if !PPC64

config 32BIT
	bool
	default y if PPC32

config 64BIT
	bool
	default y if PPC64
+2 −1
Original line number Diff line number Diff line
@@ -33,9 +33,10 @@ __div64_32:
	cntlzw	r0,r5		# we are shifting the dividend right
	li	r10,-1		# to make it < 2^32, and shifting
	srw	r10,r10,r0	# the divisor right the same amount,
	add	r9,r4,r10	# rounding up (so the estimate cannot
	addc	r9,r4,r10	# rounding up (so the estimate cannot
	andc	r11,r6,r10	# ever be too large, only too small)
	andc	r9,r9,r10
	addze	r9,r9
	or	r11,r5,r11
	rotlw	r9,r9,r0
	rotlw	r11,r11,r0
+2 −3
Original line number Diff line number Diff line
@@ -497,9 +497,8 @@ static void __init emergency_stack_init(void)
}

/*
 * Called into from start_kernel, after lock_kernel has been called.
 * Initializes bootmem, which is unsed to manage page allocation until
 * mem_init is called.
 * Called into from start_kernel this initializes bootmem, which is used
 * to manage page allocation until mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
+1 −1
Original line number Diff line number Diff line
@@ -1123,7 +1123,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
				    subpage_protection(pgdir, ea));
				    subpage_protection(mm, ea));

	/* Dump some info in case of hash insertion failure, they should
	 * never happen so it is really useful to know if/when they do
+4 −1
Original line number Diff line number Diff line
@@ -138,8 +138,11 @@
	cmpldi	cr0,r15,0			/* Check for user region */
	std	r14,EX_TLB_ESR(r12)		/* write crazy -1 to frame */
	beq	normal_tlb_miss

	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
Loading