
Commit 95001ee9 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

parents 63906e41 0dc46106
File 1: +8 −0
@@ -33,6 +33,14 @@ config DEBUG_BOOTMEM
 	depends on DEBUG_KERNEL
 	bool "Debug BOOTMEM initialization"
 
+config DEBUG_PAGEALLOC
+	bool "Page alloc debugging"
+	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
+	help
+	  Unmap pages from the kernel linear mapping after free_pages().
+	  This results in a large slowdown, but helps to find certain types
+	  of memory corruptions.
+
 config MCOUNT
 	bool
 	depends on STACK_DEBUG
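
The help text above is the entire mechanism: once a page is freed, its linear-mapping PTE is removed, so a stale pointer faults at the offending access instead of silently corrupting memory. A minimal user-space sketch of the same idea, using only POSIX mmap/mprotect (an illustration of the technique, not the kernel's implementation):

    /* debug_pagealloc_sketch.c: revoke a page's mapping at "free" time so
     * any use-after-free faults immediately.  Illustration only; the real
     * option operates on the kernel linear mapping, not via mprotect(). */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            const size_t pagesz = 4096;
            char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;

            p[0] = 42;                      /* normal use while "allocated" */
            mprotect(p, pagesz, PROT_NONE); /* "free": revoke, don't recycle */

            /* p[0] = 7; would now SIGSEGV at the corrupting store itself,
             * the slowdown-for-diagnosability trade the help text names. */
            puts("page access revoked after free");
            return 0;
    }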
File 2: +22 −0
@@ -135,6 +135,28 @@ void __init device_scan(void)
 		cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
 							    "clock-frequency",
 							    0);
+		cpu_data(0).dcache_size = prom_getintdefault(cpu_node,
+							     "dcache-size",
+							     16 * 1024);
+		cpu_data(0).dcache_line_size =
+			prom_getintdefault(cpu_node, "dcache-line-size", 32);
+		cpu_data(0).icache_size = prom_getintdefault(cpu_node,
+							     "icache-size",
+							     16 * 1024);
+		cpu_data(0).icache_line_size =
+			prom_getintdefault(cpu_node, "icache-line-size", 32);
+		cpu_data(0).ecache_size = prom_getintdefault(cpu_node,
+							     "ecache-size",
+							     4 * 1024 * 1024);
+		cpu_data(0).ecache_line_size =
+			prom_getintdefault(cpu_node, "ecache-line-size", 64);
+		printk("CPU[0]: Caches "
+		       "D[sz(%d):line_sz(%d)] "
+		       "I[sz(%d):line_sz(%d)] "
+		       "E[sz(%d):line_sz(%d)]\n",
+		       cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
+		       cpu_data(0).icache_size, cpu_data(0).icache_line_size,
+		       cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
 	}
 #endif
 
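For reference, with the fallback defaults above (16K D- and I-caches with 32-byte lines, a 4MB E-cache with 64-byte lines), the new printk yields a boot line of the form:

    CPU[0]: Caches D[sz(16384):line_sz(32)] I[sz(16384):line_sz(32)] E[sz(4194304):line_sz(64)]

On real hardware each number comes from the corresponding PROM property rather than the default.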
File 3: +1 −12
@@ -9,17 +9,7 @@
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 
-#if PAGE_SHIFT == 13
-#define SZ_BITS		_PAGE_SZ8K
-#elif PAGE_SHIFT == 16
-#define SZ_BITS		_PAGE_SZ64K
-#elif PAGE_SHIFT == 19
-#define SZ_BITS		_PAGE_SZ512K
-#elif PAGE_SHIFT == 22
-#define SZ_BITS		_PAGE_SZ4MB
-#endif
-
-#define VALID_SZ_BITS	(_PAGE_VALID | SZ_BITS)
+#define VALID_SZ_BITS	(_PAGE_VALID | _PAGE_SZBITS)
 
 #define VPTE_BITS		(_PAGE_CP | _PAGE_CV | _PAGE_P )
 #define VPTE_SHIFT		(PAGE_SHIFT - 3)
@@ -163,7 +153,6 @@ sparc64_vpte_continue:
 	stxa		%g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
 	retry						! Load PTE once again
 
-#undef SZ_BITS
 #undef VALID_SZ_BITS
 #undef VPTE_SHIFT
 #undef VPTE_BITS
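
The page-size ladder is not gone, only centralized: VALID_SZ_BITS is now built from a shared _PAGE_SZBITS macro instead of a locally computed SZ_BITS. Presumably the arch header now carries a definition along these lines, mirroring the ladder deleted above (an assumption inferred from the identifiers in this hunk, not a quote of the header):

    /* Assumed central definition of _PAGE_SZBITS, one value per PAGE_SHIFT. */
    #if PAGE_SHIFT == 13
    #define _PAGE_SZBITS	_PAGE_SZ8K
    #elif PAGE_SHIFT == 16
    #define _PAGE_SZBITS	_PAGE_SZ64K
    #elif PAGE_SHIFT == 19
    #define _PAGE_SZBITS	_PAGE_SZ512K
    #elif PAGE_SHIFT == 22
    #define _PAGE_SZBITS	_PAGE_SZ4MB
    #endif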
File 4: +4 −4
@@ -71,7 +71,7 @@
 from_tl1_trap:
 	rdpr		%tl, %g5			! For TL==3 test
 	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
-	be,pn		%xcc, 3f			! Yep, special processing
+	be,pn		%xcc, kvmap			! Yep, special processing
 	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
 	cmp		%g5, 4				! Last trap level?
 	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
@@ -83,9 +83,9 @@ from_tl1_trap:
 	 nop						! Delay-slot
 9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
 	retry						! Trap return
-3:	brlz,pt		%g4, 9b				! Kernel virtual map?
-	 xor		%g2, %g4, %g5			! Finish bit twiddles
-	ba,a,pt		%xcc, kvmap			! Yep, go check for obp/vmalloc
+	nop
+	nop
+	nop
 
 /* DTLB ** ICACHE line 3: winfixups+real_faults		*/
 longpath:
File 5: +18 −162
@@ -30,159 +30,6 @@
 	.text
 	.align		32
 
-	.globl		sparc64_vpte_patchme1
-	.globl		sparc64_vpte_patchme2
-/*
- * On a second level vpte miss, check whether the original fault is to the OBP
- * range (note that this is only possible for instruction miss, data misses to
- * obp range do not use vpte). If so, go back directly to the faulting address.
- * This is because we want to read the tpc, otherwise we have no way of knowing
- * the 8k aligned faulting address if we are using >8k kernel pagesize. This
- * also ensures no vpte range addresses are dropped into tlb while obp is
- * executing (see inherit_locked_prom_mappings() rant).
- */
-sparc64_vpte_nucleus:
-	/* Note that kvmap below has verified that the address is
-	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
-	 * here we need only check if it is an OBP address or not.
-	 */
-	sethi		%hi(LOW_OBP_ADDRESS), %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, sparc64_vpte_patchme1
-	 mov		0x1, %g5
-	sllx		%g5, 32, %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, obp_iaddr_patch
-	 nop
-
-	/* These two instructions are patched by paging_init().  */
-sparc64_vpte_patchme1:
-	sethi		%hi(0), %g5
-sparc64_vpte_patchme2:
-	or		%g5, %lo(0), %g5
-
-	/* With kernel PGD in %g5, branch back into dtlb_backend.  */
-	ba,pt		%xcc, sparc64_kpte_continue
-	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment.  */
-
-vpte_noent:
-	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
-	 * skip over the trap instruction so that the top level
-	 * TLB miss handler will think this %g5 value is just an
-	 * invalid PTE, thus branching to full fault processing.
-	 */
-	mov		TLB_SFSR, %g1
-	stxa		%g4, [%g1 + %g1] ASI_DMMU
-	done
-
-	.globl		obp_iaddr_patch
-obp_iaddr_patch:
-	/* These two instructions patched by inherit_prom_mappings().  */
-	sethi		%hi(0), %g5
-	or		%g5, %lo(0), %g5
-
-	/* Behave as if we are at TL0.  */
-	wrpr		%g0, 1, %tl
-	rdpr		%tpc, %g4	/* Find original faulting iaddr */
-	srlx		%g4, 13, %g4	/* Throw out context bits */
-	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */
-
-	/* Restore previous TAG_ACCESS.  */
-	mov		TLB_SFSR, %g1
-	stxa		%g4, [%g1 + %g1] ASI_IMMU
-
-	/* Get PMD offset.  */
-	srlx		%g4, 23, %g6
-	and		%g6, 0x7ff, %g6
-	sllx		%g6, 2, %g6
-
-	/* Load PMD, is it valid?  */
-	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
-	 sllx		%g5, 11, %g5
-
-	/* Get PTE offset.  */
-	srlx		%g4, 13, %g6
-	and		%g6, 0x3ff, %g6
-	sllx		%g6, 3, %g6
-
-	/* Load PTE.  */
-	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* TLB load and return from trap.  */
-	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
-	retry
-
-	.globl		obp_daddr_patch
-obp_daddr_patch:
-	/* These two instructions patched by inherit_prom_mappings().  */
-	sethi		%hi(0), %g5
-	or		%g5, %lo(0), %g5
-
-	/* Get PMD offset.  */
-	srlx		%g4, 23, %g6
-	and		%g6, 0x7ff, %g6
-	sllx		%g6, 2, %g6
-
-	/* Load PMD, is it valid?  */
-	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
-	 sllx		%g5, 11, %g5
-
-	/* Get PTE offset.  */
-	srlx		%g4, 13, %g6
-	and		%g6, 0x3ff, %g6
-	sllx		%g6, 3, %g6
-
-	/* Load PTE.  */
-	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* TLB load and return from trap.  */
-	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
-	retry
-
-/*
- * On a first level data miss, check whether this is to the OBP range (note
- * that such accesses can be made by prom, as well as by kernel using
- * prom_getproperty on "address"), and if so, do not use vpte access ...
- * rather, use information saved during inherit_prom_mappings() using 8k
- * pagesize.
- */
-	.align		32
-kvmap:
-	sethi		%hi(MODULES_VADDR), %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, longpath
-	 mov		(VMALLOC_END >> 24), %g5
-	sllx		%g5, 24, %g5
-	cmp		%g4, %g5
-	bgeu,pn		%xcc, longpath
-	 nop
-
-kvmap_check_obp:
-	sethi		%hi(LOW_OBP_ADDRESS), %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, kvmap_vmalloc_addr
-	 mov		0x1, %g5
-	sllx		%g5, 32, %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, obp_daddr_patch
-	 nop
-
-kvmap_vmalloc_addr:
-	/* If we get here, a vmalloc addr was accessed, load kernel VPTE.  */
-	ldxa		[%g3 + %g6] ASI_N, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* PTE is valid, load into TLB and return from trap.  */
-	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
-	retry
-
 	/* This is trivial with the new code... */
 	.globl		do_fpdis
 do_fpdis:
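
The deleted comments spell out what the old handlers did: classify the faulting address as OBP or vmalloc range before touching the vpte area. In C terms, the OBP test (compare against LOW_OBP_ADDRESS, then against 1 << 32) reduces to a range predicate; here is a sketch built from the constants in the removed assembly, ignoring the %hi() truncation of the low address bits:

    /* Sketch of the removed OBP-range check: kvmap_check_obp branched to
     * the obp_*_patch handlers exactly when this predicate held. */
    static int is_obp_address(unsigned long vaddr)
    {
            return vaddr >= LOW_OBP_ADDRESS && vaddr < (1UL << 32);
    }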
@@ -525,14 +372,13 @@ cheetah_plus_patch_fpdis:
 	 *
 	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this
 	 *         [high 32-bits] MMU Context Argument 0, place in %g5
-	 * DATA 1: Address Argument 1, place in %g6
+	 * DATA 1: Address Argument 1, place in %g1
 	 * DATA 2: Address Argument 2, place in %g7
 	 *
 	 * With this method we can do most of the cross-call tlb/cache
 	 * flushing very quickly.
 	 *
-	 * Current CPU's IRQ worklist table is locked into %g1,
-	 * don't touch.
+	 * Current CPU's IRQ worklist table is locked into %g6, don't touch.
 	 */
 	.text
 	.align		32
@@ -1006,13 +852,14 @@ cheetah_plus_dcpe_trap_vector:
 	nop
 
 do_cheetah_plus_data_parity:
-	ba,pt		%xcc, etrap
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	ba,pt		%xcc, etrap_irq
 	 rd		%pc, %g7
 	mov		0x0, %o0
 	call		cheetah_plus_parity_error
 	 add		%sp, PTREGS_OFF, %o1
-	ba,pt		%xcc, rtrap
-	 clr		%l6
+	ba,a,pt		%xcc, rtrap_irq
 
 cheetah_plus_dcpe_trap_vector_tl1:
 	membar		#Sync
@@ -1036,13 +883,14 @@ cheetah_plus_icpe_trap_vector:
 	nop
 
 do_cheetah_plus_insn_parity:
-	ba,pt		%xcc, etrap
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	ba,pt		%xcc, etrap_irq
 	 rd		%pc, %g7
 	mov		0x1, %o0
 	call		cheetah_plus_parity_error
 	 add		%sp, PTREGS_OFF, %o1
-	ba,pt		%xcc, rtrap
-	 clr		%l6
+	ba,a,pt		%xcc, rtrap_irq
 
 cheetah_plus_icpe_trap_vector_tl1:
 	membar		#Sync
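
Both parity stubs now raise PIL to 15 and enter through etrap_irq/rtrap_irq instead of plain etrap/rtrap, so the handler runs with lower-priority interrupts blocked. The C function they call is unchanged; from the argument setup (%o0 = 0 for D-cache, 1 for I-cache; %o1 = the saved register frame) its signature would be:

    /* Inferred from the calling convention in the stubs above. */
    void cheetah_plus_parity_error(int type, struct pt_regs *regs);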
@@ -1075,6 +923,10 @@ do_dcpe_tl1:
 	 nop
 	wrpr		%g1, %tl		! Restore original trap level
 do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	sethi		%hi(dcache_parity_tl1_occurred), %g2
+	lduw		[%g2 + %lo(dcache_parity_tl1_occurred)], %g1
+	add		%g1, 1, %g1
+	stw		%g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
 	/* Reset D-cache parity */
 	sethi		%hi(1 << 16), %g1	! D-cache size
 	mov		(1 << 5), %g2		! D-cache line size
@@ -1121,6 +973,10 @@ do_icpe_tl1:
 	 nop
 	wrpr		%g1, %tl		! Restore original trap level
 do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	sethi		%hi(icache_parity_tl1_occurred), %g2
+	lduw		[%g2 + %lo(icache_parity_tl1_occurred)], %g1
+	add		%g1, 1, %g1
+	stw		%g1, [%g2 + %lo(icache_parity_tl1_occurred)]
 	/* Flush I-cache */
 	sethi		%hi(1 << 15), %g1	! I-cache size
 	mov		(1 << 5), %g2		! I-cache line size
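
The four instructions added to each non-fatal path (sethi/lduw/add/stw) are a hand-coded increment of a word-sized counter kept on the C side. The matching declarations would look like this (assumed, named after the symbols the assembly references):

    /* Assumed C-side counters; each added sequence is the equivalent of
     * dcache_parity_tl1_occurred++ / icache_parity_tl1_occurred++. */
    extern unsigned int dcache_parity_tl1_occurred;
    extern unsigned int icache_parity_tl1_occurred;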