Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 56425306 authored by David S. Miller
Browse files

[SPARC64]: Add CONFIG_DEBUG_PAGEALLOC support.



The trick is that we do the kernel linear mapping TLB miss starting
with an instruction sequence like this:

	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5

succeeded by an instruction sequence which performs a full page table
walk starting at swapper_pg_dir.

We first take over the trap table from the firmware.  Then, using this
constant PTE generation for the linear mapping area above, we build
the kernel page tables for the linear mapping.

After this is setup, we patch that branch above into a "nop", which
will cause TLB misses to fall through to the full page table walk.

With this, the page unmapping for CONFIG_DEBUG_PAGEALLOC is trivial.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 52f26deb
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -33,6 +33,14 @@ config DEBUG_BOOTMEM
	depends on DEBUG_KERNEL
	bool "Debug BOOTMEM initialization"

config DEBUG_PAGEALLOC
	bool "Page alloc debugging"
	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
	help
	  Unmap pages from the kernel linear mapping after free_pages().
	  This results in a large slowdown, but helps to find certain types
	  of memory corruptions.

config MCOUNT
	bool
	depends on STACK_DEBUG
+0 −6
Original line number Diff line number Diff line
@@ -525,12 +525,6 @@ bootup_user_stack_end:

#include "ttable.S"
#include "systbls.S"

	.align	1024
	.globl	swapper_pg_dir
swapper_pg_dir:
	.word	0

#include "ktlb.S"
#include "etrap.S"
#include "rtrap.S"
+32 −1
Original line number Diff line number Diff line
@@ -132,9 +132,40 @@ kvmap_do_obp:
 */
	.align		32
kvmap:
	brlz,pt		%g4, kvmap_load
	brgez,pn	%g4, kvmap_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	.globl		kvmap_linear_patch
kvmap_linear_patch:
#endif
	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5

#ifdef CONFIG_DEBUG_PAGEALLOC
	sethi		%hi(swapper_pg_dir), %g5
	or		%g5, %lo(swapper_pg_dir), %g5
	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	andn		%g6, 0x3, %g6
	lduw		[%g5 + %g6], %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x3, %g6
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - PMD_SHIFT, %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x7, %g6
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 nop
	ba,a,pt		%xcc, kvmap_load
#endif

kvmap_nonlinear:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
+1 −2
Original line number Diff line number Diff line
@@ -9,8 +9,7 @@ ENTRY(_start)
jiffies = jiffies_64;
SECTIONS
{
  swapper_pmd_dir = 0x0000000000402000;
  empty_pg_dir = 0x0000000000403000;
  swapper_low_pmd_dir = 0x0000000000402000;
  . = 0x4000;
  .text 0x0000000000404000 :
  {
+106 −3
Original line number Diff line number Diff line
@@ -1332,15 +1332,114 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
	return end_pfn;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
/* Build kernel page table entries covering the physical range
 * [pstart, pend) in the kernel linear mapping (PAGE_OFFSET based),
 * using protection bits 'prot'.  Intermediate page table pages are
 * allocated from bootmem on demand.  Returns the number of bytes
 * allocated for new page table pages.
 *
 * NOTE(review): callers pass __pgprot(0) to "unmap" a range — the
 * resulting PTE is presumably invalid (no valid bit set); confirm
 * against the PTE layout in pgtable.h.
 */
static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	/* Both endpoints must be page aligned; anything else means the
	 * firmware memory list is corrupt, so halt via the PROM.
	 */
	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		/* Allocate the PMD page for this PUD slot if it is not
		 * populated yet, accounting the page in alloc_bytes.
		 */
		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		/* Likewise allocate the PTE page for this PMD slot. */
		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		/* Fill PTEs one page at a time, stopping at the end of
		 * this PMD's coverage or at vend, whichever comes first.
		 */
		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern struct linux_mlist_p1275 *prom_ptot_ptr;
extern unsigned int kvmap_linear_patch[1];

/* CONFIG_DEBUG_PAGEALLOC setup: walk the firmware (PROM) physical
 * memory list and build full kernel page tables for the entire linear
 * mapping, then patch the TLB-miss fast path so misses fall through
 * to the page table walk instead of computing the PTE on the fly.
 * Called once from paging_init().
 */
static void __init kernel_physical_mapping_init(void)
{
	struct linux_mlist_p1275 *p = prom_ptot_ptr;
	unsigned long mem_alloced = 0UL;

	/* Map every physical range the firmware reported, with normal
	 * kernel protections; tally page-table bytes allocated.
	 */
	while (p) {
		unsigned long phys_start, phys_end;

		phys_start = p->start_adr;
		phys_end = phys_start + p->num_bytes;
		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);

		p = p->theres_more;
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	/* Overwrite the branch at kvmap_linear_patch with a nop
	 * (0x01000000 is the sparc64 nop encoding) and flush the
	 * instruction cache so the patched instruction takes effect.
	 */
	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
}

/* CONFIG_DEBUG_PAGEALLOC hook: map (enable != 0) or unmap
 * (enable == 0) 'numpages' pages starting at 'page' in the kernel
 * linear mapping.  Unmapping is done by rewriting the PTEs with an
 * empty protection value (__pgprot(0)) so later accesses fault.
 */
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

void __init paging_init(void)
{
	extern pmd_t swapper_pmd_dir[1024];
	unsigned long end_pfn, pages_avail, shift;
	unsigned long real_end;

@@ -1361,11 +1460,11 @@ void __init paging_init(void)
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
	
	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_pmd_dir + (shift / sizeof(pgd_t)));
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
	
	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
	
@@ -1390,6 +1489,10 @@ void __init paging_init(void)
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kernel_physical_mapping_init();
#endif

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
Loading