Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e006222b authored by Vasily Gorbik's avatar Vasily Gorbik Committed by Martin Schwidefsky
Browse files

s390/mm: optimize debugfs ptdump kasan zero page walking



Kasan zero p4d/pud/pmd/pte tables are always filled in with the
corresponding kasan zero entries. Walking an area backed by the kasan
zero page is time consuming and unnecessary: a kasan zero p4d/pud/pmd
always eventually points to the kasan zero page with the same
attributes and nothing else, so such zero p4d/pud/pmd entries can
simply be skipped.

Also add a space between the address range and the page count so the
two stay visually separated when the page count gets huge.

0x0018000000000000-0x0018000010000000       256M PMD RW X
0x0018000010000000-0x001bfffff0000000 1073741312M PTE RO X
0x001bfffff0000000-0x001bfffff0001000         4K PTE RW X

Reviewed-by: default avatarMartin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: default avatarVasily Gorbik <gor@linux.ibm.com>
Signed-off-by: default avatarMartin Schwidefsky <schwidefsky@de.ibm.com>
parent 5dff0381
Loading
Loading
Loading
Loading
+34 −1
Original line number Diff line number Diff line
@@ -3,6 +3,7 @@
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@@ -109,6 +110,17 @@ static void note_page(struct seq_file *m, struct pg_state *st,
	}
}

#ifdef CONFIG_KASAN
/*
 * Emit a kasan-zero-page-backed range as a single pte-level (level 4)
 * entry, using the protection bits of the shared kasan zero pte.
 */
static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
{
	unsigned int attrs = pte_val(*kasan_zero_pte) &
			     (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);

	note_page(m, st, attrs, 4);
}
#endif

/*
 * The actual page table walker functions. In order to keep the
 * implementation of print_prot() short, we only check and pass
@@ -141,6 +153,13 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
	pmd_t *pmd;
	int i;

#ifdef CONFIG_KASAN
	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
		note_kasan_zero_page(m, st);
		return;
	}
#endif

	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
		st->current_address = addr;
		pmd = pmd_offset(pud, addr);
@@ -165,6 +184,13 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
	pud_t *pud;
	int i;

#ifdef CONFIG_KASAN
	if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
		note_kasan_zero_page(m, st);
		return;
	}
#endif

	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
		st->current_address = addr;
		pud = pud_offset(p4d, addr);
@@ -188,6 +214,13 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
	p4d_t *p4d;
	int i;

#ifdef CONFIG_KASAN
	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
		note_kasan_zero_page(m, st);
		return;
	}
#endif

	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
		st->current_address = addr;
		p4d = p4d_offset(pgd, addr);