Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f5ea64dc authored by David Gibson, committed by Benjamin Herrenschmidt
Browse files

powerpc: Get USE_STRICT_MM_TYPECHECKS working again



The typesafe version of the powerpc pagetable handling (with
USE_STRICT_MM_TYPECHECKS defined) has bitrotted again.  This patch
makes a bunch of small fixes to get it back to building status.

It's still not enabled by default as gcc still generates worse
code with it for some reason.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent cd301c7b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)

static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0;
	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)

+9 −8
Original line number Diff line number Diff line
@@ -431,7 +431,7 @@ extern int icache_44x_need_flush;
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)


#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
			 _PAGE_USER | _PAGE_ACCESSED | \
			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
@@ -570,9 +570,9 @@ static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline unsigned long pte_pgprot(pte_t pte)
static inline pgprot_t pte_pgprot(pte_t pte)
{
	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -688,7 +688,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");
#else
	*ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE);
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}

+6 −6
Original line number Diff line number Diff line
@@ -117,7 +117,7 @@
#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
#define HAVE_PAGE_AGP

#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
			 _PAGE_NO_CACHE | _PAGE_WRITETHRU |		\
			 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER |		\
			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
@@ -264,9 +264,9 @@ static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline unsigned long pte_pgprot(pte_t pte)
static inline pgprot_t pte_pgprot(pte_t pte)
{
	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}

/* Atomic PTE updates */
+4 −3
Original line number Diff line number Diff line
@@ -41,7 +41,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
		if (unlikely(pte != *ptep)) {
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
@@ -92,7 +92,7 @@ static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
		*nr -= refs;
		return 0;
	}
	if (unlikely(pte != *ptep)) {
	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		while (*nr) {
			put_page(page);
@@ -237,7 +237,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			pgd_t pgd = *pgdp;

			VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
			pr_debug("  %016lx: normal pgd %p\n", addr, (void *)pgd);
			pr_debug("  %016lx: normal pgd %p\n", addr,
				 (void *)pgd_val(pgd));
			next = pgd_addr_end(addr, end);
			if (pgd_none(pgd))
				goto slow;
+2 −2
Original line number Diff line number Diff line
@@ -541,7 +541,7 @@ static unsigned long __init htab_get_table_size(void)
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
				 PAGE_KERNEL, mmu_linear_psize,
				 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
				 mmu_kernel_ssize));
}

@@ -649,7 +649,7 @@ void __init htab_initialize(void)
		mtspr(SPRN_SDR1, _SDR1);
	}

	prot = PAGE_KERNEL;
	prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
Loading