Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ece0e2b6 authored by Peter Zijlstra, committed by Linus Torvalds
Browse files

mm: remove pte_*map_nested()



Since we no longer need to provide KM_type, the whole pte_*map_nested()
API is now redundant, remove it.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3e4d3af5
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -318,9 +318,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
}

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

extern pgd_t swapper_pg_dir[1024];

+6 −8
Original line number Diff line number Diff line
@@ -263,17 +263,15 @@ extern struct page *empty_zero_page;
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))

#define pte_offset_map(dir,addr)	(__pte_map(dir, KM_PTE0) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr)	(__pte_map(dir, KM_PTE1) + __pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte, KM_PTE0)
#define pte_unmap_nested(pte)		__pte_unmap(pte, KM_PTE1)
#define pte_offset_map(dir,addr)	(__pte_map(dir) + __pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

#ifndef CONFIG_HIGHPTE
#define __pte_map(dir,km)	pmd_page_vaddr(*(dir))
#define __pte_unmap(pte,km)	do { } while (0)
#define __pte_map(dir)		pmd_page_vaddr(*(dir))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(dir,km)	((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
#define __pte_unmap(pte,km)	kunmap_atomic((pte - PTRS_PER_PTE), km)
#define __pte_map(dir)		((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
#define __pte_unmap(pte)	kunmap_atomic((pte - PTRS_PER_PTE))
#endif

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+2 −2
Original line number Diff line number Diff line
@@ -89,13 +89,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, address);
	pte = pte_offset_map(pmd, address);
	spin_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	spin_unlock(ptl);
	pte_unmap_nested(pte);
	pte_unmap(pte);

	return ret;
}
+2 −2
Original line number Diff line number Diff line
@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

+0 −2
Original line number Diff line number Diff line
@@ -319,9 +319,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct * vma,
Loading