Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bab70a4a authored by Eugene Surovegin, committed by Paul Mackerras
Browse files

[PATCH] lock PTE before updating it in 440/BookE page fault handler



Fix 44x and BookE page fault handler to correctly lock PTE before
trying to pte_update() it, otherwise this PTE might be swapped out
after pte_present() check but before pte_update() call, resulting in
corrupted PTE. This can happen with enabled preemption and low memory
condition.

Signed-off-by: Eugene Surovegin <ebs@ebshome.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent bac30d1a
Loading
Loading
Loading
Loading
+17 −13
Original line number Diff line number Diff line
@@ -267,11 +267,15 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
#endif
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		pte_t *ptep;
		pmd_t *pmdp;

		/* Since 4xx/Book-E supports per-page execute permission,
		 * we lazily flush dcache to icache. */
		ptep = NULL;
		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
		if (get_pteptr(mm, address, &ptep, &pmdp)) {
			spinlock_t *ptl = pte_lockptr(mm, pmdp);
			spin_lock(ptl);
			if (pte_present(*ptep)) {
				struct page *page = pte_page(*ptep);

				if (!test_bit(PG_arch_1, &page->flags)) {
@@ -280,12 +284,12 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
				}
				pte_update(ptep, 0, _PAGE_HWEXEC);
				_tlbie(address);
			pte_unmap(ptep);
				pte_unmap_unlock(ptep, ptl);
				up_read(&mm->mmap_sem);
				return 0;
			}
		if (ptep != NULL)
			pte_unmap(ptep);
			pte_unmap_unlock(ptep, ptl);
		}
#endif
	/* a write */
	} else if (is_write) {
+4 −2
Original line number Diff line number Diff line
@@ -372,7 +372,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
 * the PTE pointer is unmodified if PTE is not found.
 */
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
        pgd_t	*pgd;
        pmd_t	*pmd;
@@ -387,6 +387,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
                        if (pte) {
				retval = 1;
				*ptep = pte;
				if (pmdp)
					*pmdp = pmd;
				/* XXX caller needs to do pte_unmap, yuck */
                        }
                }
@@ -424,7 +426,7 @@ unsigned long iopa(unsigned long addr)
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte)) {
	if (get_pteptr(mm, addr, &pte, NULL)) {
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
		pte_unmap(pte);
	}
+17 −13
Original line number Diff line number Diff line
@@ -202,6 +202,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
	/* an exec  - 4xx/Book-E allows for per-page execute permission */
	} else if (TRAP(regs) == 0x400) {
		pte_t *ptep;
		pmd_t *pmdp;

#if 0
		/* It would be nice to actually enforce the VM execute
@@ -215,7 +216,10 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
		/* Since 4xx/Book-E supports per-page execute permission,
		 * we lazily flush dcache to icache. */
		ptep = NULL;
		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
		if (get_pteptr(mm, address, &ptep, &pmdp)) {
			spinlock_t *ptl = pte_lockptr(mm, pmdp);
			spin_lock(ptl);
			if (pte_present(*ptep)) {
				struct page *page = pte_page(*ptep);

				if (!test_bit(PG_arch_1, &page->flags)) {
@@ -224,12 +228,12 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
				}
				pte_update(ptep, 0, _PAGE_HWEXEC);
				_tlbie(address);
			pte_unmap(ptep);
				pte_unmap_unlock(ptep, ptl);
				up_read(&mm->mmap_sem);
				return 0;
			}
		if (ptep != NULL)
			pte_unmap(ptep);
			pte_unmap_unlock(ptep, ptl);
		}
#endif
	/* a read */
	} else {
+4 −2
Original line number Diff line number Diff line
@@ -368,7 +368,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
 * the PTE pointer is unmodified if PTE is not found.
 */
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
        pgd_t	*pgd;
        pmd_t	*pmd;
@@ -383,6 +383,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
                        if (pte) {
				retval = 1;
				*ptep = pte;
				if (pmdp)
					*pmdp = pmd;
				/* XXX caller needs to do pte_unmap, yuck */
                        }
                }
@@ -420,7 +422,7 @@ unsigned long iopa(unsigned long addr)
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte)) {
	if (get_pteptr(mm, addr, &pte, NULL)) {
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
		pte_unmap(pte);
	}
+2 −1
Original line number Diff line number Diff line
@@ -837,7 +837,8 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#include <asm-generic/pgtable.h>