drivers/lguest/lg.h: +3 −3

--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -30,7 +30,7 @@ struct lguest_dma_info
 struct pgdir
 {
-	unsigned long cr3;
+	unsigned long gpgdir;
 	pgd_t *pgdir;
 };
@@ -154,10 +154,10 @@ void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt);
 int init_guest_pagetable(struct lguest *lg, unsigned long pgtable);
 void free_guest_pagetable(struct lguest *lg);
 void guest_new_pagetable(struct lguest *lg, unsigned long pgtable);
-void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 i);
+void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
 void guest_pagetable_clear_all(struct lguest *lg);
 void guest_pagetable_flush_user(struct lguest *lg);
-void guest_set_pte(struct lguest *lg, unsigned long cr3,
+void guest_set_pte(struct lguest *lg, unsigned long gpgdir,
 		   unsigned long vaddr, pte_t val);
 void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages);
 int demand_page(struct lguest *info, unsigned long cr2, int errcode);

drivers/lguest/page_tables.c: +9 −9

--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -96,7 +96,7 @@ static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr)
 static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
 {
 	unsigned int index = vaddr >> (PGDIR_SHIFT);
-	return lg->pgdirs[lg->pgdidx].cr3 + index * sizeof(pgd_t);
+	return lg->pgdirs[lg->pgdidx].gpgdir + index * sizeof(pgd_t);
 }
 
 static unsigned long gpte_addr(struct lguest *lg,
@@ -365,7 +365,7 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
 {
 	unsigned int i;
 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
-		if (lg->pgdirs[i].cr3 == pgtable)
+		if (lg->pgdirs[i].gpgdir == pgtable)
 			break;
 	return i;
 }
@@ -374,7 +374,7 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
  * allocate a new one (and so the kernel parts are not there), we set
  * blank_pgdir. */
 static unsigned int new_pgdir(struct lguest *lg,
-			      unsigned long cr3,
+			      unsigned long gpgdir,
 			      int *blank_pgdir)
 {
 	unsigned int next;
@@ -394,7 +394,7 @@ static unsigned int new_pgdir(struct lguest *lg,
 		*blank_pgdir = 1;
 	}
 	/* Record which Guest toplevel this shadows. */
-	lg->pgdirs[next].cr3 = cr3;
+	lg->pgdirs[next].gpgdir = gpgdir;
 	/* Release all the non-kernel mappings. */
 	flush_user_mappings(lg, next);
@@ -496,7 +496,7 @@ static void do_set_pte(struct lguest *lg, int idx,
  * The benefit is that when we have to track a new page table, we can copy keep
  * all the kernel mappings. This speeds up context switch immensely. */
 void guest_set_pte(struct lguest *lg,
-		   unsigned long cr3, unsigned long vaddr, pte_t gpte)
+		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
 {
 	/* Kernel mappings must be changed on all top levels. Slow, but
 	 * doesn't happen often. */
@@ -507,7 +507,7 @@ void guest_set_pte(struct lguest *lg,
 			do_set_pte(lg, i, vaddr, gpte);
 	} else {
 		/* Is this page table one we have a shadow for? */
-		int pgdir = find_pgdir(lg, cr3);
+		int pgdir = find_pgdir(lg, gpgdir);
 		if (pgdir != ARRAY_SIZE(lg->pgdirs))
 			/* If so, do the update. */
 			do_set_pte(lg, pgdir, vaddr, gpte);
@@ -528,7 +528,7 @@ void guest_set_pte(struct lguest *lg,
  *
  * So with that in mind here's our code to to update a (top-level) PGD entry:
  */
-void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
+void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
 {
 	int pgdir;
@@ -538,7 +538,7 @@ void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
 		return;
 
 	/* If they're talking about a page table we have a shadow for... */
-	pgdir = find_pgdir(lg, cr3);
+	pgdir = find_pgdir(lg, gpgdir);
 	if (pgdir < ARRAY_SIZE(lg->pgdirs))
 		/* ... throw it away. */
 		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
@@ -558,7 +558,7 @@ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
 	/* We start on the first shadow page table, and give it a blank PGD
 	 * page. */
 	lg->pgdidx = 0;
-	lg->pgdirs[lg->pgdidx].cr3 = pgtable;
+	lg->pgdirs[lg->pgdidx].gpgdir = pgtable;
 	lg->pgdirs[lg->pgdidx].pgdir = (pgd_t*)get_zeroed_page(GFP_KERNEL);
 	if (!lg->pgdirs[lg->pgdidx].pgdir)
 		return -ENOMEM;
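The rename is purely descriptive: the field never holds a Host %cr3 value, it records the Guest's own toplevel page-table address (what the Guest itself would load into cr3), so gpgdir ("guest page directory") says what it means. To make the bookkeeping concrete, here is a minimal userspace sketch, not the kernel code: struct lguest is reduced to a toy with only the fields this patch touches, pgd_t is a simplified stand-in, and guest_toplevel is an arbitrary made-up address. It shows gpgdir naming which Guest toplevel a shadow tracks, pgdir holding the Host's shadow copy, and a find_pgdir()-style scan keyed on gpgdir, as in the patch above.

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Simplified stand-in for the kernel's pgd_t. */
typedef struct { unsigned long pgd; } pgd_t;

/* After this patch: gpgdir is the Guest's toplevel page-table address,
 * pgdir is the Host's shadow of it. */
struct pgdir {
	unsigned long gpgdir;
	pgd_t *pgdir;
};

/* Toy stand-in for the relevant parts of struct lguest. */
struct lguest {
	struct pgdir pgdirs[4];
	unsigned int pgdidx;		/* currently active shadow */
};

/* Same scan as the kernel's find_pgdir(): which shadow, if any, tracks the
 * given Guest toplevel?  Returns ARRAY_SIZE(lg->pgdirs) if none does. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].gpgdir == pgtable)
			break;
	return i;
}

int main(void)
{
	struct lguest lg = { .pgdidx = 0 };
	unsigned long guest_toplevel = 0x1000;	/* arbitrary Guest address */
	unsigned int idx;

	/* Like init_guest_pagetable(): remember which Guest toplevel the
	 * first shadow tracks, and give the shadow a blank page. */
	lg.pgdirs[0].gpgdir = guest_toplevel;
	lg.pgdirs[0].pgdir = calloc(1024, sizeof(pgd_t));
	if (!lg.pgdirs[0].pgdir)
		return 1;

	/* When the Guest later names a page table by its own address, we
	 * find the matching shadow before touching anything. */
	idx = find_pgdir(&lg, guest_toplevel);
	if (idx < ARRAY_SIZE(lg.pgdirs))
		printf("shadow %u tracks Guest toplevel %#lx\n",
		       idx, lg.pgdirs[idx].gpgdir);
	else
		printf("no shadow for %#lx yet\n", guest_toplevel);

	free(lg.pgdirs[0].pgdir);
	return 0;
}

The same gpgdir key is what guest_set_pte() and guest_set_pmd() use in the real code: the Guest refers to a page table by its own address, and the Host translates that into an index into lg->pgdirs before updating or discarding the shadow entry.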