include/linux/ksm.h  +0 −7

@@ -53,8 +53,6 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
 void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
-bool reuse_ksm_page(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
 
 #else  /* !CONFIG_KSM */
 
@@ -88,11 +86,6 @@ static inline void rmap_walk_ksm(struct page *page,
 static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 }
 
-static inline bool reuse_ksm_page(struct page *page,
-			struct vm_area_struct *vma, unsigned long address)
-{
-	return false;
-}
 #endif /* CONFIG_MMU */
 #endif /* !CONFIG_KSM */
mm/ksm.c  +0 −25

@@ -2660,31 +2660,6 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 		goto again;
 }
 
-bool reuse_ksm_page(struct page *page,
-		    struct vm_area_struct *vma, unsigned long address)
-{
-#ifdef CONFIG_DEBUG_VM
-	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
-			WARN_ON(!page_mapped(page)) ||
-			WARN_ON(!PageLocked(page))) {
-		dump_page(page, "reuse_ksm_page");
-		return false;
-	}
-#endif
-
-	if (PageSwapCache(page) || !page_stable_node(page))
-		return false;
-	/* Prohibit parallel get_ksm_page() */
-	if (!page_ref_freeze(page, 1))
-		return false;
-
-	page_move_anon_rmap(page, vma);
-	page->index = linear_page_index(vma, address);
-	page_ref_unfreeze(page, 1);
-
-	return true;
-}
-
 #ifdef CONFIG_MIGRATION
 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
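The removed reuse_ksm_page() guards its rmap move with page_ref_freeze(page, 1): a compare-and-swap that drops the refcount from 1 to 0, so a concurrent get_ksm_page() cannot take a new reference while page->anon_vma and page->index are rewritten. A minimal userspace sketch of that freeze pattern, assuming C11 atomics; struct obj, ref_freeze(), and try_exclusive_update() are illustrative names, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* An object with an atomic reference count, standing in for struct page. */
struct obj {
	atomic_int refcount;
	long index;		/* field we may only touch while "frozen" */
};

/*
 * Freeze: atomically swing the count from 'expected' down to 0, so no
 * other thread can observe a positive count and take a new reference
 * while we mutate the object. Mirrors page_ref_freeze(page, 1).
 */
static bool ref_freeze(struct obj *o, int expected)
{
	int old = expected;
	return atomic_compare_exchange_strong(&o->refcount, &old, 0);
}

/* Unfreeze: republish the count, mirroring page_ref_unfreeze(). */
static void ref_unfreeze(struct obj *o, int count)
{
	atomic_store(&o->refcount, count);
}

static bool try_exclusive_update(struct obj *o, long new_index)
{
	if (!ref_freeze(o, 1))
		return false;	/* another holder exists; give up */
	o->index = new_index;	/* safe: nobody can grab a ref at count 0 */
	ref_unfreeze(o, 1);
	return true;
}

int main(void)
{
	struct obj o = { .refcount = 1, .index = 0 };

	printf("updated: %d, index: %ld\n",
	       try_exclusive_update(&o, 42), o.index);
	return 0;
}

A successful freeze proves exclusive ownership at that instant; the store in ref_unfreeze() makes the object reachable again once the mutation is done.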
mm/memory.c  +17 −44

@@ -2755,52 +2755,25 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 	 * not dirty accountable.
 	 */
 	if (PageAnon(vmf->page)) {
-		int total_map_swapcount;
-		if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
-					   page_count(vmf->page) != 1))
+		struct page *page = vmf->page;
+
+		/* PageKsm() doesn't necessarily raise the page refcount */
+		if (PageKsm(page) || page_count(page) != 1)
+			goto copy;
+		if (!trylock_page(page))
 			goto copy;
-		if (!trylock_page(vmf->page)) {
-			get_page(vmf->page);
-			pte_unmap_unlock(vmf->pte, vmf->ptl);
-			lock_page(vmf->page);
-			if (!pte_map_lock(vmf)) {
-				unlock_page(vmf->page);
-				put_page(vmf->page);
-				return VM_FAULT_RETRY;
-			}
-			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
-				unlock_page(vmf->page);
-				pte_unmap_unlock(vmf->pte, vmf->ptl);
-				put_page(vmf->page);
-				return 0;
-			}
-			put_page(vmf->page);
-		}
-		if (PageKsm(vmf->page)) {
-			bool reused = reuse_ksm_page(vmf->page, vmf->vma,
-						     vmf->address);
-			unlock_page(vmf->page);
-			if (!reused)
-				goto copy;
-			wp_page_reuse(vmf);
-			return VM_FAULT_WRITE;
+		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
+			unlock_page(page);
+			goto copy;
 		}
-		if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
-			if (total_map_swapcount == 1) {
-				/*
-				 * The page is all ours. Move it to
-				 * our anon_vma so the rmap code will
-				 * not search our parent or siblings.
-				 * Protected against the rmap code by
-				 * the page lock.
-				 */
-				page_move_anon_rmap(vmf->page, vma);
-			}
-			unlock_page(vmf->page);
-			wp_page_reuse(vmf);
-			return VM_FAULT_WRITE;
-		}
-		unlock_page(vmf->page);
+		/*
+		 * Ok, we've got the only map reference, and the only
+		 * page count reference, and the page is locked,
+		 * it's dark out, and we're wearing sunglasses. Hit it.
+		 */
+		wp_page_reuse(vmf);
+		unlock_page(page);
+		return VM_FAULT_WRITE;
 	} else if (unlikely((vmf->vma_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
 		return wp_page_shared(vmf);
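The rewritten do_wp_page() path drops reuse_swap_page() and reuse_ksm_page() entirely: the page is written in place only when, re-checked under the page lock, the faulting task is provably its sole user (page_count() == 1 and page_mapcount() == 1, and not a KSM page); anything else falls through to the copy path. A hedged userspace distillation of that decision, where fake_page, trylock(), and can_reuse() are hypothetical stand-ins for the kernel helpers, not their real signatures:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct page; not the kernel's layout. */
struct fake_page {
	int refcount;	/* stand-in for page_count() */
	int mapcount;	/* stand-in for page_mapcount() */
	bool ksm;	/* stand-in for PageKsm() */
	bool locked;	/* stand-in for PG_locked */
};

static bool trylock(struct fake_page *p)
{
	if (p->locked)
		return false;
	p->locked = true;
	return true;
}

/* true: write the page in place; false: take the copy-on-write path. */
static bool can_reuse(struct fake_page *p)
{
	/* Cheap unlocked pre-check, mirroring the first "goto copy" exits. */
	if (p->ksm || p->refcount != 1)
		return false;
	if (!trylock(p))
		return false;
	/* Re-check under the lock: the counts may have changed meanwhile. */
	if (p->ksm || p->mapcount != 1 || p->refcount != 1) {
		p->locked = false;
		return false;
	}
	p->locked = false;	/* sole map, sole reference: reuse */
	return true;
}

int main(void)
{
	struct fake_page sole = { .refcount = 1, .mapcount = 1 };
	struct fake_page shared = { .refcount = 2, .mapcount = 2 };

	printf("sole owner reuses: %d\n", can_reuse(&sole));
	printf("shared page copies: %d\n", can_reuse(&shared));
	return 0;
}

The design choice the patch makes is visible in the sketch: rather than trying to prove a shared page safe to reuse (the job of the removed helpers), it copies whenever exclusive ownership cannot be shown, trading a possible extra copy for a much simpler and safer invariant.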