mm/memory.c: +1 −13

@@ -1636,20 +1636,8 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
-	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
+	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
 		unmap_single_vma(&tlb, vma, start, end, NULL);
-
-		/*
-		 * zap_page_range does not specify whether mmap_sem should be
-		 * held for read or write. That allows parallel zap_page_range
-		 * operations to unmap a PTE and defer a flush meaning that
-		 * this call observes pte_none and fails to flush the TLB.
-		 * Rather than adding a complex API, ensure that no stale
-		 * TLB entries exist when this call returns.
-		 */
-		flush_tlb_range(vma, start, end);
-	}
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	tlb_finish_mmu(&tlb, start, end);
 }
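The hunk drops the per-VMA flush_tlb_range() from the loop; the one flush that remains is the batched flush performed by the mmu_gather machinery when tlb_finish_mmu() runs. Below is a minimal userspace sketch of that gather-then-flush pattern, assuming only the call shape visible in the diff (tlb_gather_mmu() up front, per-range unmaps, one finish). All type and function names here are toys, not the kernel's real mmu_gather definitions:

/*
 * Toy model of the deferred-flush pattern above: each unmap only
 * widens the gathered range, and a single flush covering everything
 * happens at finish time, instead of one flush per VMA in the loop.
 */
#include <stdio.h>

struct toy_gather {
	unsigned long start;	/* lowest address unmapped so far  */
	unsigned long end;	/* highest address unmapped so far */
	int dirty;		/* anything gathered yet?          */
};

static void toy_gather_init(struct toy_gather *tlb)
{
	tlb->dirty = 0;
}

/* Record an unmapped range; no flush happens yet (the deferral). */
static void toy_unmap_range(struct toy_gather *tlb,
			    unsigned long start, unsigned long end)
{
	if (!tlb->dirty) {
		tlb->start = start;
		tlb->end = end;
		tlb->dirty = 1;
		return;
	}
	if (start < tlb->start)
		tlb->start = start;
	if (end > tlb->end)
		tlb->end = end;
}

/* One flush covering everything gathered, as tlb_finish_mmu() does. */
static void toy_finish(struct toy_gather *tlb)
{
	if (tlb->dirty)
		printf("flush [%#lx, %#lx)\n", tlb->start, tlb->end);
}

int main(void)
{
	struct toy_gather tlb;

	toy_gather_init(&tlb);
	toy_unmap_range(&tlb, 0x1000, 0x3000);	/* first "VMA"  */
	toy_unmap_range(&tlb, 0x5000, 0x8000);	/* second "VMA" */
	toy_finish(&tlb);			/* single batched flush */
	return 0;
}

Running this prints one "flush [0x1000, 0x8000)" line for two unmaps, which is the behavior the diff relies on: once the gather tracks the full range, the extra flush inside the loop is redundant.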