Commit 6e8bb019 authored by Al Viro

VM: make unmap_vmas() return void



Same story - nobody uses the return value, and it has been pointless since
"mm: Remove i_mmap_lock lockbreak" went in.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 853f5e26
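
For quick reference, the interface change amounts to the prototype diff below. This is only a sketch, not kernel source: the struct types are forward-declared instead of being pulled in from <linux/mm.h> and friends, and the old prototype is renamed unmap_vmas_old purely so both declarations can sit side by side.

/* Sketch of the signature change; the real struct definitions live in the
 * kernel headers, so forward declarations are enough here. */
struct mmu_gather;
struct vm_area_struct;
struct zap_details;

/* Before this commit: the end/restart address it returned was ignored by
 * every caller (the "_old" suffix is added here only for illustration). */
unsigned long unmap_vmas_old(struct mmu_gather *tlb,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details);

/* After this commit: the unused return value is dropped. */
void unmap_vmas(struct mmu_gather *tlb,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details);
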
include/linux/mm.h  +1 −1
@@ -895,7 +895,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
mm/memory.c  +1 −5
@@ -1316,8 +1316,6 @@ static void unmap_page_range(struct mmu_gather *tlb,
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  * @details: details of nonlinear truncation or shared cache invalidation
  *
- * Returns the end address of the unmapping (restart addr if interrupted).
- *
  * Unmap all pages in the vma list.
  *
  * Only addresses between `start' and `end' will be unmapped.
@@ -1329,7 +1327,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
@@ -1372,11 +1370,9 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
 			} else
 				unmap_page_range(tlb, vma, start, end, details);
 		}
-		start = end;
 	}
 
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
-	return start;	/* which is now the end (or restart) address */
 }
 
 /**
mm/mmap.c  +1 −2
@@ -2224,7 +2224,6 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
-	unsigned long end;
 
 	/* mm's last user has gone, and its about to be pulled down */
 	mmu_notifier_release(mm);
@@ -2249,7 +2248,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
+	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);