
Commit f5cc4eef authored by Al Viro

VM: make zap_page_range() callers that act on a single VMA use separate helper



... and not rely on ->vm_next being there for them...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 6e8bb019
+74 −39
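The split below pulls the per-VMA body of unmap_vmas() out into a standalone helper, unmap_single_vma(), and adds zap_page_range_single() on top of it, so that callers which hold exactly one VMA never enter a loop that follows ->vm_next. The self-contained userspace sketch that follows illustrates the same refactoring pattern only by analogy: every name in it (struct range, zap_one, zap_all) is invented for the example and is not a kernel API.

/*
 * Illustrative userspace analogy of the pattern in this patch: the
 * per-node work is split out of the list walker, so a caller that
 * holds a single node never touches ->next.
 */
#include <stdio.h>

struct range {
	unsigned long start, end;
	struct range *next;	/* plays the role of vma->vm_next */
};

/* Acts on exactly one node; never follows ->next (cf. unmap_single_vma). */
static void zap_one(struct range *r, unsigned long start, unsigned long end)
{
	unsigned long s = r->start > start ? r->start : start;
	unsigned long e = r->end < end ? r->end : end;

	if (s >= e)
		return;
	printf("zap [%lu, %lu)\n", s, e);
}

/* Walks the list and applies the helper to each node (cf. unmap_vmas). */
static void zap_all(struct range *head, unsigned long start, unsigned long end)
{
	struct range *r;

	for (r = head; r && r->start < end; r = r->next)
		zap_one(r, start, end);
}

int main(void)
{
	struct range b = { 30, 40, NULL };
	struct range a = { 10, 20, &b };

	zap_all(&a, 0, 100);	/* multi-node caller uses the walker */
	zap_one(&b, 0, 100);	/* single-node caller skips the walk */
	return 0;
}

In the patch itself, zap_vma_ptes() and unmap_mapping_range_vma() are the single-VMA callers, and they switch from zap_page_range() to the new zap_page_range_single().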
@@ -1307,44 +1307,20 @@ static void unmap_page_range(struct mmu_gather *tlb,
 	mem_cgroup_uncharge_end();
 }
 
-/**
- * unmap_vmas - unmap a range of memory covered by a list of vma's
- * @tlb: address of the caller's struct mmu_gather
- * @vma: the starting vma
- * @start_addr: virtual address at which to start unmapping
- * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
- *
- * Unmap all pages in the vma list.
- *
- * Only addresses between `start' and `end' will be unmapped.
- *
- * The VMA list must be sorted in ascending virtual address order.
- *
- * unmap_vmas() assumes that the caller will flush the whole unmapped address
- * range after unmap_vmas() returns.  So the only responsibility here is to
- * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
- * drops the lock and schedules.
- */
-void unmap_vmas(struct mmu_gather *tlb,
+static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
 {
-	unsigned long start = start_addr;
-	struct mm_struct *mm = vma->vm_mm;
+	unsigned long start = max(vma->vm_start, start_addr);
+	unsigned long end;
 
-	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
-	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-		unsigned long end;
-
-		start = max(vma->vm_start, start_addr);
-		if (start >= vma->vm_end)
-			continue;
-		end = min(vma->vm_end, end_addr);
-		if (end <= vma->vm_start)
-			continue;
+	if (start >= vma->vm_end)
+		return;
+	end = min(vma->vm_end, end_addr);
+	if (end <= vma->vm_start)
+		return;
 
 	if (vma->vm_flags & VM_ACCOUNT)
 		*nr_accounted += (end - start) >> PAGE_SHIFT;
@@ -1372,6 +1348,37 @@ void unmap_vmas(struct mmu_gather *tlb,
 	}
 }
 
+/**
+ * unmap_vmas - unmap a range of memory covered by a list of vma's
+ * @tlb: address of the caller's struct mmu_gather
+ * @vma: the starting vma
+ * @start_addr: virtual address at which to start unmapping
+ * @end_addr: virtual address at which to end unmapping
+ * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
+ * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * Unmap all pages in the vma list.
+ *
+ * Only addresses between `start' and `end' will be unmapped.
+ *
+ * The VMA list must be sorted in ascending virtual address order.
+ *
+ * unmap_vmas() assumes that the caller will flush the whole unmapped address
+ * range after unmap_vmas() returns.  So the only responsibility here is to
+ * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
+ * drops the lock and schedules.
+ */
+void unmap_vmas(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, unsigned long start_addr,
+		unsigned long end_addr, unsigned long *nr_accounted,
+		struct zap_details *details)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
+	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
+				 details);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1381,6 +1388,8 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * Caller must protect the VMA list
  */
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
@@ -1397,6 +1406,32 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 	tlb_finish_mmu(&tlb, address, end);
 }
 
+/**
+ * zap_page_range_single - remove user pages in a given range
+ * @vma: vm_area_struct holding the applicable pages
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * The range must fit into one VMA.
+ */
+static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+		unsigned long size, struct zap_details *details)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct mmu_gather tlb;
+	unsigned long end = address + size;
+	unsigned long nr_accounted = 0;
+
+	lru_add_drain();
+	tlb_gather_mmu(&tlb, mm, 0);
+	update_hiwater_rss(mm);
+	mmu_notifier_invalidate_range_start(mm, address, end);
+	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	mmu_notifier_invalidate_range_end(mm, address, end);
+	tlb_finish_mmu(&tlb, address, end);
+}
+
 /**
  * zap_vma_ptes - remove ptes mapping the vma
  * @vma: vm_area_struct holding ptes to be zapped
@@ -1415,7 +1450,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 	if (address < vma->vm_start || address + size > vma->vm_end ||
 	    		!(vma->vm_flags & VM_PFNMAP))
 		return -1;
-	zap_page_range(vma, address, size, NULL);
+	zap_page_range_single(vma, address, size, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
@@ -2762,7 +2797,7 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
 		unsigned long start_addr, unsigned long end_addr,
 		struct zap_details *details)
 {
-	zap_page_range(vma, start_addr, end_addr - start_addr, details);
+	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
 }
 
 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
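Taken together, the hunks above leave the multi-VMA and single-VMA paths looking roughly like this (a condensed summary of the resulting call structure, derived from the diff, not compilable code):

/*
 * unmap_vmas()                 - walks the list via vma->vm_next and calls
 *   -> unmap_single_vma()        the new per-VMA helper for each entry
 *
 * zap_page_range_single()      - single-VMA zap built directly on the
 *   -> unmap_single_vma()        helper, with no list walk
 *
 * zap_vma_ptes() and unmap_mapping_range_vma() now call
 * zap_page_range_single() instead of zap_page_range().
 */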