
Commit dceac2f0 authored by Sanjay Dwivedi, committed by Gerrit - the friendly Code Review server

mm: add preempt points into __purge_vmap_area_lazy()

Use cond_resched_lock to avoid holding the vmap_area_lock for a
potentially long time and thus creating bad latencies for various
workloads.

Change-Id: I36eb4d8dbd6604f52e5c463373a9754847a44bc2
[hch: split from a larger patch by Joel, wrote the crappy changelog]
Link: http://lkml.kernel.org/r/1479474236-4139-11-git-send-email-hch@lst.de


Signed-off-by: Joel Fernandes <joelaf@google.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Jisheng Zhang <jszhang@marvell.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Dias <joaodias@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Git-commit: 763b218ddfaf56761c19923beb7e16656f66ec62
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Sanjay Dwivedi <sanjaykd@codeaurora.org>
parent 36e92370
mm/vmalloc.c +9 −8
@@ -651,7 +651,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	LIST_HEAD(valist);
 	struct vmap_area *va;
 	struct vmap_area *n_va;
-	int nr = 0;
+	bool do_free = false;
 
 	/*
 	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
@@ -674,7 +674,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 				*start = va->va_start;
 			if (va->va_end > *end)
 				*end = va->va_end;
-			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+			do_free = true;
 			list_add_tail(&va->purge_list, &valist);
 			va->flags |= VM_LAZY_FREEING;
 			va->flags &= ~VM_LAZY_FREE;
@@ -682,16 +682,17 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	}
 	rcu_read_unlock();
 
-	if (nr)
-		atomic_sub(nr, &vmap_lazy_nr);
-
-	if (nr || force_flush)
+	if (do_free || force_flush)
 		flush_tlb_kernel_range(*start, *end);
 
-	if (nr) {
+	if (do_free) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry_safe(va, n_va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list) {
+			int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 			__free_vmap_area(va);
+			atomic_sub(nr, &vmap_lazy_nr);
+			cond_resched_lock(&vmap_area_lock);
+		}
 		spin_unlock(&vmap_area_lock);
 	}
 	spin_unlock(&purge_lock);
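
Below is a minimal, hypothetical sketch of the cond_resched_lock() pattern the hunks above apply. The names (demo_lock, demo_list, struct demo_item, demo_purge) are illustrative only and do not appear in the patch; like valist in the patch, the list here is assumed not to be walked by any other context while it is being purged.

/*
 * Sketch of the cond_resched_lock() pattern: walk a long list under
 * a spinlock, but offer a preemption point on every iteration so the
 * lock is never held across the entire walk. All names are
 * hypothetical; only the locking pattern mirrors the patch.
 */
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head node;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

static void demo_purge(void)
{
	struct demo_item *item, *next;

	spin_lock(&demo_lock);
	list_for_each_entry_safe(item, next, &demo_list, node) {
		list_del(&item->node);
		kfree(item);
		/*
		 * If a reschedule is pending, drop demo_lock, yield the
		 * CPU, and reacquire the lock before the next iteration.
		 * This bounds how long any single pass holds the lock.
		 */
		cond_resched_lock(&demo_lock);
	}
	spin_unlock(&demo_lock);
}

Note that the accounting changes with the lock behavior: vmap_lazy_nr is now decremented per area inside the loop, immediately after each __free_vmap_area(), so the counter stays accurate at every point where cond_resched_lock() may drop vmap_area_lock, instead of being subtracted in one batch before the TLB flush.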