
Commit d4d87115 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "mm: refactor __purge_vmap_area_lazy()"

parents 3aa63aa4 0818bc7a
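
In outline (read from the hunks below, not from an official change description): lazily freed vmap areas are no longer left on vmap_area_list with a VM_LAZY_FREE flag and found by scanning; they are pushed onto a dedicated lock-free list, vmap_purge_list, and drained in one batch. __purge_vmap_area_lazy() loses its sync and force_flush parameters, and callers now take the new vmap_purge_lock themselves and decide whether to also purge the fragmented per-CPU blocks or flush the TLB. A rough before/after sketch of the calling convention, assembled from the diff purely for illustration:

	/* Before: the callee handled locking, syncing and TLB flushing. */
	unsigned long start = ULONG_MAX, end = 0;
	__purge_vmap_area_lazy(&start, &end, 1, 0);

	/* After: the caller serializes and picks the extra work itself. */
	spin_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	spin_unlock(&vmap_purge_lock);
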
+2 −0
@@ -3778,7 +3778,9 @@ static void binder_deferred_release(struct binder_proc *proc)
 			page_count++;
 		}
 		kfree(proc->pages);
+		preempt_enable_no_resched();
 		vfree(proc->buffer);
+		preempt_disable();
 	}
 
 	put_task_struct(proc->tsk);
include/linux/vmalloc.h  +2 −1
@@ -4,6 +4,7 @@
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <asm/page.h>		/* pgprot_t */
 #include <linux/rbtree.h>
 
@@ -47,7 +48,7 @@ struct vmap_area {
 	unsigned long flags;
 	struct rb_node rb_node;         /* address sorted rbtree */
 	struct list_head list;          /* address sorted list */
-	struct list_head purge_list;    /* "lazy purge" list */
+	struct llist_node purge_list;    /* "lazy purge" list */
 	struct vm_struct *vm;
 	struct rcu_head rcu_head;
 };
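
The purge_list member switches from struct list_head to struct llist_node because queued areas are now handled with the kernel's lock-free llist primitives (see the mm/vmalloc.c hunks below): producers push with llist_add() and the purger detaches the whole batch with llist_del_all(). A minimal sketch of that pattern, using a hypothetical stand-in list head rather than the real vmap_purge_list:

	static LLIST_HEAD(example_purge_list);	/* stand-in for vmap_purge_list */

	/* Producer: queue one vmap_area without taking any lock. */
	llist_add(&va->purge_list, &example_purge_list);

	/* Consumer: atomically take everything queued so far... */
	struct llist_node *batch = llist_del_all(&example_purge_list);
	struct vmap_area *pos, *tmp;

	/* ...and walk it; the _safe variant allows freeing entries while iterating. */
	llist_for_each_entry_safe(pos, tmp, batch, purge_list)
		__free_vmap_area(pos);
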
mm/vmalloc.c  +59 −84
@@ -274,13 +274,12 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 
 /*** Global kva allocator ***/
 
-#define VM_LAZY_FREE	0x01
-#define VM_LAZY_FREEING	0x02
 #define VM_VM_AREA	0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
 /* Export for kexec only */
 LIST_HEAD(vmap_area_list);
+static LLIST_HEAD(vmap_purge_list);
 static struct rb_root vmap_area_root = RB_ROOT;
 
 /* The vmap cache globals are protected by vmap_area_lock */
@@ -628,6 +627,13 @@ static unsigned long lazy_max_pages(void)
 
 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 
+/*
+ * Serialize vmap purging.  There is no actual criticial section protected
+ * by this look, but we want to avoid concurrent calls for performance
+ * reasons and to make the pcpu_get_vm_areas more deterministic.
+ */
+static DEFINE_SPINLOCK(vmap_purge_lock);
+
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
@@ -642,65 +648,36 @@ void set_iounmap_nonlazy(void)
 
 /*
  * Purges all lazily-freed vmap areas.
- *
- * If sync is 0 then don't purge if there is already a purge in progress.
- * If force_flush is 1, then flush kernel TLBs between *start and *end even
- * if we found no lazy vmap areas to unmap (callers can use this to optimise
- * their own TLB flushing).
- * Returns with *start = min(*start, lowest purged address)
- *              *end = max(*end, highest purged address)
  */
-static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
-					int sync, int force_flush)
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 {
-	static DEFINE_SPINLOCK(purge_lock);
-	LIST_HEAD(valist);
+	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
 	int nr = 0;
 
-	/*
-	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
-	 * should not expect such behaviour. This just simplifies locking for
-	 * the case that isn't actually used at the moment anyway.
-	 */
-	if (!sync && !force_flush) {
-		if (!spin_trylock(&purge_lock))
-			return;
-	} else
-		spin_lock(&purge_lock);
+	lockdep_assert_held(&vmap_purge_lock);
 
-	if (sync)
-		purge_fragmented_blocks_allcpus();
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(va, &vmap_area_list, list) {
-		if (va->flags & VM_LAZY_FREE) {
-			if (va->va_start < *start)
-				*start = va->va_start;
-			if (va->va_end > *end)
-				*end = va->va_end;
+	valist = llist_del_all(&vmap_purge_list);
+	llist_for_each_entry(va, valist, purge_list) {
+		if (va->va_start < start)
+			start = va->va_start;
+		if (va->va_end > end)
+			end = va->va_end;
 		nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
-			list_add_tail(&va->purge_list, &valist);
-			va->flags |= VM_LAZY_FREEING;
-			va->flags &= ~VM_LAZY_FREE;
-		}
 	}
-	rcu_read_unlock();
 
-	if (nr)
-		atomic_sub(nr, &vmap_lazy_nr);
+	if (!nr)
+		return false;
 
-	if (nr || force_flush)
-		flush_tlb_kernel_range(*start, *end);
+	atomic_sub(nr, &vmap_lazy_nr);
+	flush_tlb_kernel_range(start, end);
 
-	if (nr) {
 	spin_lock(&vmap_area_lock);
-		list_for_each_entry_safe(va, n_va, &valist, purge_list)
+	llist_for_each_entry_safe(va, n_va, valist, purge_list)
 		__free_vmap_area(va);
 	spin_unlock(&vmap_area_lock);
-	}
-	spin_unlock(&purge_lock);
+	return true;
 }
 
 /*
@@ -709,9 +686,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
  */
 static void try_purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	if (spin_trylock(&vmap_purge_lock)) {
+		__purge_vmap_area_lazy(ULONG_MAX, 0);
+		spin_unlock(&vmap_purge_lock);
+	}
 }
 
 /*
@@ -719,9 +697,10 @@ static void try_purge_vmap_area_lazy(void)
  */
 static void purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 1, 0);
+	spin_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	__purge_vmap_area_lazy(ULONG_MAX, 0);
+	spin_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -731,20 +710,16 @@ static void purge_vmap_area_lazy(void)
  */
 static void free_vmap_area_noflush(struct vmap_area *va)
 {
-	va->flags |= VM_LAZY_FREE;
-	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
-	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		try_purge_vmap_area_lazy();
-}
+	int nr_lazy;
 
-/*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
- */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
-{
-	unmap_vmap_area(va);
-	free_vmap_area_noflush(va);
+	nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
+				    &vmap_lazy_nr);
+
+	/* After this point, we may free va at any time */
+	llist_add(&va->purge_list, &vmap_purge_list);
+
+	if (unlikely(nr_lazy > lazy_max_pages()))
+		try_purge_vmap_area_lazy();
 }
 
 /*
@@ -753,7 +728,8 @@ static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 static void free_unmap_vmap_area(struct vmap_area *va)
 {
 	flush_cache_vunmap(va->va_start, va->va_end);
-	free_unmap_vmap_area_noflush(va);
+	unmap_vmap_area(va);
+	free_vmap_area_noflush(va);
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
@@ -767,16 +743,6 @@ static struct vmap_area *find_vmap_area(unsigned long addr)
 	return va;
 }
 
-static void free_unmap_vmap_area_addr(unsigned long addr)
-{
-	struct vmap_area *va;
-
-	va = find_vmap_area(addr);
-	BUG_ON(!va);
-	free_unmap_vmap_area(va);
-}
-
-
 /*** Per cpu kva allocator ***/
 
 /*
@@ -1121,7 +1087,11 @@ void vm_unmap_aliases(void)
 		rcu_read_unlock();
 	}
 
-	__purge_vmap_area_lazy(&start, &end, 1, flush);
+	spin_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	if (!__purge_vmap_area_lazy(start, end) && flush)
+		flush_tlb_kernel_range(start, end);
+	spin_unlock(&vmap_purge_lock);
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
@@ -1134,6 +1104,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 {
 	unsigned long size = count << PAGE_SHIFT;
 	unsigned long addr = (unsigned long)mem;
+	struct vmap_area *va;
 
 	BUG_ON(!addr);
 	BUG_ON(addr < VMALLOC_START);
@@ -1143,10 +1114,14 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	debug_check_no_locks_freed(mem, size);
 	vmap_debug_free_range(addr, addr+size);
 
-	if (likely(count <= VMAP_MAX_ALLOC))
+	if (likely(count <= VMAP_MAX_ALLOC)) {
 		vb_free(mem, size);
-	else
-		free_unmap_vmap_area_addr(addr);
+		return;
+	}
+
+	va = find_vmap_area(addr);
+	BUG_ON(!va);
+	free_unmap_vmap_area(va);
 }
 EXPORT_SYMBOL(vm_unmap_ram);
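
For context, vm_unmap_ram() now looks up the vmap_area itself and calls free_unmap_vmap_area() directly, since free_unmap_vmap_area_addr() is removed above; its external contract is unchanged, and it remains the tear-down half of vm_map_ram(). A purely illustrative caller is sketched below (pages, nr_pages, data and len are hypothetical, and the four-argument vm_map_ram() matches this kernel generation):

	void *addr = vm_map_ram(pages, nr_pages, NUMA_NO_NODE, PAGE_KERNEL);
	if (!addr)
		return -ENOMEM;

	memcpy(addr, data, len);	/* use the temporary mapping */

	vm_unmap_ram(addr, nr_pages);	/* count must match the vm_map_ram() call */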