
Commit 4188180f authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "mm: add vfree_atomic()"

parents 035e6e88 3af922c0
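
This merge brings in the upstream rework of the lazy vmap purge path along with the new vfree_atomic() interface: purging is now serialized by a dedicated vmap_purge_lock, __purge_vmap_area_lazy() takes a plain start/end range and returns whether anything was purged, the free_unmap_vmap_area_noflush() and free_unmap_vmap_area_addr() helpers are folded into their callers, and vfree_atomic() defers the actual unmap to the per-CPU vfree_deferred worker so that vmalloc memory can be released from any atomic context except NMI. A short usage sketch follows the diff below.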
include/linux/vmalloc.h  +1 −0
@@ -84,6 +84,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			const void *caller);
 
 extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
mm/vmalloc.c  +81 −75
@@ -662,6 +662,13 @@ static unsigned long lazy_max_pages(void)
 
 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 
+/*
+ * Serialize vmap purging.  There is no actual critical section protected
+ * by this lock, but we want to avoid concurrent calls for performance
+ * reasons and to make pcpu_get_vm_areas() more deterministic.
+ */
+static DEFINE_SPINLOCK(vmap_purge_lock);
+
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
@@ -676,59 +683,36 @@ void set_iounmap_nonlazy(void)
 
 /*
  * Purges all lazily-freed vmap areas.
- *
- * If sync is 0 then don't purge if there is already a purge in progress.
- * If force_flush is 1, then flush kernel TLBs between *start and *end even
- * if we found no lazy vmap areas to unmap (callers can use this to optimise
- * their own TLB flushing).
- * Returns with *start = min(*start, lowest purged address)
- *              *end = max(*end, highest purged address)
  */
-static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
-					int sync, int force_flush)
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 {
-	static DEFINE_SPINLOCK(purge_lock);
 	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
 	int nr = 0;
 
-	/*
-	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
-	 * should not expect such behaviour. This just simplifies locking for
-	 * the case that isn't actually used at the moment anyway.
-	 */
-	if (!sync && !force_flush) {
-		if (!spin_trylock(&purge_lock))
-			return;
-	} else
-		spin_lock(&purge_lock);
-
-	if (sync)
-		purge_fragmented_blocks_allcpus();
+	lockdep_assert_held(&vmap_purge_lock);
 
 	valist = llist_del_all(&vmap_purge_list);
 	llist_for_each_entry(va, valist, purge_list) {
-		if (va->va_start < *start)
-			*start = va->va_start;
-		if (va->va_end > *end)
-			*end = va->va_end;
+		if (va->va_start < start)
+			start = va->va_start;
+		if (va->va_end > end)
+			end = va->va_end;
 		nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
 	}
 
-	if (nr)
-		atomic_sub(nr, &vmap_lazy_nr);
+	if (!nr)
+		return false;
 
-	if (nr || force_flush)
-		flush_tlb_kernel_range(*start, *end);
+	atomic_sub(nr, &vmap_lazy_nr);
+	flush_tlb_kernel_range(start, end);
 
-	if (nr) {
-		spin_lock(&vmap_area_lock);
-		llist_for_each_entry_safe(va, n_va, valist, purge_list)
-			__free_vmap_area(va);
-		spin_unlock(&vmap_area_lock);
-	}
-	spin_unlock(&purge_lock);
+	spin_lock(&vmap_area_lock);
+	llist_for_each_entry_safe(va, n_va, valist, purge_list)
+		__free_vmap_area(va);
+	spin_unlock(&vmap_area_lock);
+	return true;
 }
 
 /*
@@ -737,9 +721,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
  */
 static void try_purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	if (spin_trylock(&vmap_purge_lock)) {
+		__purge_vmap_area_lazy(ULONG_MAX, 0);
+		spin_unlock(&vmap_purge_lock);
+	}
 }
 
 /*
@@ -747,9 +732,10 @@ static void try_purge_vmap_area_lazy(void)
  */
 static void purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 1, 0);
+	spin_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	__purge_vmap_area_lazy(ULONG_MAX, 0);
+	spin_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -771,23 +757,14 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 		try_purge_vmap_area_lazy();
 }
 
-/*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
- */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
-{
-	unmap_vmap_area(va);
-	free_vmap_area_noflush(va);
-}
-
 /*
  * Free and unmap a vmap area
  */
 static void free_unmap_vmap_area(struct vmap_area *va)
 {
 	flush_cache_vunmap(va->va_start, va->va_end);
-	free_unmap_vmap_area_noflush(va);
+	unmap_vmap_area(va);
+	free_vmap_area_noflush(va);
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
@@ -801,16 +778,6 @@ static struct vmap_area *find_vmap_area(unsigned long addr)
 	return va;
 }
 
-static void free_unmap_vmap_area_addr(unsigned long addr)
-{
-	struct vmap_area *va;
-
-	va = find_vmap_area(addr);
-	BUG_ON(!va);
-	free_unmap_vmap_area(va);
-}
-
-
 /*** Per cpu kva allocator ***/
 
 /*
@@ -1155,7 +1122,11 @@ void vm_unmap_aliases(void)
 		rcu_read_unlock();
 	}
 
-	__purge_vmap_area_lazy(&start, &end, 1, flush);
+	spin_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	if (!__purge_vmap_area_lazy(start, end) && flush)
+		flush_tlb_kernel_range(start, end);
+	spin_unlock(&vmap_purge_lock);
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
@@ -1168,6 +1139,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 {
 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr = (unsigned long)mem;
+	struct vmap_area *va;
 
 	BUG_ON(!addr);
 	BUG_ON(addr < VMALLOC_START);
@@ -1177,10 +1149,14 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	debug_check_no_locks_freed(mem, size);
 	vmap_debug_free_range(addr, addr+size);
 
-	if (likely(count <= VMAP_MAX_ALLOC))
+	if (likely(count <= VMAP_MAX_ALLOC)) {
 		vb_free(mem, size);
-	else
-		free_unmap_vmap_area_addr(addr);
+		return;
+	}
+
+	va = find_vmap_area(addr);
+	BUG_ON(!va);
+	free_unmap_vmap_area(va);
 }
 EXPORT_SYMBOL(vm_unmap_ram);
 
@@ -1610,6 +1586,38 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	return;
 }
 
+static inline void __vfree_deferred(const void *addr)
+{
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list.  schedule_work() should be fine with this too.
+	 */
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+	if (llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
+}
+
+/**
+ *	vfree_atomic  -  release memory allocated by vmalloc()
+ *	@addr:		memory base address
+ *
+ *	This one is just like vfree() but can be called in any atomic context
+ *	except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+	BUG_ON(in_nmi());
+
+	kmemleak_free(addr);
+
+	if (!addr)
+		return;
+	__vfree_deferred(addr);
+}
+
 /**
  *	vfree  -  release memory allocated by vmalloc()
  *	@addr:		memory base address
@@ -1632,11 +1640,9 @@ void vfree(const void *addr)
 
 	if (!addr)
 		return;
-	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
-		if (llist_add((struct llist_node *)addr, &p->list))
-			schedule_work(&p->wq);
-	} else
+	if (unlikely(in_interrupt()))
+		__vfree_deferred(addr);
+	else
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
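
For illustration, a minimal sketch of a caller that cannot sleep, assuming only the interface added above; the demo_ctx structure and demo_teardown() function are hypothetical and not part of this change:

#include <linux/spinlock.h>
#include <linux/vmalloc.h>

/* Hypothetical per-device state; big_buf was allocated with vmalloc(). */
struct demo_ctx {
	spinlock_t lock;
	void *big_buf;
};

/*
 * Runs with a spinlock held, so it must not sleep.  vfree() only defers
 * the free when in_interrupt() is true, which need not hold here, while
 * vfree_atomic() always defers: it pushes the address onto the per-CPU
 * vfree_deferred llist and schedules a worker, so the real unmap happens
 * later in process context.
 */
static void demo_teardown(struct demo_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	vfree_atomic(ctx->big_buf);	/* NULL is tolerated, as in vfree() */
	ctx->big_buf = NULL;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

Calls from NMI context remain forbidden; vfree_atomic() opens with BUG_ON(in_nmi()).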