Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a56dbddf authored by Tejun Heo
Browse files

percpu: move fully free chunk reclamation into a work



Impact: code reorganization for later changes

Do fully free chunk reclamation using a workqueue work item.  This
change prepares for upcoming locking changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
parent 9f7dcf22
Loading
Loading
Loading
Loading
+38 −10
Original line number Original line Diff line number Diff line
@@ -63,6 +63,7 @@
#include <linux/rbtree.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>


#include <asm/cacheflush.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/tlbflush.h>
@@ -118,6 +119,10 @@ static DEFINE_MUTEX(pcpu_mutex);
static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */


/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
static int __pcpu_size_to_slot(int size)
{
{
	int highbit = fls(size);	/* size is in bytes */
	int highbit = fls(size);	/* size is in bytes */
@@ -846,14 +851,38 @@ void *__alloc_reserved_percpu(size_t size, size_t align)
	return pcpu_alloc(size, align, true);
	return pcpu_alloc(size, align, true);
}
}


static void pcpu_kill_chunk(struct pcpu_chunk *chunk)
/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 */
static void pcpu_reclaim(struct work_struct *work)
{
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_mutex);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);
		WARN_ON(chunk->immutable);
	pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);

	list_del(&chunk->list);
		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		rb_erase(&chunk->rb_node, &pcpu_addr_root);
		rb_erase(&chunk->rb_node, &pcpu_addr_root);
		list_move(&chunk->list, &todo);
	}

	mutex_unlock(&pcpu_mutex);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
		free_pcpu_chunk(chunk);
		free_pcpu_chunk(chunk);
	}
	}
}


/**
/**
 * free_percpu - free percpu area
 * free_percpu - free percpu area
@@ -877,14 +906,13 @@ void free_percpu(void *ptr)


	pcpu_free_area(chunk, off);
	pcpu_free_area(chunk, off);


	/* the chunk became fully free, kill one if there are other free ones */
	/* if there are more than one fully free chunks, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;
		struct pcpu_chunk *pos;


		list_for_each_entry(pos,
		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
				    &pcpu_slot[pcpu_chunk_slot(chunk)], list)
			if (pos != chunk) {
			if (pos != chunk) {
				pcpu_kill_chunk(pos);
				schedule_work(&pcpu_reclaim_work);
				break;
				break;
			}
			}
	}
	}