Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0d15d74a authored by Thomas Gleixner
Browse files

fork: Provide kmemcache based thread_info allocator



Several architectures have their own kmemcache based thread allocator
because THREAD_SIZE is smaller than PAGE_SIZE. Add it to the core code
conditionally on THREAD_SIZE < PAGE_SIZE so the private copies can go.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20120505150141.491002124@linutronix.de
parent 67ba5293
Loading
Loading
Loading
Loading
+27 −0
Original line number Diff line number Diff line
@@ -132,6 +132,11 @@ static inline void free_task_struct(struct task_struct *tsk)

/*
 * Weak default hook invoked before a thread_info is freed.  Architectures
 * that keep per-thread state alongside thread_info override this; the
 * default is a no-op.
 */
void __weak arch_release_thread_info(struct thread_info *ti) { }

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
@@ -146,6 +151,28 @@ static inline void free_thread_info(struct thread_info *ti)
	arch_release_thread_info(ti);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_info_cache;

/*
 * kmemcache-based thread_info allocator, used when THREAD_SIZE < PAGE_SIZE
 * so that allocating a full page per thread would be wasteful.  @node
 * selects the NUMA node to allocate from; @tsk is unused here but kept
 * for interface symmetry with the page-based variant above.
 */
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
}

/*
 * Free a thread_info obtained from alloc_thread_info_node().  Gives the
 * architecture a chance to release per-thread state first (see the weak
 * arch_release_thread_info() hook), then returns the object to the cache.
 */
static void free_thread_info(struct thread_info *ti)
{
	arch_release_thread_info(ti);
	kmem_cache_free(thread_info_cache, ti);
}

/*
 * Create the kmem cache backing the allocator above.  Objects are
 * THREAD_SIZE bytes and THREAD_SIZE-aligned (the second THREAD_SIZE is
 * the alignment argument), so a thread_info never straddles an unexpected
 * boundary.  Failure to create the cache is fatal (BUG_ON) — presumably
 * this runs during early init where recovery is not possible.
 */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
# endif
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */