Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit df9a7b9b authored by Thomas Gleixner
Browse files

sh-use-common-threadinfo-allocator



The core now has a threadinfo allocator which uses a kmem_cache when
THREAD_SIZE < PAGE_SIZE.

Deal with the xstate cleanup in the new arch_release_task_struct()
function.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul Mundt <lethal@linux-sh.org>
Link: http://lkml.kernel.org/r/20120505150142.189348931@linutronix.de
parent c03a6a7b
Loading
Loading
Loading
Loading
+1 −9
Original line number Original line Diff line number Diff line
@@ -88,21 +88,13 @@ static inline struct thread_info *current_thread_info(void)
	return ti;
	return ti;
}
}


/* thread information allocation */
#if THREAD_SHIFT >= PAGE_SHIFT

#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)


#endif

extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
extern void free_thread_info(struct thread_info *ti);
extern void arch_task_cache_init(void);
extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void init_thread_xstate(void);
extern void init_thread_xstate(void);


#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR

#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLY__ */


/*
/*
+2 −44
Original line number Original line Diff line number Diff line
@@ -29,52 +29,10 @@ void free_thread_xstate(struct task_struct *tsk)
	}
	}
}
}


#if THREAD_SHIFT < PAGE_SHIFT
void arch_release_task_struct(struct task_struct *tsk)
static struct kmem_cache *thread_info_cache;

/*
 * Allocate a thread_info for @tsk from the dedicated thread_info_cache,
 * preferring NUMA node @node.  Returns NULL on allocation failure.
 * (@tsk itself is not used by this variant.)
 */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the stack so stack-usage accounting can scan for untouched bytes. */
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif

	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
	return ti;
}

/*
 * Release @ti back to thread_info_cache.  The owning task's extended
 * FPU/DSP state is freed first via free_thread_xstate(ti->task).
 */
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);	/* xstate goes before the backing memory */
	kmem_cache_free(thread_info_cache, ti);
}

/*
 * Create the cache backing thread_info allocations: THREAD_SIZE-sized
 * objects aligned to THREAD_SIZE.  SLAB_PANIC makes cache-creation
 * failure fatal at boot.
 */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
/*
 * Page-based variant: allocate 2^THREAD_SIZE_ORDER contiguous pages on
 * NUMA node @node and return their kernel virtual address, or NULL on
 * failure.  (@tsk itself is not used by this variant.)
 */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the stack so stack-usage accounting can scan for untouched bytes. */
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

void free_thread_info(struct thread_info *ti)
{
{
	free_thread_xstate(ti->task);
	free_thread_xstate(tsk);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */


void arch_task_cache_init(void)
void arch_task_cache_init(void)
{
{