Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4e7e3adb authored by David Howells
Browse files

Expand various INIT_* macros and remove



Expand various INIT_* macros into the single places they're used in
init/init_task.c and remove them.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Tested-by: Will Deacon <will.deacon@arm.com> (arm64)
Tested-by: Palmer Dabbelt <palmer@sifive.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
parent d11ed3ab
Loading
Loading
Loading
Loading
+0 −12
Original line number Original line Diff line number Diff line
@@ -764,9 +764,6 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_FUNCTION_GRAPH_TRACER


/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
/*
 * Stack of return addresses for functions
 * Stack of return addresses for functions
 * of a thread.
 * of a thread.
@@ -844,7 +841,6 @@ static inline void unpause_graph_tracing(void)
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */


#define __notrace_funcgraph
#define __notrace_funcgraph
#define INIT_FTRACE_GRAPH


static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -923,10 +919,6 @@ extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
extern int __disable_trace_on_warning;


#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos);
			     loff_t *ppos);
@@ -935,10 +927,6 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
static inline void  disable_trace_on_warning(void) { }
static inline void  disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
#endif /* CONFIG_TRACING */


#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#ifdef CONFIG_FTRACE_SYSCALLS


unsigned long arch_syscall_addr(int nr);
unsigned long arch_syscall_addr(int nr);
+0 −112
Original line number Original line Diff line number Diff line
@@ -21,23 +21,9 @@


#include <asm/thread_info.h>
#include <asm/thread_info.h>


#ifdef CONFIG_SMP
# define INIT_PUSHABLE_TASKS(tsk)					\
	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
#else
# define INIT_PUSHABLE_TASKS(tsk)
#endif

extern struct files_struct init_files;
extern struct files_struct init_files;
extern struct fs_struct init_fs;
extern struct fs_struct init_fs;


#ifdef CONFIG_CPUSETS
#define INIT_CPUSET_SEQ(tsk)							\
	.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
#else
#define INIT_CPUSET_SEQ(tsk)
#endif

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x)	.prev_cputime = {			\
#define INIT_PREV_CPUTIME(x)	.prev_cputime = {			\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock),		\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock),		\
@@ -117,107 +103,10 @@ extern struct group_info init_groups;
	.pid = &init_struct_pid,				\
	.pid = &init_struct_pid,				\
}
}


#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
	.loginuid = INVALID_UID, \
	.sessionid = (unsigned int)-1,
#else
#define INIT_IDS
#endif

#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk)					\
	.rcu_read_lock_nesting = 0,					\
	.rcu_read_unlock_special.s = 0,					\
	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),		\
	.rcu_blocked_node = NULL,
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
#ifdef CONFIG_TASKS_RCU
#define INIT_TASK_RCU_TASKS(tsk)					\
	.rcu_tasks_holdout = false,					\
	.rcu_tasks_holdout_list =					\
		LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list),		\
	.rcu_tasks_idle_cpu = -1,
#else
#define INIT_TASK_RCU_TASKS(tsk)
#endif

extern struct cred init_cred;
extern struct cred init_cred;


#ifdef CONFIG_CGROUP_SCHED
# define INIT_CGROUP_SCHED(tsk)						\
	.sched_task_group = &root_task_group,
#else
# define INIT_CGROUP_SCHED(tsk)
#endif

#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk)						\
	.perf_event_mutex = 						\
		 __MUTEX_INITIALIZER(tsk.perf_event_mutex),		\
	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk)						\
	.vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount),		\
	.vtime.starttime = 0,						\
	.vtime.state = VTIME_SYS,
#else
# define INIT_VTIME(tsk)
#endif

#define INIT_TASK_COMM "swapper"
#define INIT_TASK_COMM "swapper"


#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk)						\
	.pi_waiters = RB_ROOT_CACHED,					\
	.pi_top_task = NULL,
#else
# define INIT_RT_MUTEXES(tsk)
#endif

#ifdef CONFIG_NUMA_BALANCING
# define INIT_NUMA_BALANCING(tsk)					\
	.numa_preferred_nid = -1,					\
	.numa_group = NULL,						\
	.numa_faults = NULL,
#else
# define INIT_NUMA_BALANCING(tsk)
#endif

#ifdef CONFIG_KASAN
# define INIT_KASAN(tsk)						\
	.kasan_depth = 1,
#else
# define INIT_KASAN(tsk)
#endif

#ifdef CONFIG_LIVEPATCH
# define INIT_LIVEPATCH(tsk)						\
	.patch_state = KLP_UNDEFINED,
#else
# define INIT_LIVEPATCH(tsk)
#endif

#ifdef CONFIG_THREAD_INFO_IN_TASK
# define INIT_TASK_TI(tsk)			\
	.thread_info = INIT_THREAD_INFO(tsk),	\
	.stack_refcount = ATOMIC_INIT(1),
#else
# define INIT_TASK_TI(tsk)
#endif

#ifdef CONFIG_SECURITY
#define INIT_TASK_SECURITY .security = NULL,
#else
#define INIT_TASK_SECURITY
#endif

/* Attach to the init_task data structure for proper alignment */
/* Attach to the init_task data structure for proper alignment */
#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
#define __init_task_data __attribute__((__section__(".data..init_task")))
#define __init_task_data __attribute__((__section__(".data..init_task")))
@@ -228,5 +117,4 @@ extern struct cred init_cred;
/* Attach to the thread_info data structure for proper alignment */
/* Attach to the thread_info data structure for proper alignment */
#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))



#endif
#endif
+0 −2
Original line number Original line Diff line number Diff line
@@ -44,7 +44,6 @@ do { \
	current->softirq_context--;		\
	current->softirq_context--;		\
	crossrelease_hist_end(XHLOCK_SOFT);	\
	crossrelease_hist_end(XHLOCK_SOFT);	\
} while (0)
} while (0)
# define INIT_TRACE_IRQFLAGS	.softirqs_enabled = 1,
#else
#else
# define trace_hardirqs_on()		do { } while (0)
# define trace_hardirqs_on()		do { } while (0)
# define trace_hardirqs_off()		do { } while (0)
# define trace_hardirqs_off()		do { } while (0)
@@ -58,7 +57,6 @@ do { \
# define trace_hardirq_exit()		do { } while (0)
# define trace_hardirq_exit()		do { } while (0)
# define lockdep_softirq_enter()	do { } while (0)
# define lockdep_softirq_enter()	do { } while (0)
# define lockdep_softirq_exit()		do { } while (0)
# define lockdep_softirq_exit()		do { } while (0)
# define INIT_TRACE_IRQFLAGS
#endif
#endif


#if defined(CONFIG_IRQSOFF_TRACER) || \
#if defined(CONFIG_IRQSOFF_TRACER) || \
+0 −3
Original line number Original line Diff line number Diff line
@@ -367,8 +367,6 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);


# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)


#define lockdep_assert_held(l)	do {				\
#define lockdep_assert_held(l)	do {				\
@@ -426,7 +424,6 @@ static inline void lockdep_on(void)
 * #ifdef the call himself.
 * #ifdef the call himself.
 */
 */


# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit() 			do { } while (0)
# define lockdep_sys_exit() 			do { } while (0)
+71 −24
Original line number Original line Diff line number Diff line
@@ -16,7 +16,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);



/*
/*
 * Set up the first task table, touch at your own risk!. Base=0,
 * Set up the first task table, touch at your own risk!. Base=0,
 * limit=0x1fffff (=2MB)
 * limit=0x1fffff (=2MB)
@@ -26,7 +25,10 @@ struct task_struct init_task
	__init_task_data
	__init_task_data
#endif
#endif
= {
= {
	INIT_TASK_TI(init_task)
#ifdef CONFIG_THREAD_INFO_IN_TASK
	.thread_info	= INIT_THREAD_INFO(init_task),
	.stack_refcount	= ATOMIC_INIT(1),
#endif
	.state		= 0,
	.state		= 0,
	.stack		= init_stack,
	.stack		= init_stack,
	.usage		= ATOMIC_INIT(2),
	.usage		= ATOMIC_INIT(2),
@@ -50,8 +52,12 @@ struct task_struct init_task
		.time_slice	= RR_TIMESLICE,
		.time_slice	= RR_TIMESLICE,
	},
	},
	.tasks		= LIST_HEAD_INIT(init_task.tasks),
	.tasks		= LIST_HEAD_INIT(init_task.tasks),
	INIT_PUSHABLE_TASKS(init_task)
#ifdef CONFIG_SMP
	INIT_CGROUP_SCHED(init_task)
	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
#endif
#ifdef CONFIG_CGROUP_SCHED
	.sched_task_group = &root_task_group,
#endif
	.ptraced	= LIST_HEAD_INIT(init_task.ptraced),
	.ptraced	= LIST_HEAD_INIT(init_task.ptraced),
	.ptrace_entry	= LIST_HEAD_INIT(init_task.ptrace_entry),
	.ptrace_entry	= LIST_HEAD_INIT(init_task.ptrace_entry),
	.real_parent	= &init_task,
	.real_parent	= &init_task,
@@ -85,24 +91,65 @@ struct task_struct init_task
	},
	},
	.thread_group	= LIST_HEAD_INIT(init_task.thread_group),
	.thread_group	= LIST_HEAD_INIT(init_task.thread_group),
	.thread_node	= LIST_HEAD_INIT(init_signals.thread_head),
	.thread_node	= LIST_HEAD_INIT(init_signals.thread_head),
	INIT_IDS
#ifdef CONFIG_AUDITSYSCALL
	INIT_PERF_EVENTS(init_task)
	.loginuid	= INVALID_UID,
	INIT_TRACE_IRQFLAGS
	.sessionid	= (unsigned int)-1,
	INIT_LOCKDEP
#endif
	INIT_FTRACE_GRAPH
#ifdef CONFIG_PERF_EVENTS
	INIT_TRACE_RECURSION
	.perf_event_mutex = __MUTEX_INITIALIZER(init_task.perf_event_mutex),
	INIT_TASK_RCU_PREEMPT(init_task)
	.perf_event_list = LIST_HEAD_INIT(init_task.perf_event_list),
	INIT_TASK_RCU_TASKS(init_task)
#endif
	INIT_CPUSET_SEQ(init_task)
#ifdef CONFIG_PREEMPT_RCU
	INIT_RT_MUTEXES(init_task)
	.rcu_read_lock_nesting = 0,
	.rcu_read_unlock_special.s = 0,
	.rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry),
	.rcu_blocked_node = NULL,
#endif
#ifdef CONFIG_TASKS_RCU
	.rcu_tasks_holdout = false,
	.rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list),
	.rcu_tasks_idle_cpu = -1,
#endif
#ifdef CONFIG_CPUSETS
	.mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
#endif
#ifdef CONFIG_RT_MUTEXES
	.pi_waiters	= RB_ROOT_CACHED,
	.pi_top_task	= NULL,
#endif
	INIT_PREV_CPUTIME(init_task)
	INIT_PREV_CPUTIME(init_task)
	INIT_VTIME(init_task)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	INIT_NUMA_BALANCING(init_task)
	.vtime.seqcount	= SEQCNT_ZERO(init_task.vtime_seqcount),
	INIT_KASAN(init_task)
	.vtime.starttime = 0,
	INIT_LIVEPATCH(init_task)
	.vtime.state	= VTIME_SYS,
	INIT_TASK_SECURITY
#endif
#ifdef CONFIG_NUMA_BALANCING
	.numa_preferred_nid = -1,
	.numa_group	= NULL,
	.numa_faults	= NULL,
#endif
#ifdef CONFIG_KASAN
	.kasan_depth	= 1,
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	.softirqs_enabled = 1,
#endif
#ifdef CONFIG_LOCKDEP
	.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.ret_stack	= NULL,
#endif
#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
	.trace_recursion = 0,
#endif
#ifdef CONFIG_LIVEPATCH
	.patch_state	= KLP_UNDEFINED,
#endif
#ifdef CONFIG_SECURITY
	.security	= NULL,
#endif
};
};

EXPORT_SYMBOL(init_task);
EXPORT_SYMBOL(init_task);


/*
/*