Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e56d0903 authored by Ingo Molnar, committed by Linus Torvalds
Browse files

[PATCH] RCU signal handling



RCU tasklist_lock and RCU signal handling: send signals RCU-read-locked
instead of tasklist_lock read-locked.  This is a scalability improvement on
SMP and a preemption-latency improvement under PREEMPT_RCU.

Signed-off-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: William Irwin <wli@holomorphy.com>
Cc: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4369ef3c
Loading
Loading
Loading
Loading
+2 −2
Original line number Original line Diff line number Diff line
@@ -760,7 +760,7 @@ static inline int de_thread(struct task_struct *tsk)
		spin_lock(&oldsighand->siglock);
		spin_lock(&oldsighand->siglock);
		spin_lock(&newsighand->siglock);
		spin_lock(&newsighand->siglock);


		current->sighand = newsighand;
		rcu_assign_pointer(current->sighand, newsighand);
		recalc_sigpending();
		recalc_sigpending();


		spin_unlock(&newsighand->siglock);
		spin_unlock(&newsighand->siglock);
@@ -768,7 +768,7 @@ static inline int de_thread(struct task_struct *tsk)
		write_unlock_irq(&tasklist_lock);
		write_unlock_irq(&tasklist_lock);


		if (atomic_dec_and_test(&oldsighand->count))
		if (atomic_dec_and_test(&oldsighand->count))
			kmem_cache_free(sighand_cachep, oldsighand);
			sighand_free(oldsighand);
	}
	}


	BUG_ON(!thread_group_leader(current));
	BUG_ON(!thread_group_leader(current));
+30 −2
Original line number Original line Diff line number Diff line
@@ -34,6 +34,7 @@
#include <linux/percpu.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>


#include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
#include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */


@@ -350,8 +351,16 @@ struct sighand_struct {
	atomic_t		count;
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	spinlock_t		siglock;
	struct rcu_head		rcu;
};
};


/* RCU callback that performs the actual kmem_cache_free of a sighand_struct. */
extern void sighand_free_cb(struct rcu_head *rhp);

/*
 * Defer freeing of a sighand_struct until after an RCU grace period,
 * so that lock-free readers dereferencing ->sighand under
 * rcu_read_lock() cannot see the memory recycled out from under them.
 */
static inline void sighand_free(struct sighand_struct *sp)
{
	call_rcu(&sp->rcu, sighand_free_cb);
}

/*
/*
 * NOTE! "signal_struct" does not have it's own
 * NOTE! "signal_struct" does not have it's own
 * locking, because a shared signal_struct always
 * locking, because a shared signal_struct always
@@ -844,6 +853,7 @@ struct task_struct {
	int cpuset_mems_generation;
	int cpuset_mems_generation;
#endif
#endif
	atomic_t fs_excl;	/* holding fs exclusive resources */
	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;
};
};


static inline pid_t process_group(struct task_struct *tsk)
static inline pid_t process_group(struct task_struct *tsk)
@@ -867,8 +877,26 @@ static inline int pid_alive(struct task_struct *p)
extern void free_task(struct task_struct *tsk);
extern void free_task(struct task_struct *tsk);
extern void __put_task_struct(struct task_struct *tsk);
extern void __put_task_struct(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
#define put_task_struct(tsk) \

do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
/*
 * Try to take a reference on a task whose refcount may concurrently be
 * dropping to zero (the caller typically holds only rcu_read_lock(),
 * not a guaranteed reference).  Returns 1 if a reference was obtained,
 * 0 if ->usage had already reached zero, in which case the task is on
 * its way to being freed and must not be used.
 *
 * NOTE(review): this open-codes an atomic increment-unless-zero via
 * cmpxchg on the raw atomic_t counter — presumably because no
 * atomic_inc_not_zero() helper was available at this point; verify
 * cmpxchg is supported on all targeted architectures.
 */
static inline int get_task_struct_rcu(struct task_struct *t)
{
	int oldusage;

	do {
		oldusage = atomic_read(&t->usage);
		if (oldusage == 0)
			return 0;	/* too late: task already being released */
	} while (cmpxchg(&t->usage.counter, oldusage, oldusage+1) != oldusage);
	return 1;
}

/* RCU callback that performs the final teardown of a task_struct. */
extern void __put_task_struct_cb(struct rcu_head *rhp);

/*
 * Drop a task reference.  When the last reference goes away the
 * task_struct is freed via call_rcu() rather than immediately, so
 * concurrent rcu_read_lock() users that looked the task up lock-free
 * can keep dereferencing it until the grace period expires.
 */
static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		call_rcu(&t->rcu, __put_task_struct_cb);
}


/*
/*
 * Per process flags
 * Per process flags
+0 −1
Original line number Original line Diff line number Diff line
@@ -72,7 +72,6 @@ void release_task(struct task_struct * p)
		__ptrace_unlink(p);
		__ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);
	__exit_signal(p);
	__exit_sighand(p);
	/*
	/*
	 * Note that the fastpath in sys_times depends on __exit_signal having
	 * Note that the fastpath in sys_times depends on __exit_signal having
	 * updated the counters before a task is removed from the tasklist of
	 * updated the counters before a task is removed from the tasklist of
+9 −1
Original line number Original line Diff line number Diff line
@@ -743,6 +743,14 @@ int unshare_files(void)


EXPORT_SYMBOL(unshare_files);
EXPORT_SYMBOL(unshare_files);


/*
 * RCU callback: actually free a sighand_struct once a grace period has
 * elapsed and no RCU reader can still hold a reference to it
 * (scheduled via sighand_free()'s call_rcu()).
 */
void sighand_free_cb(struct rcu_head *rhp)
{
	struct sighand_struct *sp;

	/* Recover the enclosing sighand_struct from its embedded rcu_head. */
	sp = container_of(rhp, struct sighand_struct, rcu);
	kmem_cache_free(sighand_cachep, sp);
}

static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
{
	struct sighand_struct *sig;
	struct sighand_struct *sig;
@@ -752,7 +760,7 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
		return 0;
		return 0;
	}
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	tsk->sighand = sig;
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
	if (!sig)
		return -ENOMEM;
		return -ENOMEM;
	spin_lock_init(&sig->siglock);
	spin_lock_init(&sig->siglock);
+11 −11
Original line number Original line Diff line number Diff line
@@ -136,7 +136,7 @@ struct pid * fastcall find_pid(enum pid_type type, int nr)
	struct hlist_node *elem;
	struct hlist_node *elem;
	struct pid *pid;
	struct pid *pid;


	hlist_for_each_entry(pid, elem,
	hlist_for_each_entry_rcu(pid, elem,
			&pid_hash[type][pid_hashfn(nr)], pid_chain) {
			&pid_hash[type][pid_hashfn(nr)], pid_chain) {
		if (pid->nr == nr)
		if (pid->nr == nr)
			return pid;
			return pid;
@@ -150,15 +150,15 @@ int fastcall attach_pid(task_t *task, enum pid_type type, int nr)


	task_pid = &task->pids[type];
	task_pid = &task->pids[type];
	pid = find_pid(type, nr);
	pid = find_pid(type, nr);
	task_pid->nr = nr;
	if (pid == NULL) {
	if (pid == NULL) {
		hlist_add_head(&task_pid->pid_chain,
				&pid_hash[type][pid_hashfn(nr)]);
		INIT_LIST_HEAD(&task_pid->pid_list);
		INIT_LIST_HEAD(&task_pid->pid_list);
		hlist_add_head_rcu(&task_pid->pid_chain,
				   &pid_hash[type][pid_hashfn(nr)]);
	} else {
	} else {
		INIT_HLIST_NODE(&task_pid->pid_chain);
		INIT_HLIST_NODE(&task_pid->pid_chain);
		list_add_tail(&task_pid->pid_list, &pid->pid_list);
		list_add_tail_rcu(&task_pid->pid_list, &pid->pid_list);
	}
	}
	task_pid->nr = nr;


	return 0;
	return 0;
}
}
@@ -170,20 +170,20 @@ static fastcall int __detach_pid(task_t *task, enum pid_type type)


	pid = &task->pids[type];
	pid = &task->pids[type];
	if (!hlist_unhashed(&pid->pid_chain)) {
	if (!hlist_unhashed(&pid->pid_chain)) {
		hlist_del(&pid->pid_chain);


		if (list_empty(&pid->pid_list))
		if (list_empty(&pid->pid_list)) {
			nr = pid->nr;
			nr = pid->nr;
		else {
			hlist_del_rcu(&pid->pid_chain);
		} else {
			pid_next = list_entry(pid->pid_list.next,
			pid_next = list_entry(pid->pid_list.next,
						struct pid, pid_list);
						struct pid, pid_list);
			/* insert next pid from pid_list to hash */
			/* insert next pid from pid_list to hash */
			hlist_add_head(&pid_next->pid_chain,
			hlist_replace_rcu(&pid->pid_chain,
				&pid_hash[type][pid_hashfn(pid_next->nr)]);
					  &pid_next->pid_chain);
		}
		}
	}
	}


	list_del(&pid->pid_list);
	list_del_rcu(&pid->pid_list);
	pid->nr = 0;
	pid->nr = 0;


	return nr;
	return nr;
Loading