
Commit 7e1fb765 authored by Linus Torvalds
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  futex: correctly return -EFAULT not -EINVAL
  lockdep: in_range() fix
  lockdep: fix debug_show_all_locks()
  sched: style cleanups
  futex: fix for futex_wait signal stack corruption
parents ad658cec cde898fa
include/linux/thread_info.h  +15 −2
@@ -7,13 +7,26 @@
#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H

#include <linux/types.h>

/*
 * System call restart block.
 */
struct restart_block {
	long (*fn)(struct restart_block *);
	union {
		struct {
			unsigned long arg0, arg1, arg2, arg3;
		};
		/* For futex_wait */
		struct {
			u32 *uaddr;
			u32 val;
			u32 flags;
			u64 time;
		} futex;
	};
};

extern long do_no_restart_syscall(struct restart_block *parm);
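For context on the new restart_block layout above, here is a minimal user-space sketch of the pattern (mock names and types, not kernel code): per-call restart state lives in a typed member of an anonymous union, so the restart handler reads the 64-bit deadline and flags back directly instead of round-tripping them through unsigned long arg0..arg3 casts.

/* Illustrative user-space mock of the restart_block pattern -- not kernel code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct mock_restart_block {
	long (*fn)(struct mock_restart_block *);
	union {
		struct {
			unsigned long arg0, arg1, arg2, arg3;   /* legacy, untyped */
		};
		struct {                /* typed state for one specific call */
			uint32_t *uaddr;
			uint32_t val;
			uint32_t flags;
			uint64_t time;  /* deadline stored by value, not as a pointer */
		} futex;
	};
};

/* The restart handler reads typed fields back; no casts, no stale pointers. */
static long mock_restart(struct mock_restart_block *r)
{
	printf("restart: val=%" PRIu32 " flags=%" PRIu32 " time=%" PRIu64 "\n",
	       r->futex.val, r->futex.flags, r->futex.time);
	return 0;
}

int main(void)
{
	static uint32_t futex_word;
	struct mock_restart_block rb = { .fn = mock_restart };

	/* What a waiter would stash before asking for a restart. */
	rb.futex.uaddr = &futex_word;
	rb.futex.val   = 42;
	rb.futex.flags = 1;
	rb.futex.time  = 123456789ULL;

	return (int)rb.fn(&rb);    /* what the restart path would invoke */
}

Any C11 compiler accepts the anonymous union/struct; the real structure additionally relies on u32/u64 from <linux/types.h>, which is why the include is added above.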

kernel/futex.c  +14 −13
@@ -658,7 +658,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)

		if (curval == -EFAULT)
			ret = -EFAULT;
-		if (curval != uval)
+		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&pi_state->pi_mutex.wait_lock);
@@ -1149,9 +1149,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,

/*
 * In case we must use restart_block to restart a futex_wait,
- * we encode in the 'arg3' shared capability
+ * we encode in the 'flags' shared capability
 */
-#define ARG3_SHARED  1
+#define FLAGS_SHARED  1

static long futex_wait_restart(struct restart_block *restart);

@@ -1290,12 +1290,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
		struct restart_block *restart;
		restart = &current_thread_info()->restart_block;
		restart->fn = futex_wait_restart;
-		restart->arg0 = (unsigned long)uaddr;
-		restart->arg1 = (unsigned long)val;
-		restart->arg2 = (unsigned long)abs_time;
-		restart->arg3 = 0;
+		restart->futex.uaddr = (u32 *)uaddr;
+		restart->futex.val = val;
+		restart->futex.time = abs_time->tv64;
+		restart->futex.flags = 0;

		if (fshared)
-			restart->arg3 |= ARG3_SHARED;
+			restart->futex.flags |= FLAGS_SHARED;
		return -ERESTART_RESTARTBLOCK;
	}

@@ -1310,15 +1311,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,

static long futex_wait_restart(struct restart_block *restart)
{
-	u32 __user *uaddr = (u32 __user *)restart->arg0;
-	u32 val = (u32)restart->arg1;
-	ktime_t *abs_time = (ktime_t *)restart->arg2;
+	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
	struct rw_semaphore *fshared = NULL;
+	ktime_t t;

+	t.tv64 = restart->futex.time;
	restart->fn = do_no_restart_syscall;
-	if (restart->arg3 & ARG3_SHARED)
+	if (restart->futex.flags & FLAGS_SHARED)
		fshared = &current->mm->mmap_sem;
-	return (long)futex_wait(uaddr, fshared, val, abs_time);
+	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t);
}
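On the first futex.c hunk above ("correctly return -EFAULT not -EINVAL"): with two independent if statements, a faulting cmpxchg (curval == -EFAULT) also satisfies curval != uval, so the second test silently overwrote ret. A small stand-alone sketch of the control-flow difference, using mock error values rather than the kernel's errno definitions:

/* Illustrative only: why the added 'else' keeps -EFAULT from being masked. */
#include <stdio.h>

#define MOCK_EFAULT 14   /* stand-ins for the kernel's error codes */
#define MOCK_EINVAL 22

static int old_check(long curval, long uval)
{
	int ret = 0;

	if (curval == -MOCK_EFAULT)
		ret = -MOCK_EFAULT;
	if (curval != uval)          /* also true after a fault ...     */
		ret = -MOCK_EINVAL;  /* ... so -EFAULT gets overwritten */
	return ret;
}

static int new_check(long curval, long uval)
{
	int ret = 0;

	if (curval == -MOCK_EFAULT)
		ret = -MOCK_EFAULT;
	else if (curval != uval)     /* only consulted when no fault occurred */
		ret = -MOCK_EINVAL;
	return ret;
}

int main(void)
{
	long curval = -MOCK_EFAULT;  /* the user-space access faulted */
	long uval = 7;

	printf("old: %d, new: %d\n", old_check(curval, uval), new_check(curval, uval));
	/* prints "old: -22, new: -14" -- only the new form reports the fault */
	return 0;
}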


kernel/lockdep.c  +17 −12
@@ -3054,11 +3054,6 @@ void __init lockdep_info(void)
#endif
}

-static inline int in_range(const void *start, const void *addr, const void *end)
-{
-	return addr >= start && addr <= end;
-}

static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
@@ -3080,6 +3075,13 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
	dump_stack();
}

+static inline int not_in_range(const void* mem_from, unsigned long mem_len,
+				const void* lock_from, unsigned long lock_len)
+{
+	return lock_from + lock_len <= mem_from ||
+		mem_from + mem_len <= lock_from;
+}

/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
@@ -3087,7 +3089,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
-	const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
@@ -3100,14 +3101,11 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

-		lock_from = (void *)hlock->instance;
-		lock_to = (void *)(hlock->instance + 1);

-		if (!in_range(mem_from, lock_from, mem_to) &&
-					!in_range(mem_from, lock_to, mem_to))
+		if (not_in_range(mem_from, mem_len, hlock->instance,
+					sizeof(*hlock->instance)))
			continue;

-		print_freed_lock_bug(curr, mem_from, mem_to, hlock);
+		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	local_irq_restore(flags);
@@ -3173,6 +3171,13 @@ void debug_show_all_locks(void)
		printk(" locked it.\n");

	do_each_thread(g, p) {
+		/*
+		 * It's not reliable to print a task's held locks
+		 * if it's not sleeping (or if it's not the current
+		 * task):
+		 */
+		if (p->state == TASK_RUNNING && p != current)
+			continue;
		if (p->lockdep_depth)
			lockdep_print_held_locks(p);
		if (!unlock)
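To see what the in_range()/not_in_range() change above fixes, a stand-alone comparison (mock addresses, not kernel code): the old test only asked whether either end of the held lock landed inside the freed range, so a lock object that entirely contains the freed region was never reported; the interval-overlap test catches that case.

/* Illustrative only: compares the removed in_range() test with not_in_range(). */
#include <stdio.h>

static int in_range(const void *start, const void *addr, const void *end)
{
	return addr >= start && addr <= end;
}

static int not_in_range(const void *mem_from, unsigned long mem_len,
			const void *lock_from, unsigned long lock_len)
{
	return (const char *)lock_from + lock_len <= (const char *)mem_from ||
	       (const char *)mem_from + mem_len <= (const char *)lock_from;
}

int main(void)
{
	static char lock_obj[64];                 /* stands in for *hlock->instance   */
	const char *mem_from = lock_obj + 16;     /* freed region inside the lock obj */
	unsigned long mem_len = 8;
	const char *mem_to = mem_from + mem_len;
	const char *lock_from = lock_obj;
	const char *lock_to = lock_obj + sizeof(lock_obj);

	int old_hit = in_range(mem_from, lock_from, mem_to) ||
		      in_range(mem_from, lock_to, mem_to);
	int new_hit = !not_in_range(mem_from, mem_len, lock_obj, sizeof(lock_obj));

	printf("old check reports overlap: %d\n", old_hit);  /* 0 -- bug missed */
	printf("new check reports overlap: %d\n", new_hit);  /* 1 -- caught     */
	return 0;
}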
kernel/sched.c  +68 −64
@@ -211,7 +211,6 @@ static inline struct task_group *task_group(struct task_struct *p)
#else
	tg = &init_task_group;
#endif

	return tg;
}

@@ -249,14 +248,15 @@ struct cfs_rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

-	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+	/*
+	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
-	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
+	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */
#endif
};
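The reflowed comment above describes the fair-group hierarchy; as a rough, purely illustrative toy model (simplified types, not the kernel's data structures), each per-CPU runqueue strings its leaf cfs_rqs, the ones that actually hold tasks, on a list that load balancing walks:

/* Toy model of the hierarchy described in the comment above -- not kernel code. */
#include <stdio.h>

struct toy_task_group { const char *name; };

struct toy_cfs_rq {
	struct toy_task_group *tg;      /* group that "owns" this runqueue */
	int nr_running;                 /* tasks queued here (leaf only)   */
	struct toy_cfs_rq *next_leaf;   /* stand-in for leaf_cfs_rq_list   */
};

struct toy_rq {
	struct toy_cfs_rq *leaf_cfs_rq_list;   /* per-CPU list of leaf cfs_rqs */
};

static void toy_load_balance(struct toy_rq *rq)
{
	const struct toy_cfs_rq *cfs;

	/* Only leaf cfs_rqs are visited; non-leaf levels of the hierarchy
	 * (users, containers, ...) are reached through their leaves. */
	for (cfs = rq->leaf_cfs_rq_list; cfs; cfs = cfs->next_leaf)
		printf("balance group %-10s nr_running=%d\n",
		       cfs->tg->name, cfs->nr_running);
}

int main(void)
{
	struct toy_task_group users = { "users" }, system = { "system" };
	struct toy_cfs_rq leaf_b = { &system, 1, NULL };
	struct toy_cfs_rq leaf_a = { &users, 3, &leaf_b };
	struct toy_rq rq = { &leaf_a };

	toy_load_balance(&rq);
	return 0;
}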
@@ -4390,8 +4390,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 */
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
-				       struct sched_param __user *param)
+asmlinkage long
+sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	/* negative values for policy are not valid */
	if (policy < 0)
@@ -5245,11 +5245,12 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
			 * kernel threads (both mm NULL), since they never
			 * leave kernel.
			 */
-			if (p->mm && printk_ratelimit())
+			if (p->mm && printk_ratelimit()) {
				printk(KERN_INFO "process %d (%s) no "
				       "longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, dead_cpu);
+			}
		}
	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
}

@@ -5612,9 +5613,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
		migrate_nr_uninterruptible(rq);
		BUG_ON(rq->nr_running != 0);

-		/* No need to migrate the tasks: it was best-effort if
+		/*
+		 * No need to migrate the tasks: it was best-effort if
		 * they didn't take sched_hotcpu_mutex. Just wake up
-		 * the requestors. */
+		 * the requestors.
+		 */
		spin_lock_irq(&rq->lock);
		while (!list_empty(&rq->migration_queue)) {
			struct migration_req *req;
@@ -5999,8 +6002,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);

-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
-			    struct sched_group **sg)
+static int
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
	if (sg)
		*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6017,8 +6020,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
#endif

#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
	int group;
	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
@@ -6029,8 +6032,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
	return group;
}
#elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
	if (sg)
		*sg = &per_cpu(sched_group_core, cpu);
@@ -6041,8 +6044,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_phys);

-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
	int group;
#ifdef CONFIG_SCHED_MC
@@ -7193,16 +7196,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
	return &tg->css;
}

-static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
-			       struct cgroup *cgrp)
+static void
+cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);

	sched_destroy_group(tg);
}

-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
-			     struct cgroup *cgrp, struct task_struct *tsk)
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk)
{
	/* We don't support RT-tasks being in separate groups */
	if (tsk->sched_class != &fair_sched_class)
@@ -7308,8 +7312,8 @@ static struct cgroup_subsys_state *cpuacct_create(
}

/* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cont)
+static void
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct cpuacct *ca = cgroup_ca(cont);