
Commit 702a7c76 authored by Linus Torvalds


Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (21 commits)
  sched: Remove forced2_migrations stats
  sched: Fix memory leak in two error corner cases
  sched: Fix build warning in get_update_sysctl_factor()
  sched: Update normalized values on user updates via proc
  sched: Make tunable scaling style configurable
  sched: Fix missing sched tunable recalculation on cpu add/remove
  sched: Fix task priority bug
  sched: cgroup: Implement different treatment for idle shares
  sched: Remove unnecessary RCU exclusion
  sched: Discard some old bits
  sched: Clean up check_preempt_wakeup()
  sched: Move update_curr() in check_preempt_wakeup() to avoid redundant call
  sched: Sanitize fork() handling
  sched: Clean up ttwu() rq locking
  sched: Remove rq->clock coupling from set_task_cpu()
  sched: Consolidate select_task_rq() callers
  sched: Remove sysctl.sched_features
  sched: Protect sched_rr_get_param() access to task->sched_class
  sched: Protect task->cpus_allowed access in sched_getaffinity()
  sched: Fix balance vs hotplug race
  ...

Fixed up conflicts in kernel/sysctl.c (due to sysctl cleanup)
parents 053fe57a b9889ed1
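
The "sched: Make tunable scaling style configurable" entry above introduces enum sched_tunable_scaling and a CPU-count-based scaling factor; the hunks further down show the kernel-side implementation (get_update_sysctl_factor() and the SET_SYSCTL() macro). The standalone C program below is only an illustrative userspace sketch that mirrors that factor computation — scaling_factor() and ilog2_sketch() are made-up names, not kernel symbols.

/*
 * Standalone sketch of the scaling-factor logic added by
 * "sched: Make tunable scaling style configurable".  It mirrors the
 * get_update_sysctl_factor() hunk below; nothing here is kernel code.
 */
#include <stdio.h>

enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
};

/* stand-in for the kernel's ilog2(): floor(log2(x)) for x >= 1 */
static unsigned int ilog2_sketch(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

static unsigned int scaling_factor(enum sched_tunable_scaling style,
				   unsigned int online_cpus)
{
	/* the new kernel code caps the CPU count at 8 before scaling */
	unsigned int cpus = online_cpus < 8 ? online_cpus : 8;

	switch (style) {
	case SCHED_TUNABLESCALING_NONE:
		return 1;
	case SCHED_TUNABLESCALING_LINEAR:
		return cpus;
	case SCHED_TUNABLESCALING_LOG:
	default:
		return 1 + ilog2_sketch(cpus);
	}
}

int main(void)
{
	/*
	 * On a 16-CPU box: none=1, log=1+ilog2(8)=4, linear=8.  Each factor
	 * multiplies the normalized_sysctl_* baselines shown in the hunks
	 * below to produce the effective CFS tunables.
	 */
	printf("none=%u log=%u linear=%u\n",
	       scaling_factor(SCHED_TUNABLESCALING_NONE, 16),
	       scaling_factor(SCHED_TUNABLESCALING_LOG, 16),
	       scaling_factor(SCHED_TUNABLESCALING_LINEAR, 16));
	return 0;
}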
+2 −0
@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
 #define num_online_cpus()	cpumask_weight(cpu_online_mask)
 #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
 #define num_present_cpus()	cpumask_weight(cpu_present_mask)
+#define num_active_cpus()	cpumask_weight(cpu_active_mask)
 #define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
 #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
 #define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
 #define num_online_cpus()	1
 #define num_possible_cpus()	1
 #define num_present_cpus()	1
+#define num_active_cpus()	1
 #define cpu_online(cpu)		((cpu) == 0)
 #define cpu_possible(cpu)	((cpu) == 0)
 #define cpu_present(cpu)	((cpu) == 0)
+13 −7
@@ -1102,7 +1102,7 @@ struct sched_class {
 
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*task_fork) (struct task_struct *p);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
@@ -1111,7 +1111,8 @@ struct sched_class {
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			     int oldprio, int running);
 
-	unsigned int (*get_rr_interval) (struct task_struct *task);
+	unsigned int (*get_rr_interval) (struct rq *rq,
+					 struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*moved_group) (struct task_struct *p);
@@ -1151,8 +1152,6 @@ struct sched_entity {
 	u64			start_runtime;
 	u64			avg_wakeup;
 
-	u64			avg_running;
-
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1175,7 +1174,6 @@ struct sched_entity {
 	u64			nr_failed_migrations_running;
 	u64			nr_failed_migrations_hot;
 	u64			nr_forced_migrations;
-	u64			nr_forced2_migrations;
 
 	u64			nr_wakeups;
 	u64			nr_wakeups_sync;
@@ -1904,14 +1902,22 @@ extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_shares_ratelimit;
 extern unsigned int sysctl_sched_shares_thresh;
 extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+	SCHED_TUNABLESCALING_NONE,
+	SCHED_TUNABLESCALING_LOG,
+	SCHED_TUNABLESCALING_LINEAR,
+	SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
 #ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
 
-int sched_nr_latency_handler(struct ctl_table *table, int write,
+int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
 		loff_t *ppos);
 #endif
+13 −5
@@ -212,6 +212,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
+		set_cpu_active(cpu, true);
+
 		nr_calls--;
 		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					  hcpu, nr_calls, NULL);
@@ -223,11 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
 	/* Ensure that we are not runnable on dying cpu */
 	cpumask_copy(old_allowed, &current->cpus_allowed);
-	set_cpus_allowed_ptr(current,
-			     cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
+	set_cpus_allowed_ptr(current, cpu_active_mask);
 
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
+		set_cpu_active(cpu, true);
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					    hcpu) == NOTIFY_BAD)
@@ -292,9 +294,6 @@ int __ref cpu_down(unsigned int cpu)
 
 	err = _cpu_down(cpu, 0);
 
-	if (cpu_online(cpu))
-		set_cpu_active(cpu, true);
-
 out:
 	cpu_maps_update_done();
 	stop_machine_destroy();
@@ -387,6 +386,15 @@ int disable_nonboot_cpus(void)
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
 	cpumask_clear(frozen_cpus);
+
+	for_each_online_cpu(cpu) {
+		if (cpu == first_cpu)
+			continue;
+		set_cpu_active(cpu, false);
+	}
+
+	synchronize_sched();
+
 	printk("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
 		if (cpu == first_cpu)
+10 −8
@@ -737,7 +737,7 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
 {
 }
 
-static int generate_sched_domains(struct cpumask **domains,
+static int generate_sched_domains(cpumask_var_t **domains,
 			struct sched_domain_attr **attributes)
 {
 	*domains = NULL;
@@ -872,7 +872,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 		if (retval < 0)
 			return retval;
 
-		if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
+		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
 			return -EINVAL;
 	}
 	retval = validate_change(cs, trialcs);
@@ -2010,7 +2010,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 		}
 
 		/* Continue past cpusets with all cpus, mems online */
-		if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
+		if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
 			continue;
 
@@ -2019,7 +2019,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
 		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
-			    cpu_online_mask);
+			    cpu_active_mask);
 		nodes_and(cp->mems_allowed, cp->mems_allowed,
 						node_states[N_HIGH_MEMORY]);
 		mutex_unlock(&callback_mutex);
@@ -2057,8 +2057,10 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 	switch (phase) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		break;
 
 	default:
@@ -2067,7 +2069,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 
 	cgroup_lock();
 	mutex_lock(&callback_mutex);
-	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
 	mutex_unlock(&callback_mutex);
 	scan_for_empty_cpusets(&top_cpuset);
 	ndoms = generate_sched_domains(&doms, &attr);
@@ -2114,7 +2116,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
 
 void __init cpuset_init_smp(void)
 {
-	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
 	hotcpu_notifier(cpuset_track_online_cpus, 0);
+114 −104
@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  * default: 0.25ms
  */
 unsigned int sysctl_sched_shares_ratelimit = 250000;
+unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * Inject some fuzzyness into changing the per-cpu group shares
@@ -1614,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long weight, rq_weight = 0, shares = 0;
+	unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
 	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
@@ -1630,6 +1631,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		weight = tg->cfs_rq[i]->load.weight;
 		usd_rq_weight[i] = weight;
 
+		rq_weight += weight;
 		/*
 		 * If there are currently no tasks on the cpu pretend there
 		 * is one of average load so that when a new task gets to
@@ -1638,10 +1640,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		if (!weight)
 			weight = NICE_0_LOAD;
 
-		rq_weight += weight;
+		sum_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
+	if (!rq_weight)
+		rq_weight = sum_weight;
+
 	if ((!shares && rq_weight) || shares > tg->shares)
 		shares = tg->shares;
 
@@ -1810,6 +1815,22 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 #endif
 
 static void calc_load_account_active(struct rq *this_rq);
+static void update_sysctl(void);
+static int get_update_sysctl_factor(void);
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfuly executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
+	task_thread_info(p)->cpu = cpu;
+#endif
+}
 
 #include "sched_stats.h"
 #include "sched_idletask.c"
@@ -1967,20 +1988,6 @@ inline int task_curr(const struct task_struct *p)
 	return cpu_curr(task_cpu(p)) == p;
 }
 
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfuly executed on another CPU. We must ensure that updates of
-	 * per-task data have been completed by this moment.
-	 */
-	smp_wmb();
-	task_thread_info(p)->cpu = cpu;
-#endif
-}
-
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
 				       int oldprio, int running)
@@ -2060,29 +2067,13 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 	int old_cpu = task_cpu(p);
-	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
 	struct cfs_rq *old_cfsrq = task_cfs_rq(p),
 		      *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
-	u64 clock_offset;
-
-	clock_offset = old_rq->clock - new_rq->clock;
 
 	trace_sched_migrate_task(p, new_cpu);
 
-#ifdef CONFIG_SCHEDSTATS
-	if (p->se.wait_start)
-		p->se.wait_start -= clock_offset;
-	if (p->se.sleep_start)
-		p->se.sleep_start -= clock_offset;
-	if (p->se.block_start)
-		p->se.block_start -= clock_offset;
-#endif
 	if (old_cpu != new_cpu) {
 		p->se.nr_migrations++;
-#ifdef CONFIG_SCHEDSTATS
-		if (task_hot(p, old_rq->clock, NULL))
-			schedstat_inc(p, se.nr_forced2_migrations);
-#endif
 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
 				     1, 1, NULL, 0);
 	}
@@ -2323,6 +2314,14 @@ void task_oncpu_function_call(struct task_struct *p,
 	preempt_enable();
 }
 
+#ifdef CONFIG_SMP
+static inline
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+{
+	return p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+}
+#endif
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2374,17 +2373,14 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 	p->state = TASK_WAKING;
-	task_rq_unlock(rq, &flags);
+	__task_rq_unlock(rq);
 
-	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu) {
-		local_irq_save(flags);
-		rq = cpu_rq(cpu);
-		update_rq_clock(rq);
+	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
-		local_irq_restore(flags);
-	}
-	rq = task_rq_lock(p, &flags);
+
+	rq = __task_rq_lock(p);
+	update_rq_clock(rq);
 
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
@@ -2499,7 +2495,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
 	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
-	p->se.avg_running		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start			= 0;
@@ -2521,7 +2516,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.nr_failed_migrations_running	= 0;
 	p->se.nr_failed_migrations_hot		= 0;
 	p->se.nr_forced_migrations		= 0;
-	p->se.nr_forced2_migrations		= 0;
 
 	p->se.nr_wakeups			= 0;
 	p->se.nr_wakeups_sync			= 0;
@@ -2558,7 +2552,6 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
-	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2592,13 +2585,13 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
+	if (p->sched_class->task_fork)
+		p->sched_class->task_fork(p);
+
 #ifdef CONFIG_SMP
-	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
-	local_irq_save(flags);
-	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
-	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2631,17 +2624,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	update_rq_clock(rq);
-
-	if (!p->sched_class->task_new || !current->se.on_rq) {
 	activate_task(rq, p, 0);
-	} else {
-		/*
-		 * Let the scheduling class do new task startup
-		 * management (if any):
-		 */
-		p->sched_class->task_new(rq, p);
-		inc_nr_running(rq);
-	}
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -3156,7 +3139,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 void sched_exec(void)
 {
 	int new_cpu, this_cpu = get_cpu();
-	new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
+	new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
 	put_cpu();
 	if (new_cpu != this_cpu)
 		sched_migrate_task(current, new_cpu);
@@ -3172,10 +3155,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
-	/*
-	 * Note that idle threads have a prio of MAX_PRIO, for this test
-	 * to be always true for them.
-	 */
 	check_preempt_curr(this_rq, p, 0);
 }
 
@@ -4134,7 +4113,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long flags;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_copy(cpus, cpu_online_mask);
+	cpumask_copy(cpus, cpu_active_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4297,7 +4276,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	int all_pinned = 0;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_copy(cpus, cpu_online_mask);
+	cpumask_copy(cpus, cpu_active_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4694,7 +4673,7 @@ int select_nohz_load_balancer(int stop_tick)
 		cpumask_set_cpu(cpu, nohz.cpu_mask);
 
 		/* time for ilb owner also to sleep */
-		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
+		if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
 				atomic_set(&nohz.load_balancer, -1);
 			return 0;
@@ -5396,13 +5375,14 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *p)
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
+	if (prev->state == TASK_RUNNING) {
+		u64 runtime = prev->se.sum_exec_runtime;
 
-	update_avg(&p->se.avg_running, runtime);
+		runtime -= prev->se.prev_sum_exec_runtime;
+		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
 
-	if (p->state == TASK_RUNNING) {
 		/*
 		 * In order to avoid avg_overlap growing stale when we are
 		 * indeed overlapping and hence not getting put to sleep, grow
@@ -5412,12 +5392,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
 		 * correlates to the amount of cache footprint a task can
 		 * build up.
 		 */
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-		update_avg(&p->se.avg_overlap, runtime);
-	} else {
-		update_avg(&p->se.avg_running, 0);
+		update_avg(&prev->se.avg_overlap, runtime);
 	}
-	p->sched_class->put_prev_task(rq, p);
+	prev->sched_class->put_prev_task(rq, prev);
 }
 
 /*
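
The rewritten put_prev_task() above clamps the just-finished runtime to twice sysctl_sched_migration_cost and folds it into prev->se.avg_overlap via update_avg(). update_avg() itself is not part of this diff; the standalone sketch below assumes its usual form in this kernel series (an exponentially weighted moving average with 1/8 weight) and is illustrative only.

/*
 * Userspace sketch of the avg_overlap bookkeeping done by put_prev_task().
 * The 1/8-weight EWMA below is an assumption about update_avg(); the
 * constants are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)sample - (int64_t)*avg;

	*avg += diff >> 3;	/* fold the sample in with 1/8 weight */
}

int main(void)
{
	uint64_t avg_overlap = 0;
	uint64_t migration_cost = 500000;	/* 0.5 ms; assumed value for illustration */
	uint64_t runtime = 3000000;		/* runtime of the slice that just ended */

	/* same clamp as in the hunk above */
	if (runtime > 2 * migration_cost)
		runtime = 2 * migration_cost;
	update_avg(&avg_overlap, runtime);

	printf("avg_overlap = %llu ns\n", (unsigned long long)avg_overlap);
	return 0;
}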
@@ -6631,6 +6608,8 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
 	struct task_struct *p;
+	unsigned long flags;
+	struct rq *rq;
 	int retval;
 
 	get_online_cpus();
@@ -6645,7 +6624,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	if (retval)
 		goto out_unlock;
 
+	rq = task_rq_lock(p, &flags);
 	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+	task_rq_unlock(rq, &flags);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
@@ -6883,6 +6864,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 {
 	struct task_struct *p;
 	unsigned int time_slice;
+	unsigned long flags;
+	struct rq *rq;
 	int retval;
 	struct timespec t;
 
@@ -6899,7 +6882,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 	if (retval)
 		goto out_unlock;
 
-	time_slice = p->sched_class->get_rr_interval(p);
+	rq = task_rq_lock(p, &flags);
+	time_slice = p->sched_class->get_rr_interval(rq, p);
+	task_rq_unlock(rq, &flags);
 
 	read_unlock(&tasklist_lock);
 	jiffies_to_timespec(time_slice, &t);
@@ -7000,7 +6985,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
-	idle->prio = idle->normal_prio = MAX_PRIO;
 	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
 	__set_task_cpu(idle, cpu);
 
@@ -7041,22 +7025,43 @@ cpumask_var_t nohz_cpu_mask;
  *
  * This idea comes from the SD scheduler of Con Kolivas:
  */
-static inline void sched_init_granularity(void)
+static int get_update_sysctl_factor(void)
 {
-	unsigned int factor = 1 + ilog2(num_online_cpus());
-	const unsigned long limit = 200000000;
+	unsigned int cpus = min_t(int, num_online_cpus(), 8);
+	unsigned int factor;
+
+	switch (sysctl_sched_tunable_scaling) {
+	case SCHED_TUNABLESCALING_NONE:
+		factor = 1;
+		break;
+	case SCHED_TUNABLESCALING_LINEAR:
+		factor = cpus;
+		break;
+	case SCHED_TUNABLESCALING_LOG:
+	default:
+		factor = 1 + ilog2(cpus);
+		break;
+	}
 
-	sysctl_sched_min_granularity *= factor;
-	if (sysctl_sched_min_granularity > limit)
-		sysctl_sched_min_granularity = limit;
+	return factor;
+}
 
-	sysctl_sched_latency *= factor;
-	if (sysctl_sched_latency > limit)
-		sysctl_sched_latency = limit;
+static void update_sysctl(void)
+{
+	unsigned int factor = get_update_sysctl_factor();
 
-	sysctl_sched_wakeup_granularity *= factor;
+#define SET_SYSCTL(name) \
+	(sysctl_##name = (factor) * normalized_sysctl_##name)
+	SET_SYSCTL(sched_min_granularity);
+	SET_SYSCTL(sched_latency);
+	SET_SYSCTL(sched_wakeup_granularity);
+	SET_SYSCTL(sched_shares_ratelimit);
+#undef SET_SYSCTL
+}
 
-	sysctl_sched_shares_ratelimit *= factor;
+static inline void sched_init_granularity(void)
+{
+	update_sysctl();
 }
 
 #ifdef CONFIG_SMP
@@ -7093,7 +7098,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	int ret = 0;
 
 	rq = task_rq_lock(p, &flags);
-	if (!cpumask_intersects(new_mask, cpu_online_mask)) {
+	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -7115,7 +7120,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
 
-	if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
+	if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		struct task_struct *mt = rq->migration_thread;
 
@@ -7269,19 +7274,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 again:
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
+	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
 		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
 			goto move;
 
 	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
+	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
 	if (dest_cpu < nr_cpu_ids)
 		goto move;
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu >= nr_cpu_ids) {
 		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
+		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 
 		/*
 		 * Don't tell them about moving exiting tasks or
@@ -7310,7 +7315,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
+	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -7563,7 +7568,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 static struct ctl_table_header *sd_sysctl_header;
 static void register_sched_domain_sysctl(void)
 {
-	int i, cpu_num = num_online_cpus();
+	int i, cpu_num = num_possible_cpus();
 	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
 	char buf[32];
 
@@ -7573,7 +7578,7 @@ static void register_sched_domain_sysctl(void)
 	if (entry == NULL)
 		return;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		snprintf(buf, 32, "cpu%d", i);
 		entry->procname = kstrdup(buf, GFP_KERNEL);
 		entry->mode = 0555;
@@ -7703,7 +7708,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_lock_irq(&rq->lock);
 		update_rq_clock(rq);
 		deactivate_task(rq, rq->idle, 0);
-		rq->idle->static_prio = MAX_PRIO;
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
@@ -9099,7 +9103,7 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
-		cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
+		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
@@ -9230,8 +9234,10 @@ static int update_sched_domains(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		partition_sched_domains(1, NULL, NULL);
 		return NOTIFY_OK;
 
@@ -9278,7 +9284,7 @@ void __init sched_init_smp(void)
 #endif
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
-	arch_init_sched_domains(cpu_online_mask);
+	arch_init_sched_domains(cpu_active_mask);
 	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
 	if (cpumask_empty(non_isolated_cpus))
 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -9842,13 +9848,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		se = kzalloc_node(sizeof(struct sched_entity),
 				  GFP_KERNEL, cpu_to_node(i));
 		if (!se)
-			goto err;
+			goto err_free_rq;
 
 		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
 	}
 
 	return 1;
 
+ err_free_rq:
+	kfree(cfs_rq);
  err:
 	return 0;
 }
@@ -9930,13 +9938,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
 				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_se)
-			goto err;
+			goto err_free_rq;
 
 		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
 	}
 
 	return 1;
 
+ err_free_rq:
+	kfree(rt_rq);
  err:
 	return 0;
 }
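
The last two hunks are the "sched: Fix memory leak in two error corner cases" change: when the second per-cpu allocation in a loop iteration fails, the first allocation from that same iteration is now freed through a dedicated err_free_rq label instead of being leaked. The standalone sketch below shows the same cleanup-label idiom with made-up names; it is not the kernel code itself.

/*
 * Userspace sketch of the cleanup-label pattern from the hunks above.
 * Struct and function names here are illustrative only.
 */
#include <stdlib.h>

struct cfs_rq_stub { int weight; };
struct sched_entity_stub { int load; };

/* returns 1 on success, 0 on failure, mirroring alloc_fair_sched_group() */
static int alloc_group_entry(struct cfs_rq_stub **cfs_rq,
			     struct sched_entity_stub **se)
{
	*cfs_rq = calloc(1, sizeof(**cfs_rq));
	if (!*cfs_rq)
		goto err;

	*se = calloc(1, sizeof(**se));
	if (!*se)
		goto err_free_rq;	/* the fix: don't leak *cfs_rq */

	return 1;

err_free_rq:
	free(*cfs_rq);
	*cfs_rq = NULL;
err:
	return 0;
}

int main(void)
{
	struct cfs_rq_stub *cfs_rq;
	struct sched_entity_stub *se;
	int ok = alloc_group_entry(&cfs_rq, &se);

	if (ok) {
		free(se);
		free(cfs_rq);
	}
	return ok ? 0 : 1;
}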