
Commit 897e81be authored by Linus Torvalds

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
  sched, cputime: Introduce thread_group_times()
  sched, cputime: Cleanups related to task_times()
  Revert "sched, x86: Optimize branch hint in __switch_to()"
  sched: Fix isolcpus boot option
  sched: Revert 498657a4
  sched, time: Define nsecs_to_jiffies()
  sched: Remove task_{u,s,g}time()
  sched: Introduce task_times() to replace task_{u,s}time() pair
  sched: Limit the number of scheduler debug messages
  sched.c: Call debug_show_all_locks() when dumping all tasks
  sched, x86: Optimize branch hint in __switch_to()
  sched: Optimize branch hint in context_switch()
  sched: Optimize branch hint in pick_next_task_fair()
  sched_feat_write(): Update ppos instead of file->f_pos
  sched: Sched_rt_periodic_timer vs cpu hotplug
  sched, kvm: Fix race condition involving sched_in_preempt_notifers
  sched: More generic WAKE_AFFINE vs select_idle_sibling()
  sched: Cleanup select_task_rq_fair()
  sched: Fix granularity of task_u/stime()
  sched: Fix/add missing update_rq_clock() calls
  ...
Parents: c3fa27d1 0cf55e1e
Documentation/feature-removal-schedule.txt  +15 −0
@@ -6,6 +6,21 @@ be removed from this file.
 
 ---------------------------
 
+What:	USER_SCHED
+When:	2.6.34
+
+Why:	USER_SCHED was implemented as a proof of concept for group scheduling.
+	The effect of USER_SCHED can already be achieved from userspace with
+	the help of libcgroup. The removal of USER_SCHED will also simplify
+	the scheduler code with the removal of one major ifdef. There are also
+	issues USER_SCHED has with USER_NS. A decision was taken not to fix
+	those and instead remove USER_SCHED. Also new group scheduling
+	features will not be implemented for USER_SCHED.
+
+Who:	Dhaval Giani <dhaval@linux.vnet.ibm.com>
+
+---------------------------
+
 What:	PRISM54
 When:	2.6.34
 
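The entry above names libcgroup as the userspace replacement for USER_SCHED. As a rough illustration only (not part of this commit), the sketch below creates a per-user cpu cgroup through the cgroup filesystem, which is the same mechanism libcgroup drives; the /cgroup/cpu mount point, the uid_<n> group name and the cpu.shares value are all assumptions made for the example.

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#define CPU_CGROUP_ROOT "/cgroup/cpu"	/* assumed mount point of the cpu controller */

/* write a small string into a cgroup control file */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	char dir[256], path[512], pid[32];

	/* one group per uid, mirroring what USER_SCHED did inside the kernel */
	snprintf(dir, sizeof(dir), CPU_CGROUP_ROOT "/uid_%u", (unsigned)getuid());
	if (mkdir(dir, 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}

	/* give the group a relative CPU weight (1024 is the default share) */
	snprintf(path, sizeof(path), "%s/cpu.shares", dir);
	write_str(path, "512");

	/* attach the current task to the group */
	snprintf(path, sizeof(path), "%s/tasks", dir);
	snprintf(pid, sizeof(pid), "%d", (int)getpid());
	if (write_str(path, pid))
		perror("attach task to cgroup");

	return 0;
}

libcgroup's tools and C API end up performing the same mkdir-and-write sequence; the point of the entry above is that none of this needs scheduler support for per-user grouping once the cpu controller exists.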
Documentation/filesystems/proc.txt  +2 −1
@@ -1072,7 +1072,8 @@ second). The meanings of the columns are as follows, from left to right:
 - irq: servicing interrupts
 - softirq: servicing softirqs
 - steal: involuntary wait
-- guest: running a guest
+- guest: running a normal guest
+- guest_nice: running a niced guest
 
 The "intr" line gives counts of interrupts  serviced since boot time, for each
 of the  possible system interrupts.   The first  column  is the  total of  all
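The column order documented above is exactly what userspace sees on the first line of /proc/stat. The short sketch below (illustrative only, not part of this commit) reads that line, including the new guest_nice field; on kernels without this change the tenth field is simply absent and the variable stays zero.

#include <stdio.h>

int main(void)
{
	unsigned long long user = 0, nice = 0, system = 0, idle = 0, iowait = 0,
			   irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
	char line[512];
	FILE *f = fopen("/proc/stat", "r");

	if (!f) {
		perror("/proc/stat");
		return 1;
	}
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* first line: aggregate counts for all CPUs, in USER_HZ clock ticks */
	sscanf(line, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
	       &user, &nice, &system, &idle, &iowait,
	       &irq, &softirq, &steal, &guest, &guest_nice);

	printf("user=%llu nice=%llu system=%llu idle=%llu iowait=%llu\n"
	       "irq=%llu softirq=%llu steal=%llu guest=%llu guest_nice=%llu\n",
	       user, nice, system, idle, iowait,
	       irq, softirq, steal, guest, guest_nice);
	return 0;
}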
Documentation/kernel-parameters.txt  +2 −0
@@ -2186,6 +2186,8 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	sbni=		[NET] Granch SBNI12 leased line adapter
 
+	sched_debug	[KNL] Enables verbose scheduler debug messages.
+
 	sc1200wdt=	[HW,WDT] SC1200 WDT (watchdog) driver
 			Format: <io>[,<timeout>[,<isapnp>]]
 
fs/proc/array.c  +15 −8
@@ -410,6 +410,16 @@ static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
 }
 #endif		/* CONFIG_MMU */
 
+static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+{
+	seq_printf(m, "Cpus_allowed:\t");
+	seq_cpumask(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
+	seq_printf(m, "Cpus_allowed_list:\t");
+	seq_cpumask_list(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
+}
+
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task)
 {
@@ -424,6 +434,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 	}
 	task_sig(m, task);
 	task_cap(m, task);
+	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
 #if defined(CONFIG_S390)
 	task_show_regs(m, task);
@@ -495,20 +506,17 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 		/* add up live thread stats at the group level */
 		if (whole) {
-			struct task_cputime cputime;
 			struct task_struct *t = task;
 			do {
 				min_flt += t->min_flt;
 				maj_flt += t->maj_flt;
-				gtime = cputime_add(gtime, task_gtime(t));
+				gtime = cputime_add(gtime, t->gtime);
 				t = next_thread(t);
 			} while (t != task);
 
 			min_flt += sig->min_flt;
 			maj_flt += sig->maj_flt;
-			thread_group_cputime(task, &cputime);
-			utime = cputime.utime;
-			stime = cputime.stime;
+			thread_group_times(task, &utime, &stime);
 			gtime = cputime_add(gtime, sig->gtime);
 		}
 
@@ -524,9 +532,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	if (!whole) {
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
-		utime = task_utime(task);
-		stime = task_stime(task);
-		gtime = task_gtime(task);
+		task_times(task, &utime, &stime);
+		gtime = task->gtime;
 	}
 
 	/* scale priority and nice values from timeslices to -20..20 */
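The new task_cpus_allowed() helper above emits Cpus_allowed and Cpus_allowed_list lines into /proc/<pid>/status. A minimal userspace sketch (illustrative, not part of this commit) that reads those lines back from its own status file:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f) {
		perror("/proc/self/status");
		return 1;
	}
	/* prints both the "Cpus_allowed:" mask and the "Cpus_allowed_list:" range */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Cpus_allowed", strlen("Cpus_allowed")))
			fputs(line, stdout);
	fclose(f);
	return 0;
}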
fs/proc/stat.c  +13 −6
@@ -27,7 +27,7 @@ static int show_stat(struct seq_file *p, void *v)
 	int i, j;
 	unsigned long jif;
 	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
-	cputime64_t guest;
+	cputime64_t guest, guest_nice;
 	u64 sum = 0;
 	u64 sum_softirq = 0;
 	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
@@ -36,7 +36,7 @@ static int show_stat(struct seq_file *p, void *v)
 
 	user = nice = system = idle = iowait =
 		irq = softirq = steal = cputime64_zero;
-	guest = cputime64_zero;
+	guest = guest_nice = cputime64_zero;
 	getboottime(&boottime);
 	jif = boottime.tv_sec;
 
@@ -51,6 +51,8 @@ static int show_stat(struct seq_file *p, void *v)
 		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
 		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
 		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
+		guest_nice = cputime64_add(guest_nice,
+			kstat_cpu(i).cpustat.guest_nice);
 		for_each_irq_nr(j) {
 			sum += kstat_irqs_cpu(j, i);
 		}
@@ -65,7 +67,8 @@ static int show_stat(struct seq_file *p, void *v)
 	}
 	sum += arch_irq_stat();
 
-	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu "
+		"%llu\n",
 		(unsigned long long)cputime64_to_clock_t(user),
 		(unsigned long long)cputime64_to_clock_t(nice),
 		(unsigned long long)cputime64_to_clock_t(system),
@@ -74,7 +77,8 @@ static int show_stat(struct seq_file *p, void *v)
 		(unsigned long long)cputime64_to_clock_t(irq),
 		(unsigned long long)cputime64_to_clock_t(softirq),
 		(unsigned long long)cputime64_to_clock_t(steal),
-		(unsigned long long)cputime64_to_clock_t(guest));
+		(unsigned long long)cputime64_to_clock_t(guest),
+		(unsigned long long)cputime64_to_clock_t(guest_nice));
 	for_each_online_cpu(i) {
 
 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
@@ -88,8 +92,10 @@ static int show_stat(struct seq_file *p, void *v)
 		softirq = kstat_cpu(i).cpustat.softirq;
 		steal = kstat_cpu(i).cpustat.steal;
 		guest = kstat_cpu(i).cpustat.guest;
+		guest_nice = kstat_cpu(i).cpustat.guest_nice;
 		seq_printf(p,
-			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
+			"%llu\n",
 			i,
 			(unsigned long long)cputime64_to_clock_t(user),
 			(unsigned long long)cputime64_to_clock_t(nice),
@@ -99,7 +105,8 @@ static int show_stat(struct seq_file *p, void *v)
 			(unsigned long long)cputime64_to_clock_t(irq),
 			(unsigned long long)cputime64_to_clock_t(softirq),
 			(unsigned long long)cputime64_to_clock_t(steal),
-			(unsigned long long)cputime64_to_clock_t(guest));
+			(unsigned long long)cputime64_to_clock_t(guest),
+			(unsigned long long)cputime64_to_clock_t(guest_nice));
 	}
 	seq_printf(p, "intr %llu", (unsigned long long)sum);
 
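Because show_stat() now appends guest_nice as a tenth column to every cpu line, consumers that compute utilization over an interval have to decide how to treat it. The sketch below (illustrative only; the one-second interval is arbitrary, and it assumes guest time is also accounted into user/nice, so those columns are reported but not added into the total a second time) samples the aggregate line twice and prints each column's share of the elapsed ticks.

#include <stdio.h>
#include <unistd.h>

#define NFIELDS 10

static const char *names[NFIELDS] = {
	"user", "nice", "system", "idle", "iowait",
	"irq", "softirq", "steal", "guest", "guest_nice"
};

/* read the aggregate cpu line of /proc/stat into v[]; missing fields stay 0 */
static int sample(unsigned long long v[NFIELDS])
{
	char line[512];
	FILE *f = fopen("/proc/stat", "r");
	int i;

	for (i = 0; i < NFIELDS; i++)
		v[i] = 0;
	if (!f)
		return -1;
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	sscanf(line, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
	       &v[0], &v[1], &v[2], &v[3], &v[4],
	       &v[5], &v[6], &v[7], &v[8], &v[9]);
	return 0;
}

int main(void)
{
	unsigned long long a[NFIELDS], b[NFIELDS], total = 0;
	int i;

	if (sample(a))
		return 1;
	sleep(1);			/* arbitrary sampling interval */
	if (sample(b))
		return 1;

	/* user..steal are disjoint buckets; guest/guest_nice are assumed to be
	 * folded into user/nice already, so they are not added to the total */
	for (i = 0; i < 8; i++)
		total += b[i] - a[i];

	for (i = 0; i < NFIELDS; i++)
		printf("%-10s %6.2f%%\n", names[i],
		       total ? 100.0 * (b[i] - a[i]) / total : 0.0);
	return 0;
}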