
Commit 7f621861 authored by Linus Torvalds

Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: fix process time monotonicity
  sched_clock: fix NOHZ interaction
parents 1c402c8c 49048622
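
The first change in the series, "sched: fix process time monotonicity", moves the task_utime()/task_stime()/task_gtime() helpers out of the /proc code and into the scheduler so that __exit_signal() can reuse the same monotonic accounting. The sketch below is a rough, self-contained userspace illustration of the arithmetic those helpers use; it is not the kernel code. struct task_sample and its plain-nanosecond fields are stand-ins for the cputime_t/clock_t handling in the real functions, but the proportional split of sum_exec_runtime and the prev_* clamp follow the same idea as the diff below.

/*
 * Rough userspace illustration of the monotonicity logic (NOT kernel
 * code): the precise CFS runtime is split into user/system time in
 * proportion to the tick-sampled utime/stime, and the previously
 * reported values act as a floor so what userspace sees never goes
 * backwards.  Everything is plain nanoseconds here; the kernel
 * converts between cputime_t and clock_t instead.
 */
#include <stdio.h>
#include <stdint.h>

struct task_sample {
	uint64_t utime;            /* tick-sampled user time      */
	uint64_t stime;            /* tick-sampled system time    */
	uint64_t sum_exec_runtime; /* precise CFS runtime         */
	uint64_t prev_utime;       /* last value reported to user */
	uint64_t prev_stime;
};

static uint64_t task_utime(struct task_sample *p)
{
	uint64_t total = p->utime + p->stime;
	uint64_t utime = p->sum_exec_runtime;

	/* scale the precise runtime by the sampled user/total ratio */
	if (total)
		utime = utime * p->utime / total;

	/* max(): never report less than what was reported before */
	if (utime > p->prev_utime)
		p->prev_utime = utime;
	return p->prev_utime;
}

static uint64_t task_stime(struct task_sample *p)
{
	uint64_t utime = task_utime(p);
	uint64_t stime = 0;

	/*
	 * Subtract utime from the precise total, so that utime + stime
	 * (the figure userspace usually sums) stays monotonic.
	 */
	if (p->sum_exec_runtime > utime)
		stime = p->sum_exec_runtime - utime;

	if (stime > p->prev_stime)
		p->prev_stime = stime;
	return p->prev_stime;
}

int main(void)
{
	struct task_sample t = { .utime = 700, .stime = 300,
				 .sum_exec_runtime = 1200 };

	printf("utime=%llu stime=%llu\n",
	       (unsigned long long)task_utime(&t),
	       (unsigned long long)task_stime(&t));

	/* a later sample with a shifted ratio still never goes backwards */
	t.utime = 650; t.stime = 400; t.sum_exec_runtime = 1250;
	printf("utime=%llu stime=%llu\n",
	       (unsigned long long)task_utime(&t),
	       (unsigned long long)task_stime(&t));
	return 0;
}

Running this prints utime=840 stime=360 for the first sample and utime=840 stime=410 for the second: the ratio shift lowers the raw utime estimate, but the clamp keeps the reported values from decreasing.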
fs/proc/array.c +0 −59
@@ -337,65 +337,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
	return 0;
}

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
static cputime_t task_utime(struct task_struct *p)
{
	return p->utime;
}

static cputime_t task_stime(struct task_struct *p)
{
	return p->stime;
}
#else
static cputime_t task_utime(struct task_struct *p)
{
	clock_t utime = cputime_to_clock_t(p->utime),
		total = utime + cputime_to_clock_t(p->stime);
	u64 temp;

	/*
	 * Use CFS's precise accounting:
	 */
	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);

	if (total) {
		temp *= utime;
		do_div(temp, total);
	}
	utime = (clock_t)temp;

	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
	return p->prev_utime;
}

static cputime_t task_stime(struct task_struct *p)
{
	clock_t stime;

	/*
	 * Use CFS's precise accounting. (we subtract utime from
	 * the total, to make sure the total observed by userspace
	 * grows monotonically - apps rely on that):
	 */
	stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
			cputime_to_clock_t(task_utime(p));

	if (stime >= 0)
		p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));

	return p->prev_stime;
}
#endif

static cputime_t task_gtime(struct task_struct *p)
{
	return p->gtime;
}

static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task, int whole)
{
include/linux/sched.h +4 −0
@@ -1475,6 +1475,10 @@ static inline void put_task_struct(struct task_struct *t)
		__put_task_struct(t);
}

+extern cputime_t task_utime(struct task_struct *p);
+extern cputime_t task_stime(struct task_struct *p);
+extern cputime_t task_gtime(struct task_struct *p);
+
/*
 * Per process flags
 */
kernel/exit.c +3 −3
@@ -112,9 +112,9 @@ static void __exit_signal(struct task_struct *tsk)
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+		sig->utime = cputime_add(sig->utime, task_utime(tsk));
+		sig->stime = cputime_add(sig->stime, task_stime(tsk));
+		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
kernel/sched.c +59 −0
@@ -4178,6 +4178,65 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
		cpustat->steal = cputime64_add(cpustat->steal, tmp);
}

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cputime_t task_utime(struct task_struct *p)
{
	return p->utime;
}

cputime_t task_stime(struct task_struct *p)
{
	return p->stime;
}
#else
cputime_t task_utime(struct task_struct *p)
{
	clock_t utime = cputime_to_clock_t(p->utime),
		total = utime + cputime_to_clock_t(p->stime);
	u64 temp;

	/*
	 * Use CFS's precise accounting:
	 */
	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);

	if (total) {
		temp *= utime;
		do_div(temp, total);
	}
	utime = (clock_t)temp;

	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
	return p->prev_utime;
}

cputime_t task_stime(struct task_struct *p)
{
	clock_t stime;

	/*
	 * Use CFS's precise accounting. (we subtract utime from
	 * the total, to make sure the total observed by userspace
	 * grows monotonically - apps rely on that):
	 */
	stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
			cputime_to_clock_t(task_utime(p));

	if (stime >= 0)
		p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));

	return p->prev_stime;
}
#endif

inline cputime_t task_gtime(struct task_struct *p)
{
	return p->gtime;
}

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
kernel/time/tick-sched.c +3 −0
@@ -162,6 +162,8 @@ void tick_nohz_stop_idle(int cpu)
		ts->idle_lastupdate = now;
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_active = 0;

+		sched_clock_idle_wakeup_event(0);
	}
}

@@ -177,6 +179,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
	}
	ts->idle_entrytime = now;
	ts->idle_active = 1;
+	sched_clock_idle_sleep_event();
	return now;
}