
Commit dcad0fce authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar.

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cputime: Use local_clock() for full dynticks cputime accounting
  cputime: Constify timeval_to_cputime(timeval) argument
  sched: Move RR_TIMESLICE from sysctl.h to rt.h
  sched: Fix /proc/sched_debug failure on very very large systems
  sched: Fix /proc/sched_stat failure on very very large systems
  sched/core: Remove the obsolete and unused nr_uninterruptible() function
parents f8ef15d6 7f6575f1
+1 −1
@@ -76,7 +76,7 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
 /*
  * Convert cputime <-> timeval (msec)
  */
-static inline cputime_t timeval_to_cputime(struct timeval *val)
+static inline cputime_t timeval_to_cputime(const struct timeval *val)
 {
 	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
 	return (__force cputime_t) ret;
+0 −1
@@ -99,7 +99,6 @@ extern int nr_threads;
 DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
-extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
+2 −20
@@ -1979,11 +1979,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
 }
 
 /*
- * nr_running, nr_uninterruptible and nr_context_switches:
+ * nr_running and nr_context_switches:
  *
  * externally visible scheduler statistics: current number of runnable
- * threads, current number of uninterruptible-sleeping threads, total
- * number of context switches performed since bootup.
+ * threads, total number of context switches performed since bootup.
  */
 unsigned long nr_running(void)
 {
@@ -1995,23 +1994,6 @@ unsigned long nr_running(void)
 	return sum;
 }
 
-unsigned long nr_uninterruptible(void)
-{
-	unsigned long i, sum = 0;
-
-	for_each_possible_cpu(i)
-		sum += cpu_rq(i)->nr_uninterruptible;
-
-	/*
-	 * Since we read the counters lockless, it might be slightly
-	 * inaccurate. Do not allow it to go below zero though:
-	 */
-	if (unlikely((long)sum < 0))
-		sum = 0;
-
-	return sum;
-}
-
 unsigned long long nr_context_switches(void)
 {
 	int i;
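
The nr_uninterruptible() body removed above is a textbook lockless per-CPU counter sum: each runqueue keeps its own signed count, a task may block on one CPU and be woken (and decremented) on another, so the racy total can transiently dip below zero and is clamped before being reported. A minimal user-space sketch of that pattern, with hypothetical names and plain arrays standing in for runqueues (not kernel code):

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical stand-in for per-runqueue counters. A task that went to
 * sleep on CPU 1 may be woken on CPU 3, so individual entries can be
 * negative at any instant even though the true total never is. */
static long per_cpu_uninterruptible[NR_CPUS] = { 2, -1, 0, -2 };

static unsigned long sum_uninterruptible(void)
{
	long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* lockless read of each counter */
		sum += per_cpu_uninterruptible[cpu];

	/* The racy sum may be slightly off; never report a negative count. */
	if (sum < 0)
		sum = 0;

	return (unsigned long)sum;
}

int main(void)
{
	printf("uninterruptible tasks (approx): %lu\n", sum_uninterruptible());
	return 0;
}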
+1 −1
@@ -604,7 +604,7 @@ static unsigned long long vtime_delta(struct task_struct *tsk)
 {
 	unsigned long long clock;
 
-	clock = sched_clock();
+	clock = local_clock();
 	if (clock < tsk->vtime_snap)
 		return 0;
 
+79 −11
@@ -262,11 +262,11 @@ static void print_cpu(struct seq_file *m, int cpu)
 	{
 		unsigned int freq = cpu_khz ? : 1;
 
-		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
+		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
 			   cpu, freq / 1000, (freq % 1000));
 	}
 #else
-	SEQ_printf(m, "\ncpu#%d\n", cpu);
+	SEQ_printf(m, "cpu#%d\n", cpu);
 #endif
 
 #define P(x)								\
@@ -323,6 +323,7 @@ do { \
 	print_rq(m, rq, cpu);
 	rcu_read_unlock();
 	spin_unlock_irqrestore(&sched_debug_lock, flags);
+	SEQ_printf(m, "\n");
 }
 
 static const char *sched_tunable_scaling_names[] = {
@@ -331,11 +332,10 @@ static const char *sched_tunable_scaling_names[] = {
 	"linear"
 };
 
-static int sched_debug_show(struct seq_file *m, void *v)
+static void sched_debug_header(struct seq_file *m)
 {
 	u64 ktime, sched_clk, cpu_clk;
 	unsigned long flags;
-	int cpu;
 
 	local_irq_save(flags);
 	ktime = ktime_to_ns(ktime_get());
@@ -377,33 +377,101 @@ static int sched_debug_show(struct seq_file *m, void *v)
 #undef PN
 #undef P
 
-	SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
+	SEQ_printf(m, "  .%-40s: %d (%s)\n",
+		"sysctl_sched_tunable_scaling",
 		sysctl_sched_tunable_scaling,
 		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
+	SEQ_printf(m, "\n");
+}
 
-	for_each_online_cpu(cpu)
-		print_cpu(m, cpu);
+static int sched_debug_show(struct seq_file *m, void *v)
+{
+	int cpu = (unsigned long)(v - 2);
 
-	SEQ_printf(m, "\n");
+	if (cpu != -1)
+		print_cpu(m, cpu);
+	else
+		sched_debug_header(m);
 
 	return 0;
 }
 
 void sysrq_sched_debug_show(void)
 {
-	sched_debug_show(NULL, NULL);
+	int cpu;
+
+	sched_debug_header(NULL);
+	for_each_online_cpu(cpu)
+		print_cpu(NULL, cpu);
+
+}
+
+/*
+ * This itererator needs some explanation.
+ * It returns 1 for the header position.
+ * This means 2 is cpu 0.
+ * In a hotplugged system some cpus, including cpu 0, may be missing so we have
+ * to use cpumask_* to iterate over the cpus.
+ */
+static void *sched_debug_start(struct seq_file *file, loff_t *offset)
+{
+	unsigned long n = *offset;
+
+	if (n == 0)
+		return (void *) 1;
+
+	n--;
+
+	if (n > 0)
+		n = cpumask_next(n - 1, cpu_online_mask);
+	else
+		n = cpumask_first(cpu_online_mask);
+
+	*offset = n + 1;
+
+	if (n < nr_cpu_ids)
+		return (void *)(unsigned long)(n + 2);
+	return NULL;
+}
+
+static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
+{
+	(*offset)++;
+	return sched_debug_start(file, offset);
+}
+
+static void sched_debug_stop(struct seq_file *file, void *data)
+{
+}
+
+static const struct seq_operations sched_debug_sops = {
+	.start = sched_debug_start,
+	.next = sched_debug_next,
+	.stop = sched_debug_stop,
+	.show = sched_debug_show,
+};
+
+static int sched_debug_release(struct inode *inode, struct file *file)
+{
+	seq_release(inode, file);
+
+	return 0;
 }
 
 static int sched_debug_open(struct inode *inode, struct file *filp)
 {
-	return single_open(filp, sched_debug_show, NULL);
+	int ret = 0;
+
+	ret = seq_open(filp, &sched_debug_sops);
+
+	return ret;
 }
 
 static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
-	.release	= single_release,
+	.release	= sched_debug_release,
 };
 
 static int __init init_sched_debug_procfs(void)
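
The change above replaces single_open(), which has to render the whole /proc/sched_debug report in one pass, with a seq_operations iterator that emits one record (the header or a single CPU) per show() call, so the output no longer has to fit a single buffer on machines with thousands of CPUs. As a rough user-space sketch of the position mapping described in the iterator comment (hypothetical helpers, plain C, not kernel code): position 0 yields the header token 1, every later position is translated to the next online CPU and handed to show() encoded as cpu + 2, and holes in the online mask are skipped.

#include <stdio.h>

#define NR_CPU_IDS 8

/* Hypothetical "online" mask with holes (CPUs 1 and 4 hot-unplugged),
 * standing in for cpu_online_mask. */
static const int cpu_online[NR_CPU_IDS] = { 1, 0, 1, 1, 0, 1, 1, 1 };

static int next_online(int cpu)			/* like cpumask_next(cpu, ...) */
{
	for (cpu++; cpu < NR_CPU_IDS; cpu++)
		if (cpu_online[cpu])
			return cpu;
	return NR_CPU_IDS;			/* past the last possible CPU */
}

/* Mirrors sched_debug_start()'s mapping:
 * offset 0      -> token 1 (the header record)
 * offset n >= 1 -> next online CPU, encoded as cpu + 2
 * returns 0 when iteration is finished. */
static unsigned long debug_start(unsigned long *offset)
{
	unsigned long n = *offset;
	int cpu;

	if (n == 0)
		return 1;

	n--;
	cpu = n > 0 ? next_online((int)n - 1) : next_online(-1);
	*offset = (unsigned long)cpu + 1;

	if (cpu < NR_CPU_IDS)
		return (unsigned long)cpu + 2;
	return 0;
}

int main(void)
{
	unsigned long pos = 0, token;

	while ((token = debug_start(&pos)) != 0) {
		if (token == 1)
			printf("header\n");
		else
			printf("cpu#%lu\n", token - 2);
		pos++;				/* what sched_debug_next() does */
	}
	return 0;
}

Running the sketch prints the header followed by cpu#0, cpu#2, cpu#3, cpu#5, cpu#6 and cpu#7, skipping the offline CPUs, which is why the iterator walks cpu_online_mask instead of assuming a dense 0..N range.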