Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2d72376b authored by Ingo Molnar
Browse files

sched: clean up schedstats, cnt -> count



rename all 'cnt' fields and variables to the less yucky 'count' name.

yuckage noticed by Andrew Morton.

no change in code, other than the /proc/sched_debug bkl_count string got
a bit larger:

   text    data     bss     dec     hex filename
  38236    3506      24   41766    a326 sched.o.before
  38240    3506      24   41770    a32a sched.o.after

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent 2b1e315d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -304,7 +304,7 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
	return sprintf(buffer, "%llu %llu %lu\n",
			task->sched_info.cpu_time,
			task->sched_info.run_delay,
			task->sched_info.pcnt);
			task->sched_info.pcount);
}
#endif

+6 −6
Original line number Diff line number Diff line
@@ -614,7 +614,7 @@ struct reclaim_state;
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcnt;	      /* # of times run on this cpu */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long cpu_time,  /* time spent on the cpu */
			   run_delay; /* time spent waiting on a runqueue */

@@ -623,7 +623,7 @@ struct sched_info {
			   last_queued;	/* when we were last queued to run */
#ifdef CONFIG_SCHEDSTATS
	/* BKL stats */
	unsigned long bkl_cnt;
	unsigned long bkl_count;
#endif
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -759,7 +759,7 @@ struct sched_domain {

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
	unsigned long lb_count[CPU_MAX_IDLE_TYPES];
	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
@@ -769,17 +769,17 @@ struct sched_domain {
	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned long alb_cnt;
	unsigned long alb_count;
	unsigned long alb_failed;
	unsigned long alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned long sbe_cnt;
	unsigned long sbe_count;
	unsigned long sbe_balanced;
	unsigned long sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned long sbf_cnt;
	unsigned long sbf_count;
	unsigned long sbf_balanced;
	unsigned long sbf_pushed;

+1 −1
Original line number Diff line number Diff line
@@ -119,7 +119,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcnt;
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->sched_info.cpu_time;

+12 −12
Original line number Diff line number Diff line
@@ -349,19 +349,19 @@ struct rq {
	unsigned long yld_exp_empty;
	unsigned long yld_act_empty;
	unsigned long yld_both_empty;
	unsigned long yld_cnt;
	unsigned long yld_count;

	/* schedule() stats */
	unsigned long sched_switch;
	unsigned long sched_cnt;
	unsigned long sched_count;
	unsigned long sched_goidle;

	/* try_to_wake_up() stats */
	unsigned long ttwu_cnt;
	unsigned long ttwu_count;
	unsigned long ttwu_local;

	/* BKL stats */
	unsigned long bkl_cnt;
	unsigned long bkl_count;
#endif
	struct lock_class_key rq_lock_key;
};
@@ -1481,7 +1481,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)

	new_cpu = cpu;

	schedstat_inc(rq, ttwu_cnt);
	schedstat_inc(rq, ttwu_count);
	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		goto out_set_cpu;
@@ -2637,7 +2637,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
		sd_idle = 1;

	schedstat_inc(sd, lb_cnt[idle]);
	schedstat_inc(sd, lb_count[idle]);

redo:
	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
@@ -2790,7 +2790,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
		sd_idle = 1;

	schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
redo:
	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
				   &sd_idle, &cpus, NULL);
@@ -2924,7 +2924,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
	}

	if (likely(sd)) {
		schedstat_inc(sd, alb_cnt);
		schedstat_inc(sd, alb_count);

		if (move_one_task(target_rq, target_cpu, busiest_rq,
				  sd, CPU_IDLE))
@@ -3414,11 +3414,11 @@ static inline void schedule_debug(struct task_struct *prev)

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq(), sched_cnt);
	schedstat_inc(this_rq(), sched_count);
#ifdef CONFIG_SCHEDSTATS
	if (unlikely(prev->lock_depth >= 0)) {
		schedstat_inc(this_rq(), bkl_cnt);
		schedstat_inc(prev, sched_info.bkl_cnt);
		schedstat_inc(this_rq(), bkl_count);
		schedstat_inc(prev, sched_info.bkl_count);
	}
#endif
}
@@ -4558,7 +4558,7 @@ asmlinkage long sys_sched_yield(void)
{
	struct rq *rq = this_rq_lock();

	schedstat_inc(rq, yld_cnt);
	schedstat_inc(rq, yld_count);
	current->sched_class->yield_task(rq);

	/*
+4 −4
Original line number Diff line number Diff line
@@ -137,8 +137,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_cnt",
			rq->bkl_cnt);
	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_count",
			rq->bkl_count);
#endif
	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
@@ -342,7 +342,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
	PN(se.exec_max);
	PN(se.slice_max);
	PN(se.wait_max);
	P(sched_info.bkl_cnt);
	P(sched_info.bkl_count);
#endif
	SEQ_printf(m, "%-25s:%20Ld\n",
		   "nr_switches", (long long)(p->nvcsw + p->nivcsw));
@@ -370,7 +370,7 @@ void proc_sched_set_task(struct task_struct *p)
	p->se.exec_max			= 0;
	p->se.slice_max			= 0;
	p->se.wait_max			= 0;
	p->sched_info.bkl_cnt		= 0;
	p->sched_info.bkl_count		= 0;
#endif
	p->se.sum_exec_runtime		= 0;
	p->se.prev_sum_exec_runtime	= 0;
Loading