
Commit 53f7b9bc authored by Linus Torvalds
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: fix ideal_runtime calculations for reniced tasks
  sched: improve prev_sum_exec_runtime setting
  sched: simplify __check_preempt_curr_fair()
  sched: fix xtensa build warning
  sched: debug: fix sum_exec_runtime clearing
  sched: debug: fix cfs_rq->wait_runtime accounting
  sched: fix niced_granularity() shift
  sched: fix MC/HT scheduler optimization, without breaking the FUZZ logic.
parents 3c038f97 11697830
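For the "sched: fix niced_granularity() shift" change below: inverse load weights are fixed-point values scaled by 2^WMULT_SHIFT, so scaling a preemption granularity for a negative-nice task has to shift by WMULT_SHIFT - NICE_0_SHIFT, not by WMULT_SHIFT, or the result comes out roughly NICE_0_LOAD (1024x) too small. A minimal standalone sketch of that arithmetic, not part of the commit; the constants and the nice -5 weight are assumed from the 2.6.23 sources and are illustrative only:

#include <stdio.h>
#include <stdint.h>

#define WMULT_SHIFT	32	/* assumed 2.6.23 value: fixed-point shift of the inverse weight */
#define NICE_0_SHIFT	10	/* assumed 2.6.23 value: log2(NICE_0_LOAD) == log2(1024) */

int main(void)
{
	uint64_t weight = 3121;		/* illustrative: roughly the load weight of a nice -5 task */
	uint64_t inv_weight = ((uint64_t)1 << WMULT_SHIFT) / weight;
	uint64_t granularity = 2000000;	/* 2 ms, in nanoseconds */
	uint64_t tmp = inv_weight * granularity;

	/* Old shift: granularity / weight, roughly 1024x too small. */
	printf("old shift: %llu ns\n", (unsigned long long)(tmp >> WMULT_SHIFT));
	/* Fixed shift: granularity * NICE_0_LOAD / weight, the intended scaling. */
	printf("new shift: %llu ns\n",
	       (unsigned long long)(tmp >> (WMULT_SHIFT - NICE_0_SHIFT)));
	return 0;
}

With these numbers the old shift prints a few hundred nanoseconds while the fixed one prints roughly 0.66 ms (2 ms * 1024 / 3121), which is the under-scheduling the fix addresses.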
kernel/sched.c +6 −9
@@ -668,7 +668,7 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
/*
 * Shift right and round:
 */
-#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
@@ -684,10 +684,10 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
-		tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
-		tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
+		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
@@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)

static void set_load_weight(struct task_struct *p)
{
-	task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
	p->se.wait_runtime = 0;

	if (task_has_rt_policy(p)) {
@@ -2512,7 +2511,7 @@ group_next:
	 * a think about bumping its value to force at least one task to be
	 * moved
	 */
-	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+	if (*imbalance < busiest_load_per_task) {
		unsigned long tmp, pwr_now, pwr_move;
		unsigned int imbn;

@@ -2564,9 +2563,7 @@ small_imbalance:
		pwr_move /= SCHED_LOAD_SCALE;

		/* Move if we gain throughput */
-		if (pwr_move <= pwr_now)
-			goto out_balanced;
-
-		*imbalance = busiest_load_per_task;
+		if (pwr_move > pwr_now)
+			*imbalance = busiest_load_per_task;
	}

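The SRR() rename above (RSR is apparently already taken by the xtensa headers, hence the build-warning fix) keeps the same round-half-up right shift, and calc_delta_mine() splits the WMULT_SHIFT shift into two halves when the 64-bit product could overflow. A minimal standalone sketch of that pattern, not the kernel function itself; scale_delta() and the numbers in main() are made up for illustration, and WMULT_SHIFT/WMULT_CONST are assumed from the 2.6.23 sources:

#include <stdio.h>
#include <stdint.h>

/* Round-half-up right shift, same shape as the renamed SRR() macro. */
#define SRR(x, y) (((x) + (1ULL << ((y) - 1))) >> (y))

#define WMULT_SHIFT	32		/* assumed 2.6.23 value */
#define WMULT_CONST	(~0UL)		/* assumed 2.6.23 value */

/* Illustrative stand-in for the weighted-delta math in calc_delta_mine(). */
static uint64_t scale_delta(uint64_t delta_exec, unsigned long weight,
			    unsigned long inv_weight)
{
	uint64_t tmp = delta_exec * weight;

	/*
	 * Multiplying tmp by inv_weight (itself up to 2^WMULT_SHIFT) could
	 * overflow 64 bits, so shift half of WMULT_SHIFT out before the
	 * multiply and the other half afterwards, rounding both times.
	 */
	if (tmp > WMULT_CONST)
		tmp = SRR(SRR(tmp, WMULT_SHIFT / 2) * inv_weight, WMULT_SHIFT / 2);
	else
		tmp = SRR(tmp * inv_weight, WMULT_SHIFT);

	return tmp;
}

int main(void)
{
	/* Illustrative numbers: 10 ms of runtime, numerator weight 1024, divisor weight 2048. */
	unsigned long inv_weight = (unsigned long)((1ULL << WMULT_SHIFT) / 2048);

	printf("%llu ns\n", (unsigned long long)scale_delta(10000000ULL, 1024, inv_weight));
	return 0;
}

With these numbers it prints roughly 5000000, i.e. 10 ms scaled by 1024/2048, through either branch.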
kernel/sched_debug.c +1 −0
@@ -283,4 +283,5 @@ void proc_sched_set_task(struct task_struct *p)
	p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
#endif
	p->se.sum_exec_runtime = 0;
+	p->se.prev_sum_exec_runtime	= 0;
}
kernel/sched_fair.c +31 −28
@@ -194,6 +194,8 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;
+
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}

static inline void
@@ -205,6 +207,8 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;
+
+	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -291,7 +295,7 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
	/*
	 * It will always fit into 'long':
	 */
-	return (long) (tmp >> WMULT_SHIFT);
+	return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
}

static inline void
@@ -574,7 +578,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)

	prev_runtime = se->wait_runtime;
	__add_wait_runtime(cfs_rq, se, delta_fair);
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
	delta_fair = se->wait_runtime - prev_runtime;

	/*
@@ -662,7 +665,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
-		cfs_rq->wait_runtime -= se->wait_runtime;
#endif
	}
	__dequeue_entity(cfs_rq, se);
@@ -671,22 +673,39 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
/*
 * Preempt the current task with a newly woken task if needed:
 */
-static int
+static void
__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
			  struct sched_entity *curr, unsigned long granularity)
{
	s64 __delta = curr->fair_key - se->fair_key;
+	unsigned long ideal_runtime, delta_exec;
+
+	/*
+	 * ideal_runtime is compared against sum_exec_runtime, which is
+	 * walltime, hence do not scale.
+	 */
+	ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+			(unsigned long)sysctl_sched_min_granularity);
+
+	/*
+	 * If we executed more than what the latency constraint suggests,
+	 * reduce the rescheduling granularity. This way the total latency
+	 * of how much a task is not scheduled converges to
+	 * sysctl_sched_latency:
+	 */
+	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	if (delta_exec > ideal_runtime)
+		granularity = 0;

	/*
	 * Take scheduling granularity into account - do not
	 * preempt the current task unless the best task has
	 * a larger than sched_granularity fairness advantage:
+	 *
+	 * scale granularity as key space is in fair_clock.
	 */
-	if (__delta > niced_granularity(curr, granularity)) {
+	if (__delta > niced_granularity(curr, granularity))
		resched_task(rq_of(cfs_rq)->curr);
-		return 1;
-	}
-	return 0;
}

static inline void
@@ -702,6 +721,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
	update_stats_wait_end(cfs_rq, se);
	update_stats_curr_start(cfs_rq, se);
	set_cfs_rq_curr(cfs_rq, se);
+	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
@@ -731,7 +751,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
-	unsigned long gran, ideal_runtime, delta_exec;
	struct sched_entity *next;

	/*
@@ -748,22 +767,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
	if (next == curr)
		return;

-	gran = sched_granularity(cfs_rq);
-	ideal_runtime = niced_granularity(curr,
-		max(sysctl_sched_latency / cfs_rq->nr_running,
-		    (unsigned long)sysctl_sched_min_granularity));
-	/*
-	 * If we executed more than what the latency constraint suggests,
-	 * reduce the rescheduling granularity. This way the total latency
-	 * of how much a task is not scheduled converges to
-	 * sysctl_sched_latency:
-	 */
-	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
-		gran = 0;
-
-	if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
-		curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
+	__check_preempt_curr_fair(cfs_rq, next, curr,
+			sched_granularity(cfs_rq));
}

/**************************************************
@@ -1121,10 +1126,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
	 * The statistical average of wait_runtime is about
	 * -granularity/2, so initialize the task with that:
	 */
-	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
		se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
-		schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
-	}

	__enqueue_entity(cfs_rq, se);
}
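
Net effect of the sched_fair.c changes above: the wall-clock slice check moves from entity_tick() into __check_preempt_curr_fair(), driven by prev_sum_exec_runtime, which set_next_entity() now records when an entity starts running (and which sched_debug.c now clears alongside sum_exec_runtime). A minimal standalone sketch of that check, not kernel code; the sysctl defaults and the helper name are assumptions for illustration:

#include <stdio.h>

/* Assumed defaults, in nanoseconds; in the kernel these are sysctls. */
static unsigned long sysctl_sched_latency = 20000000UL;		/* 20 ms */
static unsigned long sysctl_sched_min_granularity = 2000000UL;	/* 2 ms */

#define max(a, b) ((a) > (b) ? (a) : (b))

/*
 * Mirrors the logic added to __check_preempt_curr_fair(): ideal_runtime is
 * wall time, so it is compared against the unscaled execution delta since
 * the entity was last set running; once the entity has overrun its ideal
 * slice, the preemption granularity is dropped to zero so any fairer task
 * preempts it immediately.
 */
static int overran_ideal_runtime(unsigned long nr_running,
				 unsigned long long sum_exec_runtime,
				 unsigned long long prev_sum_exec_runtime)
{
	unsigned long ideal_runtime;
	unsigned long long delta_exec;

	ideal_runtime = max(sysctl_sched_latency / nr_running,
			    sysctl_sched_min_granularity);
	delta_exec = sum_exec_runtime - prev_sum_exec_runtime;

	return delta_exec > ideal_runtime;
}

int main(void)
{
	/* 5 runnable tasks: ideal slice is max(20 ms / 5, 2 ms) = 4 ms. */
	printf("%d\n", overran_ideal_runtime(5, 9000000ULL, 4000000ULL));	/* ran 5 ms -> 1 */
	printf("%d\n", overran_ideal_runtime(5, 7000000ULL, 4000000ULL));	/* ran 3 ms -> 0 */
	return 0;
}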