
Commit 71b8ebbf authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:
 "A few scheduler fixes:

   - Prevent a bogus warning vs. runqueue clock update flags in
     do_sched_rt_period_timer()

   - Simplify the helper functions which handle requests for skipping
     the runqueue clock update.

   - Do not unlock the tunables mutex in the error path of the schedutil
     cpufreq governor. It's not held there.

   - Enforce proper alignment for 'struct util_est' in sched_avg to
     prevent a misalignment fault on IA64"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Force proper alignment of 'struct util_est'
  sched/core: Simplify helpers for rq clock update skip requests
  sched/rt: Fix rq->clock_update_flags < RQCF_ACT_SKIP warning
  sched/cpufreq/schedutil: Fix error path mutex unlock
parents 174e7194 317d359d
+3 −3
@@ -300,7 +300,7 @@ struct util_est {
 	unsigned int			enqueued;
 	unsigned int			ewma;
 #define UTIL_EST_WEIGHT_SHIFT		2
-};
+} __attribute__((__aligned__(sizeof(u64))));
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
 	unsigned long			runnable_load_avg;
 	unsigned long			util_avg;
 	struct util_est			util_est;
-};
+} ____cacheline_aligned;
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
 	 * Put into separate cache line so it does not
 	 * collide with read-mostly values above.
	 */
-	struct sched_avg		avg ____cacheline_aligned_in_smp;
+	struct sched_avg		avg;
 #endif
 };
 
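Context for the hunks above: a struct of two 32-bit fields normally only needs 4-byte alignment, so a 64-bit-wide load or store of the whole struct can land on a non-8-byte boundary; __attribute__((__aligned__(sizeof(u64)))) raises the minimum alignment to 8 bytes, which is what the pull message means by enforcing proper alignment to avoid the IA64 misalignment fault. A minimal user-space sketch of the effect (not kernel code; the holder structs and every name apart from the two fields are invented for illustration):

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same two fields as struct util_est above; natural alignment is 4 bytes. */
struct util_est_plain {
	unsigned int enqueued;
	unsigned int ewma;
};

/* With the attribute from the hunk, minimum alignment becomes 8 bytes. */
struct util_est_forced {
	unsigned int enqueued;
	unsigned int ewma;
} __attribute__((__aligned__(sizeof(uint64_t))));

/* Illustrative containers: a 4-byte member in front can leave the plain
 * variant on a 4-byte boundary, while the forced variant gets padded up. */
struct holder_plain  { unsigned int x; struct util_est_plain  ue; };
struct holder_forced { unsigned int x; struct util_est_forced ue; };

int main(void)
{
	printf("alignof plain  = %zu\n", alignof(struct util_est_plain));     /* 4 */
	printf("alignof forced = %zu\n", alignof(struct util_est_forced));    /* 8 */
	printf("offset plain   = %zu\n", offsetof(struct holder_plain, ue));  /* typically 4 */
	printf("offset forced  = %zu\n", offsetof(struct holder_forced, ue)); /* 8 */
	return 0;
}

On a typical LP64 toolchain this prints 4/8 and 4/8, i.e. only the forced variant is guaranteed to sit on a u64 boundary.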
+1 −1
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
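The one-line change above is the call-site view of the "simplify the helper functions" item in the pull message: the bool argument disappears and each intent gets its own helper. A toy model of the request flag those helpers manipulate (plain user-space C, not the kernel implementation; the flag value, struct layout and the cancel helper's name are assumptions for the sketch):

#include <stdio.h>

#define RQCF_REQ_SKIP	0x01	/* "skip the next clock update" request bit */

struct toy_rq {
	unsigned int clock_update_flags;
};

/* Request a skip of the next runqueue clock update (old form: helper(rq, true)). */
static void rq_clock_skip_update(struct toy_rq *rq)
{
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/* Withdraw such a request (old form: helper(rq, false)); the name is assumed here. */
static void rq_clock_cancel_skipupdate(struct toy_rq *rq)
{
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

int main(void)
{
	struct toy_rq rq = { 0 };

	rq_clock_skip_update(&rq);
	printf("after request: %#x\n", rq.clock_update_flags);	/* 0x1 */
	rq_clock_cancel_skipupdate(&rq);
	printf("after cancel:  %#x\n", rq.clock_update_flags);	/* 0 */
	return 0;
}

The same rq_clock_skip_update(rq, true) → rq_clock_skip_update(rq) substitution appears again in the deadline and CFS yield paths further down.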
+1 −2
@@ -631,10 +631,9 @@ static int sugov_init(struct cpufreq_policy *policy)
 
 stop_kthread:
 	sugov_kthread_stop(sg_policy);
-
-free_sg_policy:
 	mutex_unlock(&global_tunables_lock);
 
+free_sg_policy:
 	sugov_policy_free(sg_policy);
 
 disable_fast_switch:
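The hunk above is the locking fix from the pull message: mutex_unlock() moves out from under the free_sg_policy: label so that only unwind paths which actually hold global_tunables_lock release it. A self-contained sketch of that goto-unwind rule (pthreads instead of kernel mutexes; the function, labels and failure knobs are invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t tunables_lock = PTHREAD_MUTEX_INITIALIZER;

static int setup(int fail_early, int fail_late)
{
	char *buf = malloc(64);
	if (!buf)
		return -1;

	if (fail_early)
		goto free_buf;		/* lock not taken yet: must not unlock */

	pthread_mutex_lock(&tunables_lock);

	if (fail_late)
		goto unlock;		/* lock held: unlock before freeing */

	pthread_mutex_unlock(&tunables_lock);
	free(buf);
	return 0;

unlock:
	pthread_mutex_unlock(&tunables_lock);
free_buf:
	free(buf);
	return -1;
}

int main(void)
{
	printf("fail_early: %d\n", setup(1, 0));	/* skips the unlock */
	printf("fail_late:  %d\n", setup(0, 1));	/* unlocks, then frees */
	printf("success:    %d\n", setup(0, 0));
	return 0;
}

Rule of thumb: each error label undoes exactly what every jump to it has already set up, so an unlock belongs above the first label reachable without the lock.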
+1 −1
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
+1 −1
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);