
Commit a0b57ca3 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Various smaller fixlets, all over the place"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/doc: Fix generation of device-drivers
  sched: Expose preempt_schedule_irq()
  sched: Fix a trivial typo in comments
  sched: Remove unused variable in 'struct sched_domain'
  sched: Avoid NULL dereference on sd_busy
  sched: Check sched_domain before computing group power
  MAINTAINERS: Update file patterns in the lockdep and scheduler entries
parents e321ae4c 96739d6e
MAINTAINERS +1 −2
@@ -5267,7 +5267,7 @@ S: Maintained
 F:	Documentation/lockdep*.txt
 F:	Documentation/lockstat.txt
 F:	include/linux/lockdep.h
-F:	kernel/lockdep*
+F:	kernel/locking/
 
 LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
 M:	"Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -7391,7 +7391,6 @@ S: Maintained
 F:	kernel/sched/
 F:	include/linux/sched.h
 F:	include/uapi/linux/sched.h
-F:	kernel/wait.c
 F:	include/linux/wait.h
 
 SCORE ARCHITECTURE
include/linux/sched.h +0 −2
@@ -831,8 +831,6 @@ struct sched_domain {
 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
-	u64 last_update;
-
 	/* idle_balance() stats */
 	u64 max_newidle_lb_cost;
 	unsigned long next_decay_max_lb_cost;
kernel/sched/core.c +4 −4
@@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 	} while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
+#endif /* CONFIG_PREEMPT */
 
 /*
  * this is the entry point to schedule() from kernel preemption
@@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	exception_exit(prev_state);
 }
 
-#endif /* CONFIG_PREEMPT */
-
 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
 			  void *key)
 {
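
Taken together, the two hunks above move the CONFIG_PREEMPT guard: the #endif now closes right after preempt_schedule(), so preempt_schedule_irq() is compiled for all preemption models, which is what the "sched: Expose preempt_schedule_irq()" commit in this pull does. A minimal compilable sketch of the resulting layout (kernel annotations stubbed out so it builds as plain C; function bodies elided):

/* Stubs so this sketch compiles outside the kernel. */
#define asmlinkage
#define __sched
#define notrace
#define EXPORT_SYMBOL(sym) extern int sym##_exported
#define CONFIG_PREEMPT 1

#ifdef CONFIG_PREEMPT
asmlinkage void __sched notrace preempt_schedule(void)
{
	/* reschedule at kernel preemption points; CONFIG_PREEMPT only */
}
EXPORT_SYMBOL(preempt_schedule);
#endif /* CONFIG_PREEMPT */

/*
 * Entry point to schedule() from kernel preemption off the interrupt
 * return path; after this change it is built unconditionally.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
	/* reschedule with interrupts disabled on irq exit */
}

int main(void)
{
	preempt_schedule();
	preempt_schedule_irq();
	return 0;
}
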
@@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 		cpumask_clear_cpu(rq->cpu, old_rd->span);
 
 		/*
-		 * If we dont want to free the old_rt yet then
+		 * If we dont want to free the old_rd yet then
 		 * set old_rd to NULL to skip the freeing later
 		 * in this function:
 		 */
@@ -4910,8 +4909,9 @@ static void update_top_cache_domain(int cpu)
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
-		rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent);
+		sd = sd->parent; /* sd_busy */
 	}
+	rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_size, cpu) = size;
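
The last hunk above is the "sched: Avoid NULL dereference on sd_busy" fix: the per-CPU sd_busy pointer is now published on every update, so it is explicitly cleared when no cache-sharing domain exists instead of being skipped. A small compilable model of that pattern, where find_llc_domain() is a hypothetical stand-in for the kernel's domain lookup and plain assignment stands in for rcu_assign_pointer():

#include <stddef.h>

struct sched_domain {
	struct sched_domain *parent;
};

/* Models per_cpu(sd_busy, cpu) for a single CPU. */
static struct sched_domain *sd_busy;

/* Hypothetical stand-in for the domain lookup; may return NULL. */
static struct sched_domain *find_llc_domain(int cpu)
{
	(void)cpu;
	return NULL;
}

static void update_top_cache_domain(int cpu)
{
	struct sched_domain *sd = find_llc_domain(cpu);

	if (sd)
		sd = sd->parent; /* sd_busy */

	/* Publish unconditionally: NULL when no domain exists. */
	sd_busy = sd;
}

int main(void)
{
	update_top_cache_domain(0);
	return sd_busy == NULL ? 0 : 1;
}
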
kernel/sched/fair.c +24 −3
@@ -5379,10 +5379,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
 		 */
 
 		for_each_cpu(cpu, sched_group_cpus(sdg)) {
-			struct sched_group *sg = cpu_rq(cpu)->sd->groups;
+			struct sched_group_power *sgp;
+			struct rq *rq = cpu_rq(cpu);
 
+			/*
+			 * build_sched_domains() -> init_sched_groups_power()
+			 * gets here before we've attached the domains to the
+			 * runqueues.
+			 *
+			 * Use power_of(), which is set irrespective of domains
+			 * in update_cpu_power().
+			 *
+			 * This avoids power/power_orig from being 0 and
+			 * causing divide-by-zero issues on boot.
+			 *
+			 * Runtime updates will correct power_orig.
+			 */
+			if (unlikely(!rq->sd)) {
+				power_orig += power_of(cpu);
+				power += power_of(cpu);
+				continue;
+			}
+
-			power_orig += sg->sgp->power_orig;
-			power += sg->sgp->power;
+			sgp = rq->sd->groups->sgp;
+			power_orig += sgp->power_orig;
+			power += sgp->power;
 		}
 	} else  {
 		/*
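
The block comment in this hunk carries the reasoning: init_sched_groups_power() can run before the domains are attached to the runqueues, so rq->sd may still be NULL, and the fallback to power_of() keeps power and power_orig from accumulating to zero and triggering a divide-by-zero later. A compilable model of the guarded loop, with the struct layout and the value returned by power_of() as simplified stand-ins:

#include <stdio.h>

#define NCPUS 4

struct sched_group_power { unsigned long power, power_orig; };
struct sched_group { struct sched_group_power *sgp; };
struct sched_domain { struct sched_group *groups; };
struct rq { struct sched_domain *sd; };

/* Models the per-CPU runqueues; sd stays NULL until domains attach. */
static struct rq runqueues[NCPUS];

/* Stand-in for power_of(): a per-CPU value set independently of domains. */
static unsigned long power_of(int cpu)
{
	(void)cpu;
	return 1024;
}

static void sum_group_power(unsigned long *power, unsigned long *power_orig)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		struct rq *rq = &runqueues[cpu];

		/*
		 * Domains not attached yet: fall back to power_of() so
		 * the totals cannot be zero (the divide-by-zero guard).
		 */
		if (!rq->sd) {
			*power_orig += power_of(cpu);
			*power += power_of(cpu);
			continue;
		}

		*power_orig += rq->sd->groups->sgp->power_orig;
		*power += rq->sd->groups->sgp->power;
	}
}

int main(void)
{
	unsigned long power = 0, power_orig = 0;

	sum_group_power(&power, &power_orig);
	printf("power=%lu power_orig=%lu\n", power, power_orig);
	return 0;
}
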