
Commit 98f20fb6 authored by Linus Torvalds
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: default to more aggressive yield for SCHED_BATCH tasks
  sched: fix crash in sys_sched_rr_get_interval()
parents 22082102 db292ca3
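For context, the crash fix lands in the kernel entry point of the sched_rr_get_interval(2) syscall, patched in the first hunk below. The following userspace caller is an illustrative sketch only, not part of the commit:

/* Illustrative only, not part of the commit: a minimal userspace
 * caller of sched_rr_get_interval(2), whose kernel-side handler
 * sys_sched_rr_get_interval() is fixed in the first hunk below. */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 means "the calling task" */
	if (sched_rr_get_interval(0, &ts) != 0) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("reported timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}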
kernel/sched.c +9 −5
@@ -4850,17 +4850,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	if (retval)
 		goto out_unlock;
 
-	if (p->policy == SCHED_FIFO)
-		time_slice = 0;
-	else if (p->policy == SCHED_RR)
+	/*
+	 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
+	 * tasks that are on an otherwise idle runqueue:
+	 */
+	time_slice = 0;
+	if (p->policy == SCHED_RR) {
 		time_slice = DEF_TIMESLICE;
-	else {
+	} else {
 		struct sched_entity *se = &p->se;
 		unsigned long flags;
 		struct rq *rq;
 
 		rq = task_rq_lock(p, &flags);
-		time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+		if (rq->cfs.load.weight)
+			time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
 		task_rq_unlock(rq, &flags);
 	}
 	read_unlock(&tasklist_lock);
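The added rq->cfs.load.weight check matches the crash being fixed: at this point in the kernel's history, sched_slice() divides by the queue's total load weight, which is zero on an otherwise idle runqueue. The following is a simplified sketch of that arithmetic, not the kernel's sched_slice() itself:

/*
 * Simplified sketch only (not the kernel's sched_slice()): the CFS
 * slice is the scheduling period scaled by this entity's weight over
 * the queue's total weight, so an otherwise idle runqueue with zero
 * total weight must be special-cased to a zero slice, as the hunk
 * above now does.
 */
struct load_sketch {
	unsigned long weight;
};

static unsigned long long slice_sketch(unsigned long long period_ns,
				       const struct load_sketch *se_load,
				       const struct load_sketch *queue_load)
{
	if (!queue_load->weight)	/* idle queue: report no timeslice */
		return 0;
	return period_ns * se_load->weight / queue_load->weight;
}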
kernel/sched_fair.c +4 −3
@@ -799,8 +799,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  */
 static void yield_task_fair(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
-	struct sched_entity *rightmost, *se = &rq->curr->se;
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	struct sched_entity *rightmost, *se = &curr->se;
 
 	/*
 	 * Are we the only task in the tree?
@@ -808,7 +809,7 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
-	if (likely(!sysctl_sched_compat_yield)) {
+	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		__update_rq_clock(rq);
 		/*
 		 * Update run-time statistics of the 'current'.
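The net effect of this hunk: SCHED_BATCH tasks now skip the default "update statistics and return" path and take the same aggressive yield used when sysctl_sched_compat_yield is set, which pushes the yielding task's vruntime past the rightmost entity in the CFS rbtree. The sketch below is illustrative only, with a hypothetical helper name rather than the kernel function:

/*
 * Illustrative sketch only, not the kernel function: after this patch,
 * yield_task_fair() treats SCHED_BATCH tasks like the compat-yield
 * sysctl case and moves them to the rightmost end of the CFS rbtree
 * instead of merely refreshing their runtime statistics.
 */
#define SCHED_BATCH	3	/* same value as in <linux/sched.h> */

enum yield_action {
	YIELD_UPDATE_STATS_ONLY,	/* default CFS yield */
	YIELD_MOVE_TO_RIGHTMOST,	/* aggressive, compat-style yield */
};

static enum yield_action yield_action_for(int compat_yield, int policy)
{
	if (!compat_yield && policy != SCHED_BATCH)
		return YIELD_UPDATE_STATS_ONLY;
	return YIELD_MOVE_TO_RIGHTMOST;
}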