Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e05606d3 authored by Ingo Molnar
Browse files

sched: clean up the rt priority macros



clean up the rt priority macros, pointed out by Andrew Morton.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 138a8aeb
Loading
Loading
Loading
Loading
+36 −25
Original line number Diff line number Diff line
@@ -525,31 +525,6 @@ struct signal_struct {
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */


/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)

#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
#define rt_task(p)		rt_prio((p)->prio)
#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
#define is_rt_policy(p)		((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))

/*
 * Some day this will be a full-fledged user tracking system..
 */
@@ -1164,6 +1139,42 @@ struct task_struct {
#endif
};

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)

/*
 * Return 1 if @prio lies in the realtime priority range
 * (0..MAX_RT_PRIO-1), 0 otherwise.  Lower p->prio means higher
 * priority, so all RT priorities sit below MAX_RT_PRIO.
 */
static inline int rt_prio(int prio)
{
	return unlikely(prio < MAX_RT_PRIO) ? 1 : 0;
}

/* Return 1 if task @p currently runs at a realtime priority. */
static inline int rt_task(struct task_struct *p)
{
	int prio = p->prio;

	return rt_prio(prio);
}

/* Return 1 if task @p is scheduled under the SCHED_BATCH policy. */
static inline int batch_task(struct task_struct *p)
{
	return (p->policy == SCHED_BATCH) ? 1 : 0;
}

static inline pid_t process_group(struct task_struct *tsk)
{
	return tsk->signal->pgrp;
+1 −1
Original line number Diff line number Diff line
@@ -290,7 +290,7 @@ static void reparent_to_kthreadd(void)
	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (!has_rt_policy(current) && (task_nice(current) < 0))
	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
+17 −5
Original line number Diff line number Diff line
@@ -220,6 +220,18 @@ static inline unsigned int task_timeslice(struct task_struct *p)
	return static_prio_timeslice(p->static_prio);
}

/*
 * Return 1 if @policy is a realtime scheduling policy
 * (SCHED_FIFO or SCHED_RR), 0 otherwise.
 */
static inline int rt_policy(int policy)
{
	return (unlikely(policy == SCHED_FIFO) ||
		unlikely(policy == SCHED_RR)) ? 1 : 0;
}

/* Return 1 if task @p runs under a realtime scheduling policy. */
static inline int task_has_rt_policy(struct task_struct *p)
{
	int policy = p->policy;

	return rt_policy(policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
@@ -698,7 +710,7 @@ static inline int __normal_prio(struct task_struct *p)

static void set_load_weight(struct task_struct *p)
{
	if (has_rt_policy(p)) {
	if (task_has_rt_policy(p)) {
#ifdef CONFIG_SMP
		if (p == task_rq(p)->migration_thread)
			/*
@@ -749,7 +761,7 @@ static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (has_rt_policy(p))
	if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
@@ -4051,7 +4063,7 @@ void set_user_nice(struct task_struct *p, long nice)
	 * it wont have any effect on scheduling until the task is
	 * not SCHED_NORMAL/SCHED_BATCH:
	 */
	if (has_rt_policy(p)) {
	if (task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
@@ -4240,14 +4252,14 @@ int sched_setscheduler(struct task_struct *p, int policy,
	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
		return -EINVAL;
	if (is_rt_policy(policy) != (param->sched_priority != 0))
	if (rt_policy(policy) != (param->sched_priority != 0))
		return -EINVAL;

	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
	if (!capable(CAP_SYS_NICE)) {
		if (is_rt_policy(policy)) {
		if (rt_policy(policy)) {
			unsigned long rlim_rtprio;
			unsigned long flags;