
Commit 8d5b9025 authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Introduce {en,de}queue_load_avg()



Analogous to the existing {en,de}queue_runnable_load_avg(), add helpers
for {en,de}queue_load_avg(). More users will follow.

Includes some code movement to avoid fwd declarations.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b5b3e35f
+86 −70
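Note on the clamping helpers used below: dequeue_load_avg() and dequeue_runnable_load_avg() rely on sub_positive(), which does an explicit load, clamps the subtraction at zero on underflow, and then performs a single store, so a lockless reader never observes a transient wrapped (negative) value. A minimal userspace sketch of that behaviour follows; it is not part of the commit, and the READ_ONCE()/WRITE_ONCE() stand-ins plus the main() harness are illustrative assumptions only:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define READ_ONCE(x)		(x)
#define WRITE_ONCE(x, v)	((x) = (v))

/* Same shape as the kernel macro moved by this commit. */
#define sub_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	typeof(*ptr) val = (_val);				\
	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
	res = var - val;					\
	if (res > var)		/* unsigned subtraction wrapped */ \
		res = 0;					\
	WRITE_ONCE(*ptr, res);					\
} while (0)

int main(void)
{
	unsigned long load_avg = 3;

	sub_positive(&load_avg, 5UL);	/* 3 - 5 would wrap; clamped */
	printf("%lu\n", load_avg);	/* prints 0 */
	return 0;
}

The same clamp appears in dequeue_runnable_load_avg() and dequeue_load_avg() in the diff, so a concurrently sampled runnable_load_avg or avg.load_avg reads as zero rather than a huge wrapped value.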
@@ -2692,6 +2692,90 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
	cfs_rq->nr_running--;
}

/*
 * Signed add and clamp on underflow.
 *
 * Explicitly do a load-store to ensure the intermediate value never hits
 * memory. This allows lockless observations without ever seeing the negative
 * values.
 */
#define add_positive(_ptr, _val) do {                           \
	typeof(_ptr) ptr = (_ptr);                              \
	typeof(_val) val = (_val);                              \
	typeof(*ptr) res, var = READ_ONCE(*ptr);                \
								\
	res = var + val;                                        \
								\
	if (val < 0 && res > var)                               \
		res = 0;                                        \
								\
	WRITE_ONCE(*ptr, res);                                  \
} while (0)

/*
 * Unsigned subtract and clamp on underflow.
 *
 * Explicitly do a load-store to ensure the intermediate value never hits
 * memory. This allows lockless observations without ever seeing the negative
 * values.
 */
#define sub_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	typeof(*ptr) val = (_val);				\
	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
	res = var - val;					\
	if (res > var)						\
		res = 0;					\
	WRITE_ONCE(*ptr, res);					\
} while (0)

#ifdef CONFIG_SMP
/*
 * XXX we want to get rid of this helper and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
	return scale_load_down(se->load.weight);
}

static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->runnable_load_avg += se->avg.load_avg;
	cfs_rq->runnable_load_sum += se_weight(se) * se->avg.load_sum;
}

static inline void
dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	sub_positive(&cfs_rq->runnable_load_avg, se->avg.load_avg);
	sub_positive(&cfs_rq->runnable_load_sum, se_weight(se) * se->avg.load_sum);
}

static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->avg.load_avg += se->avg.load_avg;
	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
}

static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
}
#else
static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
/*
@@ -3084,14 +3168,6 @@ ___update_load_avg(struct sched_avg *sa, unsigned long weight, struct cfs_rq *cf
	sa->util_avg = sa->util_sum / divider;
}

/*
 * XXX we want to get rid of this helper and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
	return scale_load_down(se->load.weight);
}

/*
 * sched_entity:
 *
@@ -3141,26 +3217,6 @@ __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
	return 0;
}

/*
 * Signed add and clamp on underflow.
 *
 * Explicitly do a load-store to ensure the intermediate value never hits
 * memory. This allows lockless observations without ever seeing the negative
 * values.
 */
#define add_positive(_ptr, _val) do {                           \
	typeof(_ptr) ptr = (_ptr);                              \
	typeof(_val) val = (_val);                              \
	typeof(*ptr) res, var = READ_ONCE(*ptr);                \
								\
	res = var + val;                                        \
								\
	if (val < 0 && res > var)                               \
		res = 0;                                        \
								\
	WRITE_ONCE(*ptr, res);                                  \
} while (0)

#ifdef CONFIG_FAIR_GROUP_SCHED
/**
 * update_tg_load_avg - update the tg's load avg
@@ -3405,23 +3461,6 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}


#endif /* CONFIG_FAIR_GROUP_SCHED */

/*
 * Unsigned subtract and clamp on underflow.
 *
 * Explicitly do a load-store to ensure the intermediate value never hits
 * memory. This allows lockless observations without ever seeing the negative
 * values.
 */
#define sub_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	typeof(*ptr) val = (_val);				\
	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
	res = var - val;					\
	if (res > var)						\
		res = 0;					\
	WRITE_ONCE(*ptr, res);					\
} while (0)

/**
 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
 * @now: current time, as per cfs_rq_clock_task()
@@ -3484,8 +3523,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->avg.last_update_time = cfs_rq->avg.last_update_time;
	cfs_rq->avg.load_avg += se->avg.load_avg;
	enqueue_load_avg(cfs_rq, se);
	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
	cfs_rq->avg.util_avg += se->avg.util_avg;
	cfs_rq->avg.util_sum += se->avg.util_sum;
	set_tg_cfs_propagate(cfs_rq);
@@ -3503,9 +3541,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 */
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	dequeue_load_avg(cfs_rq, se);
	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
	set_tg_cfs_propagate(cfs_rq);
@@ -3547,22 +3583,6 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
		update_tg_load_avg(cfs_rq, 0);
}

/* Add the load generated by se into cfs_rq's load average */
static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->runnable_load_avg += se->avg.load_avg;
	cfs_rq->runnable_load_sum += se_weight(se) * se->avg.load_sum;
}

/* Remove the runnable load generated by se from cfs_rq's runnable load average */
static inline void
dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	sub_positive(&cfs_rq->runnable_load_avg, se->avg.load_avg);
	sub_positive(&cfs_rq->runnable_load_sum, se_weight(se) * se->avg.load_sum);
}

#ifndef CONFIG_64BIT
static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
{
@@ -3649,10 +3669,6 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
	cfs_rq_util_change(cfs_rq);
}

static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void
dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void remove_entity_load_avg(struct sched_entity *se) {}

static inline void