Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9cfb9b3c authored by Martin Schwidefsky
Browse files

[PATCH] improve idle cputime accounting



Distinguish the cputime of the idle process where idle is actually using
cpu cycles from the cputime where idle is sleeping on an enabled wait psw.
The former is accounted as system time, the latter as idle time.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 6f430924
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -21,12 +21,12 @@ struct s390_idle_data {

DECLARE_PER_CPU(struct s390_idle_data, s390_idle);

void s390_idle_leave(void);
void vtime_start_cpu(void);

static inline void s390_idle_check(void)
{
	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
		s390_idle_leave();
		vtime_start_cpu();
}

#endif /* _ASM_S390_CPU_H_ */
+7 −9
Original line number Diff line number Diff line
@@ -23,20 +23,18 @@ struct vtimer_list {
	__u64 expires;
	__u64 interval;

	spinlock_t lock;
	unsigned long magic;

	void (*function)(unsigned long);
	unsigned long data;
};

/* the offset value will wrap after ca. 71 years */
/* the vtimer value will wrap after ca. 71 years */
struct vtimer_queue {
	struct list_head list;
	spinlock_t lock;
	__u64 to_expire;	  /* current event expire time */
	__u64 offset;		  /* list offset to zero */
	__u64 timer;		/* last programmed timer */
	__u64 elapsed;		/* elapsed time of timer expire values */
	__u64 idle;		/* temp var for idle */
	int do_spt;		/* =1: reprogram cpu timer in idle */
};

extern void init_virt_timer(struct vtimer_list *timer);
@@ -48,8 +46,8 @@ extern int del_virt_timer(struct vtimer_list *timer);
extern void init_cpu_vtimer(void);
extern void vtime_init(void);

extern void vtime_start_cpu_timer(void);
extern void vtime_stop_cpu_timer(void);
extern void vtime_stop_cpu(void);
extern void vtime_start_leave(void);

#endif /* __KERNEL__ */

+3 −2
Original line number Diff line number Diff line
@@ -583,8 +583,8 @@ kernel_per:

	.globl io_int_handler
io_int_handler:
	stpt	__LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA+16
	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
@@ -723,8 +723,8 @@ io_notify_resume:

	.globl	ext_int_handler
ext_int_handler:
	stpt	__LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA+16
	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
@@ -750,6 +750,7 @@ __critical_end:

	.globl mcck_int_handler
mcck_int_handler:
	stck	__LC_INT_CLOCK
	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
	SAVE_ALL_BASE __LC_SAVE_AREA+32
+3 −2
Original line number Diff line number Diff line
@@ -559,8 +559,8 @@ kernel_per:
 */
	.globl io_int_handler
io_int_handler:
	stpt	__LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA+32
	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
@@ -721,8 +721,8 @@ io_notify_resume:
 */
	.globl	ext_int_handler
ext_int_handler:
	stpt	__LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA+32
	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
@@ -746,6 +746,7 @@ __critical_end:
 */
	.globl mcck_int_handler
mcck_int_handler:
	stck	__LC_INT_CLOCK
	la	%r1,4095		# revalidate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+3 −61
Original line number Diff line number Diff line
@@ -46,7 +46,6 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -76,35 +75,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
	return sf->gprs[8];
}

DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
	.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
};

void s390_idle_leave(void)
{
	struct s390_idle_data *idle;
	unsigned long long idle_time;

	idle = &__get_cpu_var(s390_idle);
	idle_time = S390_lowcore.int_clock - idle->idle_enter;
	spin_lock(&idle->lock);
	idle->idle_time += idle_time;
	idle->idle_enter = 0ULL;
	idle->idle_count++;
	spin_unlock(&idle->lock);
	vtime_start_cpu_timer();
}

extern void s390_handle_mcck(void);
/*
 * The idle loop on a S390...
 */
static void default_idle(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	unsigned long addr;
	psw_t psw;

	/* CPU is going idle. */
	local_irq_disable();
	if (need_resched()) {
@@ -120,7 +96,6 @@ static void default_idle(void)
	local_mcck_disable();
	if (test_thread_flag(TIF_MCCK_PENDING)) {
		local_mcck_enable();
		s390_idle_leave();
		local_irq_enable();
		s390_handle_mcck();
		return;
@@ -128,42 +103,9 @@ static void default_idle(void)
	trace_hardirqs_on();
	/* Don't trace preempt off for idle. */
	stop_critical_timings();
	vtime_stop_cpu_timer();

	/*
	 * The inline assembly is equivalent to
	 *	idle->idle_enter = get_clock();
	 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
	 *			   PSW_MASK_IO | PSW_MASK_EXT);
	 * The difference is that the inline assembly makes sure that
	 * the stck instruction is right before the lpsw instruction.
	 * This is done to increase the precision.
	 */

	/* Wait for external, I/O or machine check interrupt. */
	psw.mask = psw_kernel_bits|PSW_MASK_WAIT|PSW_MASK_IO|PSW_MASK_EXT;
#ifndef __s390x__
	asm volatile(
		"	basr	%0,0\n"
		"0:	ahi	%0,1f-0b\n"
		"	st	%0,4(%2)\n"
		"	stck	0(%3)\n"
		"	lpsw	0(%2)\n"
		"1:"
		: "=&d" (addr), "=m" (idle->idle_enter)
		: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
		: "memory", "cc");
#else /* __s390x__ */
	asm volatile(
		"	larl	%0,1f\n"
		"	stg	%0,8(%2)\n"
		"	stck	0(%3)\n"
		"	lpswe	0(%2)\n"
		"1:"
		: "=&d" (addr), "=m" (idle->idle_enter)
		: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
		: "memory", "cc");
#endif /* __s390x__ */
	/* Stop virtual timer and halt the cpu. */
	vtime_stop_cpu();
	/* Reenable preemption tracer. */
	start_critical_timings();
}

Loading