Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b2c77a57 authored by Ingo Molnar's avatar Ingo Molnar
Browse files

Merge tag 'full-dynticks-cputime-for-mingo' of...

Merge tag 'full-dynticks-cputime-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks

 into sched/core

Pull full-dynticks (user-space execution is undisturbed and
receives no timer IRQs) preparation changes that convert the
cputime accounting code to be full-dynticks ready,
from Frederic Weisbecker:

 "This implements the cputime accounting on full dynticks CPUs.

  Typical cputime stats infrastructure relies on the timer tick and
  its periodic polling on the CPU to account the amount of time
  spent by the CPUs and the tasks per high level domains such as
  userspace, kernelspace, guest, ...

  Now we are preparing to implement full dynticks capability on
  Linux for Real Time and HPC users who want full CPU isolation.
  This feature requires a cputime accounting that doesn't depend
  on the timer tick.

  To implement it, this new cputime infrastructure plugs into
  kernel/user/guest boundaries to take snapshots of cputime and
  flush these to the stats when needed. This performs pretty
  much like CONFIG_VIRT_CPU_ACCOUNTING except that context location
  and cputime snapshots are synchronized between write and read
  side such that the latter can safely retrieve the pending tickless
  cputime of a task and add it to its latest cputime snapshot to
  return the correct result to the user."

Signed-off-by: default avatarFrederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents c3c18640 6a61671b
Loading
Loading
Loading
Loading
+4 −2
Original line number Original line Diff line number Diff line
@@ -1139,6 +1139,7 @@ struct rusage32 {
SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
{
{
	struct rusage32 r;
	struct rusage32 r;
	cputime_t utime, stime;


	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
		return -EINVAL;
@@ -1146,8 +1147,9 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
	memset(&r, 0, sizeof(r));
	memset(&r, 0, sizeof(r));
	switch (who) {
	switch (who) {
	case RUSAGE_SELF:
	case RUSAGE_SELF:
		jiffies_to_timeval32(current->utime, &r.ru_utime);
		task_cputime(current, &utime, &stime);
		jiffies_to_timeval32(current->stime, &r.ru_stime);
		jiffies_to_timeval32(utime, &r.ru_utime);
		jiffies_to_timeval32(stime, &r.ru_stime);
		r.ru_minflt = current->min_flt;
		r.ru_minflt = current->min_flt;
		r.ru_majflt = current->maj_flt;
		r.ru_majflt = current->maj_flt;
		break;
		break;
+6 −86
Original line number Original line Diff line number Diff line
@@ -11,99 +11,19 @@
 * as published by the Free Software Foundation; either version
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 * 2 of the License, or (at your option) any later version.
 *
 *
 * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
 * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
 * Otherwise we measure cpu time in jiffies using the generic definitions.
 * Otherwise we measure cpu time in jiffies using the generic definitions.
 */
 */


#ifndef __IA64_CPUTIME_H
#ifndef __IA64_CPUTIME_H
#define __IA64_CPUTIME_H
#define __IA64_CPUTIME_H


#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
# include <asm-generic/cputime.h>
# include <asm-generic/cputime.h>
#else
#else

#include <linux/time.h>
#include <linux/jiffies.h>
# include <asm/processor.h>
# include <asm/processor.h>

# include <asm-generic/cputime_nsecs.h>
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;

#define cputime_one_jiffy		jiffies_to_cputime(1)

/*
 * Convert cputime <-> jiffies (HZ)
 */
#define cputime_to_jiffies(__ct)	\
	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
#define jiffies_to_cputime(__jif)	\
	(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
#define cputime64_to_jiffies64(__ct)	\
	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
#define jiffies64_to_cputime64(__jif)	\
	(__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))

/*
 * Convert cputime <-> microseconds
 */
#define cputime_to_usecs(__ct)		\
	((__force u64)(__ct) / NSEC_PER_USEC)
#define usecs_to_cputime(__usecs)	\
	(__force cputime_t)((__usecs) * NSEC_PER_USEC)
#define usecs_to_cputime64(__usecs)	\
	(__force cputime64_t)((__usecs) * NSEC_PER_USEC)

/*
 * Convert cputime <-> seconds
 */
#define cputime_to_secs(__ct)		\
	((__force u64)(__ct) / NSEC_PER_SEC)
#define secs_to_cputime(__secs)		\
	(__force cputime_t)((__secs) * NSEC_PER_SEC)

/*
 * Convert cputime <-> timespec (nsec)
 */
static inline cputime_t timespec_to_cputime(const struct timespec *val)
{
	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
	return (__force cputime_t) ret;
}
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
{
	val->tv_sec  = (__force u64) ct / NSEC_PER_SEC;
	val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
}

/*
 * Convert cputime <-> timeval (msec)
 */
static inline cputime_t timeval_to_cputime(struct timeval *val)
{
	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
	return (__force cputime_t) ret;
}
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
{
	val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
	val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
}

/*
 * Convert cputime <-> clock (USER_HZ)
 */
#define cputime_to_clock_t(__ct)	\
	((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
#define clock_t_to_cputime(__x)		\
	(__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))

/*
 * Convert cputime64 to clock.
 */
#define cputime64_to_clock_t(__ct)	\
	cputime_to_clock_t((__force cputime_t)__ct)

extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void arch_vtime_task_switch(struct task_struct *tsk);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */


#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __IA64_CPUTIME_H */
#endif /* __IA64_CPUTIME_H */
+2 −2
Original line number Original line Diff line number Diff line
@@ -31,7 +31,7 @@ struct thread_info {
	mm_segment_t addr_limit;	/* user-level address space limit */
	mm_segment_t addr_limit;	/* user-level address space limit */
	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
	struct restart_block restart_block;
	struct restart_block restart_block;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	__u64 ac_stamp;
	__u64 ac_stamp;
	__u64 ac_leave;
	__u64 ac_leave;
	__u64 ac_stime;
	__u64 ac_stime;
@@ -69,7 +69,7 @@ struct thread_info {
#define task_stack_page(tsk)	((void *)(tsk))
#define task_stack_page(tsk)	((void *)(tsk))


#define __HAVE_THREAD_FUNCTIONS
#define __HAVE_THREAD_FUNCTIONS
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define setup_thread_stack(p, org)			\
#define setup_thread_stack(p, org)			\
	*task_thread_info(p) = *task_thread_info(org);	\
	*task_thread_info(p) = *task_thread_info(org);	\
	task_thread_info(p)->ac_stime = 0;		\
	task_thread_info(p)->ac_stime = 0;		\
+1 −1
Original line number Original line Diff line number Diff line


#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */
/* read ar.itc in advance, and use it before leaving bank 0 */
#define XEN_ACCOUNT_GET_STAMP		\
#define XEN_ACCOUNT_GET_STAMP		\
	MOV_FROM_ITC(pUStk, p6, r20, r2);
	MOV_FROM_ITC(pUStk, p6, r20, r2);
+1 −1
Original line number Original line Diff line number Diff line
@@ -41,7 +41,7 @@ void foo(void)
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
Loading