Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 68107df5, authored by Frederic Weisbecker, committed by Ingo Molnar
Browse files

u64_stats: Introduce IRQs disabled helpers



Introduce light versions of u64_stats helpers for context where
either preempt or IRQs are disabled. This way we can make this library
usable by scheduler irqtime accounting, which currently implements its
ad-hoc version.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1474849761-12678-4-git-send-email-fweisbec@gmail.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2810f611
Loading
Loading
Loading
Loading
+24 −21
Original line number Original line Diff line number Diff line
@@ -103,31 +103,42 @@ static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
#endif
#endif
}
}


/*
 * Begin a u64_stats read section without taking any protection itself.
 *
 * On 32-bit SMP the 64-bit counters are guarded by a seqcount, so return
 * the current sequence for the caller to pass to __u64_stats_fetch_retry().
 * Everywhere else (64-bit, or 32-bit UP) no seqcount exists and 0 is
 * returned; the caller is responsible for disabling preemption or IRQs
 * as appropriate (see u64_stats_fetch_begin{,_irq}()).
 */
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}


/*
 * Begin a u64_stats read section for preemptible context.
 *
 * On 32-bit UP there is no seqcount, so writers are excluded by disabling
 * preemption here (re-enabled in u64_stats_fetch_retry()). All other
 * configurations delegate directly to __u64_stats_fetch_begin().
 */
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

/*
 * Finish a u64_stats read section started by __u64_stats_fetch_begin().
 *
 * @start: sequence value returned by __u64_stats_fetch_begin()
 *
 * Returns true when a writer raced with the reader and the fetched values
 * must be re-read. Only 32-bit SMP uses the seqcount and can actually
 * observe a retry; all other configurations never retry and return false.
 */
static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}


/*
 * Finish a u64_stats read section started by u64_stats_fetch_begin().
 *
 * @start: sequence value returned by u64_stats_fetch_begin()
 *
 * On 32-bit UP this re-enables the preemption disabled by
 * u64_stats_fetch_begin(); the retry decision itself is made by
 * __u64_stats_fetch_retry().
 */
static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

/*
/*
 * In case irq handlers can update u64 counters, readers can use following helpers
 * In case irq handlers can update u64 counters, readers can use following helpers
 * - SMP 32bit arches use seqcount protection, irq safe.
 * - SMP 32bit arches use seqcount protection, irq safe.
@@ -136,27 +147,19 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 */
 */
/*
 * Begin a u64_stats read section safe against in-IRQ writers.
 *
 * On 32-bit UP the writer may run from hard IRQ context, so disabling
 * preemption is not enough: IRQs are disabled here (re-enabled in
 * u64_stats_fetch_retry_irq()). 32-bit SMP relies on the seqcount taken
 * by __u64_stats_fetch_begin(), and 64-bit needs no protection at all.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}


/*
 * Finish a u64_stats read section started by u64_stats_fetch_begin_irq().
 *
 * @start: sequence value returned by u64_stats_fetch_begin_irq()
 *
 * On 32-bit UP this re-enables the IRQs disabled by
 * u64_stats_fetch_begin_irq(); the retry decision itself is made by
 * __u64_stats_fetch_retry().
 */
static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}


#endif /* _LINUX_U64_STATS_SYNC_H */
#endif /* _LINUX_U64_STATS_SYNC_H */