
Commit 740969f9 authored by Thomas Gleixner, committed by Ingo Molnar

locking, lib/proportions: Annotate prop_local_percpu::lock as raw



The prop_local_percpu::lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cdcc136f
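
On PREEMPT_RT, a plain spinlock_t becomes a sleeping lock, so a lock that may be taken in atomic context has to be a raw_spinlock_t, which always spins; in a mainline build both types behave identically, so the change is purely an annotation, as the message above notes. The sketch below is not part of this commit (the my_stats names are invented for illustration); it shows the conversion pattern the diff applies: the field type changes, the static initializer switches to __RAW_SPIN_LOCK_UNLOCKED(), and the call sites move to the raw_spin_*() variants.

/*
 * Illustrative sketch only -- not from this commit. The struct and
 * function names (my_stats, my_stats_fold) are made up; the locking
 * calls mirror the proportions code after the conversion.
 */
#include <linux/spinlock.h>

struct my_stats {
	unsigned long period;
	raw_spinlock_t lock;	/* must never sleep, even on -rt */
};

/* Static initializer, analogous to INIT_PROP_LOCAL_SINGLE() */
#define INIT_MY_STATS(name)				\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}

static void my_stats_init(struct my_stats *s)
{
	raw_spin_lock_init(&s->lock);		/* was spin_lock_init() */
	s->period = 0;
}

/* May run in atomic context, so the raw (spinning) variants are used */
static void my_stats_fold(struct my_stats *s, unsigned long period)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&s->lock, flags);	/* was spin_lock_irqsave() */
	s->period = period;
	raw_spin_unlock_irqrestore(&s->lock, flags);
}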
include/linux/proportions.h +3 −3
@@ -58,7 +58,7 @@ struct prop_local_percpu {
	 */
	int shift;
	unsigned long period;
-	spinlock_t lock;		/* protect the snapshot state */
+	raw_spinlock_t lock;		/* protect the snapshot state */
};

int prop_local_init_percpu(struct prop_local_percpu *pl);
@@ -106,11 +106,11 @@ struct prop_local_single {
	 */
	unsigned long period;
	int shift;
-	spinlock_t lock;		/* protect the snapshot state */
+	raw_spinlock_t lock;		/* protect the snapshot state */
};

#define INIT_PROP_LOCAL_SINGLE(name)			\
-{	.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
+{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}

int prop_local_init_single(struct prop_local_single *pl);
lib/proportions.c +6 −6
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)

int prop_local_init_percpu(struct prop_local_percpu *pl)
{
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
	pl->shift = 0;
	pl->period = 0;
	return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
	if (pl->period == global_period)
		return;

-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);

	/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
		percpu_counter_set(&pl->events, 0);

	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,

int prop_local_init_single(struct prop_local_single *pl)
{
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
	pl->shift = 0;
	pl->period = 0;
	pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
	if (pl->period == global_period)
		return;

-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
	/*
	 * For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
	else
		pl->events = 0;
	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/*