
Commit e625cce1 authored by Thomas Gleixner

perf_event: Convert to raw_spinlock



Convert locks which cannot be sleeping locks in preempt-rt to
raw_spinlocks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
parent ecb49d1a
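Background for the conversion: under PREEMPT_RT an ordinary spinlock_t becomes a sleeping lock, while raw_spinlock_t keeps the classic behaviour of spinning with preemption (and, for the _irq variants, interrupts) disabled. The perf context lock is taken from scheduler and hard-interrupt paths where sleeping is not allowed, so it must stay a raw lock. A minimal sketch of the pattern, not taken from the patch itself (the example_ctx structure and helpers are made up for illustration; only the raw_spin_* API calls are real):

#include <linux/spinlock.h>

/* Hypothetical example, not code from this commit. */
struct example_ctx {
	raw_spinlock_t	lock;		/* was: spinlock_t lock; */
	int		nr_active;
};

static void example_init(struct example_ctx *ctx)
{
	raw_spin_lock_init(&ctx->lock);	/* was: spin_lock_init() */
	ctx->nr_active = 0;
}

static void example_update(struct example_ctx *ctx)
{
	unsigned long flags;

	/* was: spin_lock_irqsave() / spin_unlock_irqrestore() */
	raw_spin_lock_irqsave(&ctx->lock, flags);
	ctx->nr_active++;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

The hunks below apply the same mechanical substitution (spin_lock -> raw_spin_lock, spin_lock_irq -> raw_spin_lock_irq, and so on) to the perf context lock.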
+1 −1
@@ -681,7 +681,7 @@ struct perf_event_context {
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	/*
 	 * Protect the list of events.  Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
+2 −2
@@ -96,7 +96,7 @@ static int task_bp_pinned(struct task_struct *tsk)
 
 	list = &ctx->event_list;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	raw_spin_lock_irqsave(&ctx->lock, flags);
 
 	/*
 	 * The current breakpoint counter is not included in the list
@@ -107,7 +107,7 @@ static int task_bp_pinned(struct task_struct *tsk)
 			count++;
 	}
 
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
 	return count;
 }
+53 −53
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 		 * if so.  If we locked the right context, then it
 		 * can't get swapped on us any more.
 		 */
-		spin_lock_irqsave(&ctx->lock, *flags);
+		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			goto retry;
 		}
 
 		if (!atomic_inc_not_zero(&ctx->refcount)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			ctx = NULL;
 		}
 	}
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		++ctx->pin_count;
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 	return ctx;
 }
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	raw_spin_lock_irqsave(&ctx->lock, flags);
 	--ctx->pin_count;
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	put_ctx(ctx);
 }
 
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Protect the list operation against NMI by disabling the
 	 * events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info)
 	}
 
 	perf_enable();
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 
@@ -488,12 +488,12 @@ static void perf_event_remove_from_context(struct perf_event *event)
 	task_oncpu_function_call(task, __perf_event_remove_from_context,
 				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the context is active we need to retry the smp call.
 	 */
 	if (ctx->nr_active && !list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -504,7 +504,7 @@ static void perf_event_remove_from_context(struct perf_event *event)
 	 */
 	if (!list_empty(&event->group_entry))
 		list_del_event(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	/*
 	 * If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *info)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event)
  retry:
 	task_oncpu_function_call(task, __perf_event_disable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the event is still active, we need to retry the cross-call.
 	 */
 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -820,7 +820,7 @@ static void __perf_install_in_context(void *info)
  unlock:
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -856,12 +856,12 @@ perf_install_in_context(struct perf_event_context *ctx,
 	task_oncpu_function_call(task, __perf_install_in_context,
 				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * we need to retry the smp call.
 	 */
 	if (ctx->is_active && list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -872,7 +872,7 @@ perf_install_in_context(struct perf_event_context *ctx,
 	 */
 	if (list_empty(&event->group_entry))
 		add_event_to_ctx(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -917,7 +917,7 @@ static void __perf_event_enable(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -959,7 +959,7 @@ static void __perf_event_enable(void *info)
 	}
 
  unlock:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -985,7 +985,7 @@ void perf_event_enable(struct perf_event *event)
 		return;
 	}
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
 		goto out;
 
@@ -1000,10 +1000,10 @@ void perf_event_enable(struct perf_event *event)
 		event->state = PERF_EVENT_STATE_OFF;
 
  retry:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 	task_oncpu_function_call(task, __perf_event_enable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 
 	/*
 	 * If the context is active and the event is still off,
@@ -1020,7 +1020,7 @@ void perf_event_enable(struct perf_event *event)
 		__perf_event_mark_enabled(event, ctx);
 
  out:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 0;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	}
 	perf_enable();
  out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 		 * order we take the locks because no other cpu could
 		 * be trying to lock both of these tasks.
		 */
-		spin_lock(&ctx->lock);
-		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&ctx->lock);
+		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
 		if (context_equiv(ctx, next_ctx)) {
 			/*
 			 * XXX do we need a memory barrier of sorts
@@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 
 			perf_event_sync_stat(ctx, next_ctx);
 		}
-		spin_unlock(&next_ctx->lock);
-		spin_unlock(&ctx->lock);
+		raw_spin_unlock(&next_ctx->lock);
+		raw_spin_unlock(&ctx->lock);
 	}
 	rcu_read_unlock();
 
@@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	struct perf_event *event;
 	int can_add_hw = 1;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	}
 	perf_enable();
  out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 	struct hw_perf_event *hwc;
 	u64 interrupts, freq;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 			perf_enable();
 		}
 	}
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	if (!ctx->nr_events)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Rotate the first entry last (works just fine for group events too):
 	 */
@@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	}
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
 	__perf_event_task_sched_out(ctx);
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	list_for_each_entry(event, &ctx->group_list, group_entry) {
 		if (!event->attr.enable_on_exec)
@@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 	if (enabled)
 		unclone_ctx(ctx);
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	perf_event_task_sched_in(task, smp_processor_id());
  out:
@@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	update_context_time(ctx);
 	update_event_times(event);
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	event->pmu->read(event);
 }
@@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_event *event)
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
 
-		spin_lock_irqsave(&ctx->lock, flags);
+		raw_spin_lock_irqsave(&ctx->lock, flags);
 		update_context_time(ctx);
 		update_event_times(event);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
@@ -1579,7 +1579,7 @@ static void
 __perf_event_init_context(struct perf_event_context *ctx,
 			    struct task_struct *task)
 {
-	spin_lock_init(&ctx->lock);
+	raw_spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->group_list);
 	INIT_LIST_HEAD(&ctx->event_list);
@@ -1649,7 +1649,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		unclone_ctx(ctx);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	if (!ctx) {
@@ -1987,7 +1987,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (!value)
 		return -EINVAL;
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->attr.freq) {
 		if (value > sysctl_perf_event_sample_rate) {
 			ret = -EINVAL;
@@ -2000,7 +2000,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->hw.sample_period = value;
 	}
 unlock:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 
 	return ret;
 }
@@ -4992,7 +4992,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 * reading child->perf_event_ctxp, we wait until it has
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
-	spin_lock(&child_ctx->lock);
+	raw_spin_lock(&child_ctx->lock);
 	child->perf_event_ctxp = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
@@ -5001,7 +5001,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 */
 	unclone_ctx(child_ctx);
 	update_context_time(child_ctx);
-	spin_unlock_irqrestore(&child_ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*
 	 * Report the task dead after unscheduling the events so that we
@@ -5292,11 +5292,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	perf_reserved_percpu = val;
 	for_each_online_cpu(cpu) {
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
-		spin_lock_irq(&cpuctx->ctx.lock);
+		raw_spin_lock_irq(&cpuctx->ctx.lock);
 		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
 			  perf_max_events - perf_reserved_percpu);
 		cpuctx->max_pertask = mpt;
-		spin_unlock_irq(&cpuctx->ctx.lock);
+		raw_spin_unlock_irq(&cpuctx->ctx.lock);
 	}
 	spin_unlock(&perf_resource_lock);