Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1c10adbb authored by Oleg Nesterov, committed by Ingo Molnar
Browse files

hw_breakpoint: Introduce cpumask_of_bp()



Add the trivial helper which simply returns cpumask_of() or
cpu_possible_mask depending on bp->cpu.

Change fetch_bp_busy_slots() and toggle_bp_slot() to always do
for_each_cpu(cpumask_of_bp) to simplify the code and avoid the
code duplication.

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20130620155015.GA6340@redhat.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7ab71f32
Loading
Loading
Loading
Loading
+17 −26
Original line number Diff line number Diff line
@@ -127,6 +127,13 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
	return count;
}

/*
 * Return the set of CPUs a breakpoint applies to: the single CPU it is
 * bound to (bp->cpu >= 0), or every possible CPU for a task-bound
 * breakpoint (bp->cpu == -1).
 */
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	return bp->cpu >= 0 ? cpumask_of(bp->cpu) : cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
@@ -135,25 +142,13 @@ static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(cpu, bp, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int nr;
	for_each_cpu(cpu, cpumask) {
		unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu);

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
		if (!bp->hw.bp_target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);
@@ -205,25 +200,21 @@ static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!tsk) {
		per_cpu(nr_cpu_bp_pinned[type], cpu) += weight;
	if (!bp->hw.bp_target) {
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
		return;
	}

	/* Pinned counter task profiling */
	if (cpu >= 0) {
		toggle_bp_task_slot(bp, cpu, type, weight);
	} else {
		for_each_possible_cpu(cpu)
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);
	}

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);