
Commit 11a80ddb authored by Ingo Molnar

Merge branch 'perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/urgent

parents 3a9a0beb 56053170
include/linux/hw_breakpoint.h +2 −0
@@ -22,6 +22,8 @@ enum {
 
 static inline void hw_breakpoint_init(struct perf_event_attr *attr)
 {
+	memset(attr, 0, sizeof(*attr));
+
 	attr->type = PERF_TYPE_BREAKPOINT;
 	attr->size = sizeof(*attr);
 	/*
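
The two added lines close a subtle hole: hw_breakpoint_init() now zeroes the attribute before filling in its fields, so any perf_event_attr field the helper does not assign starts at zero rather than stack garbage when a caller declares the attr as a local. A minimal sketch of such a caller (hypothetical; addr and the chosen bp_* values are illustrative, not part of this commit):

	struct perf_event_attr attr;	/* a local: undefined contents */

	hw_breakpoint_init(&attr);	/* memset() to zero, then type,
					 * size, etc. are filled in */
	attr.bp_addr = addr;		/* hypothetical address to watch */
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

Without the memset(), fields the caller never touched (sample_period, the exclude_* bits, and so on) kept whatever the stack happened to hold, and the perf core would act on that garbage.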
kernel/hw_breakpoint.c +45 −29
@@ -83,15 +83,51 @@ static unsigned int max_task_bp_pinned(int cpu)
 	return 0;
 }
 
+static int task_bp_pinned(struct task_struct *tsk)
+{
+	struct perf_event_context *ctx = tsk->perf_event_ctxp;
+	struct list_head *list;
+	struct perf_event *bp;
+	unsigned long flags;
+	int count = 0;
+
+	if (WARN_ONCE(!ctx, "No perf context for this task"))
+		return 0;
+
+	list = &ctx->event_list;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+
+	/*
+	 * The current breakpoint counter is not included in the list
+	 * at the open() callback time
+	 */
+	list_for_each_entry(bp, list, event_entry) {
+		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+			count++;
+	}
+
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	return count;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
  */
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 {
+	int cpu = bp->cpu;
+	struct task_struct *tsk = bp->ctx->task;
+
 	if (cpu >= 0) {
 		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
-		slots->pinned += max_task_bp_pinned(cpu);
+		if (!tsk)
+			slots->pinned += max_task_bp_pinned(cpu);
+		else
+			slots->pinned += task_bp_pinned(tsk);
 		slots->flexible = per_cpu(nr_bp_flexible, cpu);
 
 		return;
@@ -101,7 +137,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned, cpu);
-		nr += max_task_bp_pinned(cpu);
+		if (!tsk)
+			nr += max_task_bp_pinned(cpu);
+		else
+			nr += task_bp_pinned(tsk);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -118,33 +157,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
  */
 static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 {
-	int count = 0;
-	struct perf_event *bp;
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
 	unsigned int *tsk_pinned;
-	struct list_head *list;
-	unsigned long flags;
-
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return;
-
-	list = &ctx->event_list;
-
-	spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			count++;
-	}
-
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	int count = 0;
 
-	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
-		return;
+	count = task_bp_pinned(tsk);
 
 	tsk_pinned = per_cpu(task_bp_pinned, cpu);
 	if (enable) {
@@ -233,7 +249,7 @@ int reserve_bp_slot(struct perf_event *bp)
 
 	mutex_lock(&nr_bp_mutex);
 
-	fetch_bp_busy_slots(&slots, bp->cpu);
+	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
 	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
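
Taken together: reserve_bp_slot() now hands the whole event to fetch_bp_busy_slots(), which can therefore tell a task-bound breakpoint (bp->ctx->task set) from a CPU-wide one, and charge the former by its own task's pinned count (task_bp_pinned()) rather than by the worst case over all tasks (max_task_bp_pinned()). A condensed sketch of the resulting check (paraphrased from the hunks above; locking and the exact error unwinding are elided):

	struct bp_busy_slots slots = { 0 };

	fetch_bp_busy_slots(&slots, bp);	/* per-task or per-cpu accounting,
						 * chosen by bp->ctx->task */

	/* flexible counters must keep at least one of the HBP_NUM slots */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
		return -ENOSPC;			/* assumption: the real code sets a
						 * return value and unlocks
						 * nr_bp_mutex before returning */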