Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 54faf77d authored by Linus Torvalds
Browse files

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Three small fixlets"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  hw_breakpoint: Use cpu_possible_mask in {reserve,release}_bp_slot()
  hw_breakpoint: Fix cpu check in task_bp_pinned(cpu)
  kprobes: Fix arch_prepare_kprobe to handle copy insn failures
parents e3ff9114 c790b0ad
Loading
Loading
Loading
Loading
+10 −4
Original line number Diff line number Diff line
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
	return insn.length;
}

static void __kprobes arch_copy_kprobe(struct kprobe *p)
static int __kprobes arch_copy_kprobe(struct kprobe *p)
{
	int ret;

	/* Copy an instruction with recovering if other optprobe modifies it.*/
	__copy_instruction(p->ainsn.insn, p->addr);
	ret = __copy_instruction(p->ainsn.insn, p->addr);
	if (!ret)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)

	/* Also, displacement change doesn't affect the first byte */
	p->opcode = p->ainsn.insn[0];

	return 0;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;

	return arch_copy_kprobe(p);
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
+3 −3
Original line number Diff line number Diff line
@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.bp_target == tsk &&
		    find_slot_idx(iter) == type &&
		    cpu == iter->cpu)
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		return;
	}

	for_each_online_cpu(cpu) {
	for_each_possible_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	if (cpu >= 0) {
		toggle_bp_task_slot(bp, cpu, enable, type, weight);
	} else {
		for_each_online_cpu(cpu)
		for_each_possible_cpu(cpu)
			toggle_bp_task_slot(bp, cpu, enable, type, weight);
	}