
Commit bfe33492 authored by Peter Zijlstra, committed by Ingo Molnar

perf/x86: Fix RDPMC vs. mm_struct tracking



Vince reported the following rdpmc() testcase failure:

 > Failing test case:
 >
 >	fd=perf_event_open();
 >	addr=mmap(fd);
 >	exec()  // without closing or unmapping the event
 >	fd=perf_event_open();
 >	addr=mmap(fd);
 >	rdpmc()	// GPFs due to rdpmc being disabled
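
Fleshed out, that sequence looks roughly like the sketch below. This is
a hedged reconstruction rather than Vince's actual test program: the
event type, the re-exec via /proc/self/exe and the helper names
(open_and_mmap, rdpmc) are illustrative assumptions.

/*
 * Hypothetical reproducer sketch for the failing sequence above.
 * Build: gcc -O2 -o rdpmc-exec rdpmc-exec.c   (x86 only)
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* No glibc wrapper for perf_event_open(); go through syscall(2). */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Read a hardware counter directly; counter is the raw PMC number. */
static unsigned long long rdpmc(unsigned int counter)
{
	unsigned int lo, hi;

	__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return lo | ((unsigned long long)hi << 32);
}

/* fd = perf_event_open(); addr = mmap(fd); -- and leak both on purpose. */
static struct perf_event_mmap_page *open_and_mmap(void)
{
	struct perf_event_attr attr;
	void *addr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;	/* keep it usable without extra privileges */

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		exit(1);
	}

	/* Mapping the control page is what arms RDPMC for this mm. */
	addr = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}
	return addr;
}

int main(int argc, char **argv)
{
	struct perf_event_mmap_page *pc;

	if (argc == 1) {
		/* First pass: open + mmap, then exec() without closing or unmapping. */
		open_and_mmap();
		execl("/proc/self/exe", argv[0], "post-exec", (char *)NULL);
		perror("execl");
		return 1;
	}

	/* Second pass, after exec(): open + mmap again and try rdpmc. */
	pc = open_and_mmap();
	if (pc->index) {
		/*
		 * Without the fix this faulted (SIGSEGV from the #GP) because
		 * CR4.PCE was left clear for the post-exec mm.
		 */
		printf("rdpmc: %llu\n", rdpmc(pc->index - 1));
	}
	return 0;
}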

The problem is of course that exec() plays tricks with what is
current->mm: it only destroys the old mappings after having installed
the new mm, so the unmap of the old event runs with current->mm
already pointing at the new mm and decrements the wrong mm's
perf_rdpmc_allowed count. The second mmap() therefore never sees the
0 -> 1 transition that would re-enable RDPMC for the new mm.

Fix this confusion by passing along vma->vm_mm instead of relying on
current->mm.
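
For completeness, a sketch of the userspace side that the restored
mapping serves: the read loop follows the self-monitoring protocol
described in the perf_event_mmap_page uapi comments (seqcount on
pc->lock, cap_user_rdpmc/index gating, sign extension to pmc_width);
the helper name mmap_read_self is made up here.

#include <stdint.h>
#include <linux/perf_event.h>

/*
 * Sketch of the documented self-monitoring read sequence: prefer RDPMC
 * when the mapped page advertises it, retrying if the kernel updated
 * the page concurrently (pc->lock acts as a seqcount).
 */
static uint64_t mmap_read_self(struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx, lo, hi;
	uint64_t count;

	do {
		seq = pc->lock;
		__asm__ volatile("" ::: "memory");	/* compiler barrier */

		idx = pc->index;
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx) {
			uint64_t pmc;
			uint32_t shift = 64 - pc->pmc_width;

			/* RDPMC takes the raw counter number, hence index - 1. */
			__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx - 1));
			pmc = ((uint64_t)hi << 32) | lo;

			/* Sign-extend the pmc_width-bit value and accumulate. */
			count += (uint64_t)((int64_t)(pmc << shift) >> shift);
		}

		__asm__ volatile("" ::: "memory");
	} while (pc->lock != seq);

	return count;
}

A caller would point pc at the page mmap()ed from the event fd, as in
the reproducer sketch above.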

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Tested-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
Fixes: 1e0fb9ec ("perf: Add pmu callbacks to track event mapping and unmapping")
Link: http://lkml.kernel.org/r/20170802173930.cstykcqefmqt7jau@hirez.programming.kicks-ass.net


[ Minor cleanups. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8d31f80e
arch/x86/events/core.c  +7 −9

@@ -2114,7 +2114,7 @@ static void refresh_pce(void *ignored)
 	load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
 }
 
-static void x86_pmu_event_mapped(struct perf_event *event)
+static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 {
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
@@ -2129,22 +2129,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	 * For now, this can't happen because all callers hold mmap_sem
 	 * for write.  If this changes, we'll need a different solution.
 	 */
-	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+	lockdep_assert_held_exclusive(&mm->mmap_sem);
 
-	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
-		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
+		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
 }
 
-static void x86_pmu_event_unmapped(struct perf_event *event)
+static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
 {
-	if (!current->mm)
-		return;
 
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
 
-	if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
-		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
+		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
 }
 
 static int x86_pmu_event_idx(struct perf_event *event)
include/linux/perf_event.h  +2 −2

@@ -310,8 +310,8 @@ struct pmu {
 	 * Notification that the event was mapped or unmapped.  Called
 	 * in the context of the mapping task.
 	 */
-	void (*event_mapped)		(struct perf_event *event); /*optional*/
-	void (*event_unmapped)		(struct perf_event *event); /*optional*/
+	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
+	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
 
 	/*
 	 * Flags for ->add()/->del()/ ->start()/->stop(). There are
kernel/events/core.c  +3 −3

@@ -5090,7 +5090,7 @@ static void perf_mmap_open(struct vm_area_struct *vma)
 		atomic_inc(&event->rb->aux_mmap_count);
 
 	if (event->pmu->event_mapped)
-		event->pmu->event_mapped(event);
+		event->pmu->event_mapped(event, vma->vm_mm);
 }
 
 static void perf_pmu_output_stop(struct perf_event *event);
@@ -5113,7 +5113,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	unsigned long size = perf_data_size(rb);
 
 	if (event->pmu->event_unmapped)
-		event->pmu->event_unmapped(event);
+		event->pmu->event_unmapped(event, vma->vm_mm);
 
 	/*
 	 * rb->aux_mmap_count will always drop before rb->mmap_count and
@@ -5411,7 +5411,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &perf_mmap_vmops;
 
 	if (event->pmu->event_mapped)
-		event->pmu->event_mapped(event);
+		event->pmu->event_mapped(event, vma->vm_mm);
 
 	return ret;
 }