perf: protect group_leader from races that could free its context

Take a per-event group_leader_mutex in sys_perf_event_open() before
observing anything in the group leader that leads to changes in ctx
(in particular, before deciding whether a move_group is needed), so
that a concurrent perf_event_open() on the same group leader cannot
race and cause a ctx use-after-free.

---
 include/linux/perf_event.h |  6 ++++++
 kernel/events/core.c       | 15 +++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
@@ -339,6 +339,12 @@ struct perf_event {
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
+	/*
+	 * Protect the pmu, attributes and context of a group leader.
+	 * Note: does not protect the pointer to the group_leader.
+	 */
+	struct mutex			group_leader_mutex;
 	struct pmu			*pmu;

 	enum perf_event_active_state	state;

diff --git a/kernel/events/core.c b/kernel/events/core.c
@@ -6998,6 +6998,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (!group_leader)
 		group_leader = event;

+	mutex_init(&event->group_leader_mutex);
 	mutex_init(&event->child_mutex);
 	INIT_LIST_HEAD(&event->child_list);

@@ -7353,6 +7354,16 @@ SYSCALL_DEFINE5(perf_event_open,
 			group_leader = NULL;
 	}

+	/*
+	 * Take the group_leader's group_leader_mutex before observing
+	 * anything in the group leader that leads to changes in ctx,
+	 * many of which may be changing on another thread.
+	 * In particular, we want to take this lock before deciding
+	 * whether we need to move_group.
+	 */
+	if (group_leader)
+		mutex_lock(&group_leader->group_leader_mutex);
+
 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
 		task = find_lively_task_by_vpid(pid);
 		if (IS_ERR(task)) {

@@ -7531,6 +7542,8 @@ SYSCALL_DEFINE5(perf_event_open,
 	perf_install_in_context(ctx, event, event->cpu);
 	perf_unpin_context(ctx);
 	mutex_unlock(&ctx->mutex);
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 	put_online_cpus();

@@ -7567,6 +7580,8 @@ err_task:
 	if (task)
 		put_task_struct(task);
 err_group_fd:
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 	fdput(group);
 err_fd:
 	put_unused_fd(event_fd);