Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 58fd41cb authored by Greg Kroah-Hartman
Browse files

Revert "BACKPORT: perf_event: Add support for LSM and SELinux checks"



This reverts commit 8af21ac1 as it
breaks the build :(

Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Ryan Savitski <rsavitski@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parent 99976514
Loading
Loading
Loading
Loading
+10 −8
Original line number Diff line number Diff line
@@ -95,7 +95,7 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp) { }
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
@@ -126,7 +126,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */

@@ -170,7 +170,7 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
 * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
 */
static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp)
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	bool sdar_valid;
@@ -195,7 +195,8 @@ static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *
	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
		*addrp = mfspr(SPRN_SDAR);

	if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0)
	if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
		is_kernel_addr(mfspr(SPRN_SDAR)))
		*addrp = 0;
}

@@ -434,7 +435,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
}

/* Processing BHRB entries */
static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw)
static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
{
	u64 val;
	u64 addr;
@@ -462,7 +463,8 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *
			 * exporting it to userspace (avoid exposure of regions
			 * where we could have speculative execution)
			 */
			if (is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
			if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
				is_kernel_addr(addr))
				continue;

			/* Branches are read most recent first (ie. mfbhrb 0 is
@@ -2066,12 +2068,12 @@ static void record_and_restart(struct perf_event *event, unsigned long val,

		if (event->attr.sample_type &
		    (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
			perf_get_data_addr(event, regs, &data.addr);
			perf_get_data_addr(regs, &data.addr);

		if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
			struct cpu_hw_events *cpuhw;
			cpuhw = this_cpu_ptr(&cpu_hw_events);
			power_pmu_bhrb_read(event, cpuhw);
			power_pmu_bhrb_read(cpuhw);
			data.br_stack = &cpuhw->bhrb_stack;
		}

+3 −5
Original line number Diff line number Diff line
@@ -557,11 +557,9 @@ static int bts_event_init(struct perf_event *event)
	 * Note that the default paranoia setting permits unprivileged
	 * users to profile the kernel.
	 */
	if (event->attr.exclude_kernel) {
		ret = perf_allow_kernel(&event->attr);
		if (ret)
			return ret;
	}
	if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (x86_add_exclusive(x86_lbr_exclusive_bts))
		return -EBUSY;
+2 −3
Original line number Diff line number Diff line
@@ -3109,9 +3109,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
	if (x86_pmu.version < 3)
		return -EINVAL;

	ret = perf_allow_cpu(&event->attr);
	if (ret)
		return ret;
	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

+2 −3
Original line number Diff line number Diff line
@@ -776,9 +776,8 @@ static int p4_validate_raw_event(struct perf_event *event)
	 * the user needs special permissions to be able to use it
	 */
	if (p4_ht_active() && p4_event_bind_map[v].shared) {
		v = perf_allow_cpu(&event->attr);
		if (v)
			return v;
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	/* ESCR EventMask bits may be invalid */
+0 −15
Original line number Diff line number Diff line
@@ -1777,14 +1777,6 @@ union security_list_options {
	int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux);
	void (*bpf_prog_free_security)(struct bpf_prog_aux *aux);
#endif /* CONFIG_BPF_SYSCALL */
#ifdef CONFIG_PERF_EVENTS
	int (*perf_event_open)(struct perf_event_attr *attr, int type);
	int (*perf_event_alloc)(struct perf_event *event);
	void (*perf_event_free)(struct perf_event *event);
	int (*perf_event_read)(struct perf_event *event);
	int (*perf_event_write)(struct perf_event *event);

#endif
};

struct security_hook_heads {
@@ -2019,13 +2011,6 @@ struct security_hook_heads {
	struct hlist_head bpf_prog_alloc_security;
	struct hlist_head bpf_prog_free_security;
#endif /* CONFIG_BPF_SYSCALL */
#ifdef CONFIG_PERF_EVENTS
	struct hlist_head perf_event_open;
	struct hlist_head perf_event_alloc;
	struct hlist_head perf_event_free;
	struct hlist_head perf_event_read;
	struct hlist_head perf_event_write;
#endif
} __randomize_layout;

/*
Loading