Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e7b5e11e authored by Harvey Harrison, committed by Ingo Molnar
Browse files

x86: kprobes leftover cleanups



Eliminate __always_inline; all of these static functions are
only called once.  Minor whitespace cleanup.  Eliminate one
superfluous return at the end of a void function.  Change the one
#ifndef to #ifdef to match the sense of the rest of the config
tests.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent ab4a574e
Loading
Loading
Loading
Loading
+6 −8
Original line number Original line Diff line number Diff line
@@ -159,7 +159,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);


/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
static __always_inline void set_jmp_op(void *from, void *to)
static void __kprobes set_jmp_op(void *from, void *to)
{
{
	struct __arch_jmp_op {
	struct __arch_jmp_op {
		char op;
		char op;
@@ -174,7 +174,7 @@ static __always_inline void set_jmp_op(void *from, void *to)
 * Returns non-zero if opcode is boostable.
 * Returns non-zero if opcode is boostable.
 * RIP relative instructions are adjusted at copying time in 64 bits mode
 * RIP relative instructions are adjusted at copying time in 64 bits mode
 */
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;
	kprobe_opcode_t *orig_opcodes = opcodes;
@@ -392,13 +392,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
}


static __always_inline void clear_btf(void)
static void __kprobes clear_btf(void)
{
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
		wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
}
}


static __always_inline void restore_btf(void)
static void __kprobes restore_btf(void)
{
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
		wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
@@ -767,7 +767,7 @@ static void __kprobes resume_execution(struct kprobe *p,
	case 0xe8:	/* call relative - Fix return addr */
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		*tos = orig_ip + (*tos - copy_ip);
		break;
		break;
#ifndef CONFIG_X86_64
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
		goto no_change;
@@ -813,8 +813,6 @@ static void __kprobes resume_execution(struct kprobe *p,


no_change:
no_change:
	restore_btf();
	restore_btf();

	return;
}
}


/*
/*