
Commit 0faef837 authored by Linus Torvalds
Pull livepatching fixes from Jiri Kosina:

 - symbol lookup locking fix, from Miroslav Benes

 - error handling improvements in case of failure of the module coming
   notifier, from Minfei Huang

 - we were too pessimistic when kASLR is enabled on x86 and were
   unnecessarily dropping address hints on the floor in that case.  Fix
   from Jiri Kosina

 - a few other small fixes and cleanups

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: add module locking around kallsyms calls
  livepatch: annotate klp_init() with __init
  livepatch: introduce patch/func-walking helpers
  livepatch: make kobject in klp_object statically allocated
  livepatch: Prevent patch inconsistencies if the coming module notifier fails
  livepatch: match return value to function signature
  x86: kaslr: fix build due to missing ALIGN definition
  livepatch: x86: make kASLR logic more accurate
  x86: introduce kaslr_offset()
parents 67db8a80 110c1466
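Editor's note: the kernel/livepatch/core.c hunks did not load in this capture, but the first item above ("add module locking around kallsyms calls") comes down to holding module_mutex across any kallsyms walk so the module list cannot change underneath the symbol lookup. The sketch below shows only that pattern, using the 4.1-era kallsyms_on_each_symbol() signature; the callback body and the klp_example_symbol_walk() wrapper are illustrative, not copied from the patch.

#include <linux/kallsyms.h>
#include <linux/module.h>

/* Illustrative callback: would match on name/mod and stash addr via data. */
static int klp_example_find_callback(void *data, const char *name,
				     struct module *mod, unsigned long addr)
{
	return 0;
}

static void klp_example_symbol_walk(void *args)
{
	/*
	 * Hold module_mutex so modules cannot come or go (and their symbol
	 * tables cannot vanish) while kallsyms iterates over them.
	 */
	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_example_find_callback, args);
	mutex_unlock(&module_mutex);
}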
arch/x86/include/asm/livepatch.h  +1 −0
@@ -21,6 +21,7 @@
 #ifndef _ASM_X86_LIVEPATCH_H
 #define _ASM_X86_LIVEPATCH_H
 
+#include <asm/setup.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>

arch/x86/include/asm/setup.h  +7 −0
@@ -60,17 +60,24 @@ static inline void x86_ce4100_early_setup(void) { }
 #ifndef _SETUP
 
 #include <asm/espfix.h>
+#include <linux/kernel.h>
 
 /*
  * This is set up by the setup-routine at boot-time
  */
 extern struct boot_params boot_params;
+extern char _text[];
 
 static inline bool kaslr_enabled(void)
 {
 	return !!(boot_params.hdr.loadflags & KASLR_FLAG);
 }
 
+static inline unsigned long kaslr_offset(void)
+{
+	return (unsigned long)&_text - __START_KERNEL;
+}
+
 /*
  * Do NOT EVER look at the BIOS memory size location.
  * It does not work on many machines.
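Editor's note: kaslr_offset() is simply the delta between where the kernel text actually landed at run time (&_text) and the link-time base __START_KERNEL. The kASLR hunk of kernel/livepatch/core.c is not loaded in this capture, but the shortlog entry "livepatch: x86: make kASLR logic more accurate" amounts to shifting a link-time address hint by that delta instead of discarding it. A minimal sketch under that assumption; relocate_addr_hint() is a hypothetical name, not from the patch:

#include <linux/kernel.h>	/* IS_ENABLED() */
#include <asm/setup.h>		/* kaslr_enabled(), kaslr_offset() */

/*
 * Sketch: relocate an address hint computed against a non-randomized
 * vmlinux (e.g. a klp_func old_addr) into the randomized address space
 * of the running kernel, rather than throwing the hint away.
 */
static unsigned long relocate_addr_hint(unsigned long old_addr)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled() && old_addr)
		old_addr += kaslr_offset();

	return old_addr;
}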
arch/x86/kernel/machine_kexec_64.c  +2 −1
@@ -26,6 +26,7 @@
 #include <asm/io_apic.h>
 #include <asm/debugreg.h>
 #include <asm/kexec-bzimage64.h>
+#include <asm/setup.h>
 
 #ifdef CONFIG_KEXEC_FILE
 static struct kexec_file_ops *kexec_file_loaders[] = {
@@ -335,7 +336,7 @@ void arch_crash_save_vmcoreinfo(void)
 	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
 #endif
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
-			      (unsigned long)&_text - __START_KERNEL);
+			      kaslr_offset());
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */
arch/x86/kernel/setup.c  +1 −1
@@ -836,7 +836,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
 	if (kaslr_enabled()) {
 		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
-			 (unsigned long)&_text - __START_KERNEL,
+			 kaslr_offset(),
 			 __START_KERNEL,
 			 __START_KERNEL_map,
 			 MODULES_VADDR-1);
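Editor's note: the offset printed by dump_kernel_offset() (and the KERNELOFFSET= string in vmcoreinfo above) is what lets you turn a randomized oops/crash address back into the link-time address that System.map or vmlinux symbols resolve. A tiny standalone illustration of that arithmetic; both values below are made-up examples:

#include <stdio.h>

int main(void)
{
	/* Example values only, not from a real machine. */
	unsigned long kaslr_off = 0x1b000000UL;          /* "Kernel Offset: 0x1b000000 ..." */
	unsigned long oops_addr = 0xffffffff9c1234f0UL;  /* RIP from the randomized kernel */
	unsigned long map_addr  = oops_addr - kaslr_off; /* address to look up in System.map */

	printf("System.map address: 0x%lx\n", map_addr);
	return 0;
}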
include/linux/livepatch.h  +7 −1
@@ -99,7 +99,7 @@ struct klp_object {
 	struct klp_func *funcs;
 
 	/* internal */
-	struct kobject *kobj;
+	struct kobject kobj;
 	struct module *mod;
 	enum klp_state state;
 };
@@ -123,6 +123,12 @@ struct klp_patch {
 	enum klp_state state;
 };
 
+#define klp_for_each_object(patch, obj) \
+	for (obj = patch->objs; obj->funcs; obj++)
+
+#define klp_for_each_func(obj, func) \
+	for (func = obj->funcs; func->old_name; func++)
+
 int klp_register_patch(struct klp_patch *);
 int klp_unregister_patch(struct klp_patch *);
 int klp_enable_patch(struct klp_patch *);
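Editor's note: the two new helpers replace open-coded loops over patch->objs and obj->funcs; both arrays are terminated by an entry with a NULL funcs / old_name, which is what the loop conditions rely on. A hypothetical user of the helpers, not taken from the series:

#include <linux/livepatch.h>
#include <linux/printk.h>

/* Hypothetical debugging helper: list every function a patch will touch. */
static void klp_dump_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			pr_info("livepatch: %s: will patch %s\n",
				obj->name ? obj->name : "vmlinux",
				func->old_name);
}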