
Commit 806fdcce authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Misc fixes: a binutils fix, an lguest fix, an mcelog fix and a missing
  documentation fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Avoid using object after free in genpool
  lguest, x86/entry/32: Fix handling of guest syscalls using interrupt gates
  x86/build: Build compressed x86 kernels as PIE
  x86/mm/pkeys: Add missing Documentation
parents a1f98317 a3125494
+27 −0
Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
which will be found on future Intel CPUs.

Memory Protection Keys provides a mechanism for enforcing page-based
protections, but without requiring modification of the page tables
when an application changes protection domains.  It works by
dedicating 4 previously ignored bits in each page table entry to a
"protection key", giving 16 possible keys.

There is also a new user-accessible register (PKRU) with two separate
bits (Access Disable and Write Disable) for each key.  Being a CPU
register, PKRU is inherently thread-local, potentially giving each
thread a different set of protections from every other thread.

There are two new instructions (RDPKRU/WRPKRU) for reading and writing
to the new register.  The feature is only available in 64-bit mode,
even though there is theoretically space in the PAE PTEs.  These
permissions are enforced on data access only and have no effect on
instruction fetches.

=========================== Config Option ===========================

This config option adds approximately 1.5kb of text and 50 bytes of
data to the executable.  A workload which does large O_DIRECT reads
of holes in XFS files was run to exercise get_user_pages_fast().  No
performance delta was observed with the config option
enabled or disabled.
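
To make the PKRU layout described above concrete, here is a minimal userspace sketch. It is not part of this commit: the helper names and bit macros are illustrative, and the .byte sequences (0f 01 ee for RDPKRU, 0f 01 ef for WRPKRU) are assumed opcode encodings, noted as such in the comments.

/*
 * Hedged sketch, not from this commit: each of the 16 keys owns two
 * adjacent PKRU bits.  The .byte sequences are assumed encodings for
 * RDPKRU/WRPKRU; RDPKRU expects ECX = 0, WRPKRU expects ECX = EDX = 0.
 */
#define PKRU_AD(key)	(1u << (2 * (key)))	/* Access Disable for 'key' */
#define PKRU_WD(key)	(1u << (2 * (key) + 1))	/* Write Disable for 'key' */

static inline unsigned int rdpkru(void)
{
	unsigned int pkru, edx;

	asm volatile(".byte 0x0f, 0x01, 0xee"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (0));
	return pkru;
}

static inline void wrpkru(unsigned int pkru)
{
	/* "memory" clobber: this changes which accesses are permitted */
	asm volatile(".byte 0x0f, 0x01, 0xef"
		     : /* no outputs */
		     : "a" (pkru), "c" (0), "d" (0)
		     : "memory");
}

With that layout, wrpkru(rdpkru() | PKRU_WD(1)) would make every page tagged with key 1 read-only for the calling thread alone, with no page-table modification.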
+13 −1
@@ -26,7 +26,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
 	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
-KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
+KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
@@ -40,6 +40,18 @@ GCOV_PROFILE := n
 UBSAN_SANITIZE :=n
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
+ifeq ($(CONFIG_RELOCATABLE),y)
+# If kernel is relocatable, build compressed kernel as PIE.
+ifeq ($(CONFIG_X86_32),y)
+LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
+else
+# To build 64-bit compressed kernel as PIE, we disable relocation
+# overflow check to avoid relocation overflow error with a new linker
+# command-line option, -z noreloc-overflow.
+LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
+	&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
+endif
+endif
 LDFLAGS_vmlinux := -T
 
 hostprogs-y	:= mkpiggy
+28 −0
@@ -31,6 +31,34 @@
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
+ * relocation to get the symbol address in PIC.  When the compressed x86
+ * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
+ * relocations to their fixed symbol addresses.  However, when the
+ * compressed x86 kernel is loaded at a different address, it leads
+ * to the following load failure:
+ *
+ *   Failed to allocate space for phdrs
+ *
+ * during the decompression stage.
+ *
+ * If the compressed x86 kernel is relocatable at run-time, it should be
+ * compiled with -fPIE, instead of -fPIC, if possible and should be built as
+ * Position Independent Executable (PIE) so that linker won't optimize
+ * R_386_GOT32X relocation to its fixed symbol address.  Older
+ * linkers generate R_386_32 relocations against locally defined symbols,
+ * _bss, _ebss, _got and _egot, in PIE.  It isn't wrong, just less
+ * optimal than R_386_RELATIVE.  But the x86 kernel fails to properly handle
+ * R_386_32 relocations when relocating the kernel.  To generate
+ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
+ * hidden:
+ */
+	.hidden _bss
+	.hidden _ebss
+	.hidden _got
+	.hidden _egot
+
 	__HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
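
The effect of the .hidden directives can be observed outside the kernel. A rough illustration, with a hypothetical file and symbol names (not from this commit): build with gcc -m32 -fPIE -c vis.c and compare the relocations readelf -r vis.o reports for the two accessors.

/*
 * vis.c - hypothetical demo, not part of this commit.  With -fPIE,
 * taking the address of the default-visibility symbol goes through the
 * GOT (R_386_GOT32X with binutils 2.26+), while the hidden symbol is
 * addressed GOT-relative with no GOT entry of its own.
 */
extern char sym_default[];
extern char sym_hidden[] __attribute__((visibility("hidden")));

char *addr_default(void) { return sym_default; }
char *addr_hidden(void)  { return sym_hidden; }

This mirrors what the directives above do for _bss, _ebss, _got and _egot: once those symbols are local to the link, the remaining run-time fixups are base-relative R_386_RELATIVE entries, which the kernel's relocation code handles correctly.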
+8 −0
@@ -33,6 +33,14 @@
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * Locally defined symbols should be marked hidden:
+ */
+	.hidden _bss
+	.hidden _ebss
+	.hidden _got
+	.hidden _egot
+
 	__HEAD
 	.code32
 ENTRY(startup_32)
+2 −2
@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
 void mce_gen_pool_process(void)
 {
 	struct llist_node *head;
-	struct mce_evt_llist *node;
+	struct mce_evt_llist *node, *tmp;
 	struct mce *mce;
 
 	head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
 		return;
 
 	head = llist_reverse_order(head);
-	llist_for_each_entry(node, head, llnode) {
+	llist_for_each_entry_safe(node, tmp, head, llnode) {
 		mce = &node->mce;
 		atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
 		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
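
The two hunks are the standard fix for freeing nodes during a list walk: llist_for_each_entry() would read node's link field after gen_pool_free() has returned the node to the pool, while the _safe variant caches the next entry in tmp before the loop body runs. A self-contained userspace analogue of the same pattern (illustrative names, not kernel API):

#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

/*
 * Visit and free every node.  The loop reads n->next into tmp before
 * the body frees n; advancing with n = n->next instead would
 * dereference freed memory, the same class of bug fixed above.
 */
static void process_and_free(struct node *head)
{
	struct node *n, *tmp;

	for (n = head; n; n = tmp) {
		tmp = n->next;	/* cache the link before freeing */
		/* ... consume n->payload ... */
		free(n);	/* n is now invalid; tmp keeps the walk alive */
	}
}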