
Commit 0389075e authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "This is unusually large, partly due to the EFI fixes that prevent
  accidental deletion of EFI variables through efivarfs that may brick
  machines.  These fixes are somewhat involved to maintain compatibility
  with existing install methods and other usage modes, while trying to
  turn off the 'rm -rf' bricking vector.

  Other fixes are for large page ioremap()s and for non-temporal
  user-memcpy()s"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Fix vmalloc_fault() to handle large pages properly
  hpet: Drop stale URLs
  x86/uaccess/64: Handle the caching of 4-byte nocache copies properly in __copy_user_nocache()
  x86/uaccess/64: Make the __copy_user_nocache() assembly code more readable
  lib/ucs2_string: Correct ucs2 -> utf8 conversion
  efi: Add pstore variables to the deletion whitelist
  efi: Make efivarfs entries immutable by default
  efi: Make our variable validation list include the guid
  efi: Do variable name validation tests in utf8
  efi: Use ucs2_as_utf8 in efivarfs instead of open coding a bad version
  lib/ucs2_string: Add ucs2 -> utf8 helper functions
parents 06b74c65 f4eafd8b
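
Several of the commits above rework the UCS-2 -> UTF-8 handling used for EFI variable names (adding the ucs2_as_utf8 style helpers to lib/ucs2_string). As a rough standalone sketch of what that conversion involves (this is not the kernel implementation, and the function name below is invented for illustration), each 16-bit UCS-2 code unit expands to one, two or three UTF-8 bytes:

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative UCS-2 -> UTF-8 conversion (not the kernel's lib/ucs2_string
 * code).  UCS-2 has no surrogate pairs, so every code unit encodes to at
 * most three UTF-8 bytes.  Returns the number of bytes written to dst.
 */
static size_t ucs2_to_utf8_sketch(uint8_t *dst, size_t maxlen,
				  const uint16_t *src)
{
	size_t j = 0;

	for (; *src; src++) {
		uint16_t c = *src;

		if (c < 0x80) {				/* 1-byte sequence */
			if (j + 1 > maxlen)
				break;
			dst[j++] = (uint8_t)c;
		} else if (c < 0x800) {			/* 2-byte sequence */
			if (j + 2 > maxlen)
				break;
			dst[j++] = 0xc0 | (c >> 6);
			dst[j++] = 0x80 | (c & 0x3f);
		} else {				/* 3-byte sequence */
			if (j + 3 > maxlen)
				break;
			dst[j++] = 0xe0 | (c >> 12);
			dst[j++] = 0x80 | ((c >> 6) & 0x3f);
			dst[j++] = 0x80 | (c & 0x3f);
		}
	}
	return j;
}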
Documentation/filesystems/efivarfs.txt +7 −0
@@ -14,3 +14,10 @@ filesystem.
efivarfs is typically mounted like this,

	mount -t efivarfs none /sys/firmware/efi/efivars

Due to the presence of numerous firmware bugs where removing non-standard
UEFI variables causes the system firmware to fail to POST, efivarfs
files that are not well-known standardized variables are created
as immutable files.  This doesn't prevent removal - "chattr -i" will work -
but it does prevent this kind of failure from being accomplished
accidentally.
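
Since the documentation above points at "chattr -i", here is a minimal userspace sketch (not part of this patch set) of what that amounts to: clearing FS_IMMUTABLE_FL through the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls and then unlinking the file. The variable name and GUID in the path are placeholders.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	/* Placeholder variable name and GUID, purely for illustration. */
	const char *path = "/sys/firmware/efi/efivars/"
			   "Example-12345678-1234-1234-1234-123456789abc";
	int fd = open(path, O_RDONLY);
	int flags;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
		flags &= ~FS_IMMUTABLE_FL;	/* the bit "chattr -i" clears */
		ioctl(fd, FS_IOC_SETFLAGS, &flags);
	}
	close(fd);
	return unlink(path) ? 1 : 0;		/* removal is now permitted */
}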
Documentation/timers/hpet.txt +1 −3
		High Precision Event Timer Driver for Linux

The High Precision Event Timer (HPET) hardware follows a specification
by Intel and Microsoft which can be found at

	http://www.intel.com/hardwaredesign/hpetspec_1.pdf
by Intel and Microsoft, revision 1.

Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision")
and up to 32 comparators.  Normally three or more comparators are provided,
arch/x86/Kconfig +2 −2
@@ -778,8 +778,8 @@ config HPET_TIMER
	  HPET is the next generation timer replacing legacy 8254s.
	  The HPET provides a stable time base on SMP
	  systems, unlike the TSC, but it is more expensive to access,
	  as it is off-chip.  You can find the HPET spec at
	  <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
	  as it is off-chip.  The interface used is documented
	  in the HPET spec, revision 1.

	  You can safely choose Y here.  However, HPET will only be
	  activated if the platform and the BIOS support this feature.
arch/x86/lib/copy_user_64.S +101 −41
@@ -232,17 +232,31 @@ ENDPROC(copy_user_enhanced_fast_string)

/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination/source out of cache for more performance.
 * This will force destination out of cache for more performance.
 *
 * Note: Cached memory copy is used when destination or size is not
 * naturally aligned. That is:
 *  - Require 8-byte alignment when size is 8 bytes or larger.
 *  - Require 4-byte alignment when size is 4 bytes.
 */
ENTRY(__copy_user_nocache)
	ASM_STAC

	/* If size is less than 8 bytes, go to 4-byte copy */
	cmpl $8,%edx
	jb 20f		/* less then 8 bytes, go to byte copy loop */
	jb .L_4b_nocache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */
	ALIGN_DESTINATION

	/* Set 4x8-byte copy count and remainder */
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
@@ -262,60 +276,106 @@ ENTRY(__copy_user_nocache)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	jnz .L_4x8b_nocache_copy_loop

	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movnti %r8,(%rdi)
	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20:	movq (%rsi),%r8
21:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	jnz .L_8b_nocache_copy_loop

	/* If no byte left, we're done */
.L_4b_nocache_copy_entry:
	andl %edx,%edx
	jz .L_finish_copy

	/* If destination is not 4-byte aligned, go to byte copy: */
	movl %edi,%ecx
	andl $3,%ecx
	jnz .L_1b_cache_copy_entry

	/* Set 4-byte copy count (1 or 0) and remainder */
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	andl $3,%edx
	shrl $2,%ecx
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */

	/* Perform 4-byte nocache copy: */
30:	movl (%rsi),%r8d
31:	movnti %r8d,(%rdi)
	leaq 4(%rsi),%rsi
	leaq 4(%rdi),%rdi

	/* If no bytes left, we're done: */
	andl %edx,%edx
	jz .L_finish_copy

	/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
	movl %edx,%ecx
.L_1b_cache_copy_loop:
40:	movb (%rsi),%al
41:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xorl %eax,%eax
	jnz .L_1b_cache_copy_loop

	/* Finished copying; fence the prior stores */
.L_finish_copy:
	xorl %eax,%eax
	ASM_CLAC
	sfence
	ret

	.section .fixup,"ax"
30:	shll $6,%ecx
.L_fixup_4x8b_copy:
	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	lea (%rdx,%rcx,8),%rdx
	jmp 60f
50:	movl %ecx,%edx
60:	sfence
	jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_4b_copy:
	lea (%rdx,%rcx,4),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
	movl %ecx,%edx
.L_fixup_handle_tail:
	sfence
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
	_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
	_ASM_EXTABLE(30b,.L_fixup_4b_copy)
	_ASM_EXTABLE(31b,.L_fixup_4b_copy)
	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
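
As a reading aid for the rewritten assembly above, the following C analogue mirrors its dispatch order (illustration only: the function name is invented and plain memcpy() stands in for the movnti non-temporal stores): align the destination with a cached head copy, stream 8-byte chunks, use the new 4-byte case when the destination is 4-byte aligned, and fall back to cached byte copies for the tail.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * C analogue of the copy order only; the real routine uses movnti
 * (non-temporal) stores where this sketch calls memcpy().
 */
static void copy_nocache_sketch(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (len >= 8) {
		/* Cached head copy to 8-byte-align the destination,
		 * as ALIGN_DESTINATION does in the assembly. */
		size_t head = (8 - ((uintptr_t)d & 7)) & 7;

		memcpy(d, s, head);
		d += head; s += head; len -= head;

		/* Stand-in for the 4x8-byte and 8-byte movnti loops. */
		while (len >= 8) {
			memcpy(d, s, 8);
			d += 8; s += 8; len -= 8;
		}
	}

	/* New in this patch: a single 4-byte copy (movnti in the real code)
	 * when the destination is 4-byte aligned and >= 4 bytes remain. */
	if (len >= 4 && !((uintptr_t)d & 3)) {
		memcpy(d, s, 4);
		d += 4; s += 4; len -= 4;
	}

	/* Byte-wise cached copy for whatever is left. */
	memcpy(d, s, len);
}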
arch/x86/mm/fault.c +11 −4
@@ -287,6 +287,9 @@ static noinline int vmalloc_fault(unsigned long address)
	if (!pmd_k)
		return -1;

	if (pmd_huge(*pmd_k))
		return 0;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
@@ -360,8 +363,6 @@ void vmalloc_sync_all(void)
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
@@ -403,17 +404,23 @@ static noinline int vmalloc_fault(unsigned long address)
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
		BUG();

	if (pud_huge(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
		BUG();

	if (pmd_huge(*pmd))
		return 0;

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;