Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 28003486 authored by Linus Torvalds
Browse files

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "This update contains:

   - the manual revert of the SYSCALL32 changes which caused a
     regression

   - a fix for the MPX vma handling

   - three fixes for the ioremap 'is ram' checks

   - PAT warning fixes

   - a trivial fix for the size calculation of TLB tracepoints

   - handle old EFI structures gracefully

  This also contains a PAT fix from Jan plus a revert thereof.  Toshi
  explained why the code is correct"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/pat: Revert 'Adjust default caching mode translation tables'
  x86/asm/entry/32: Revert 'Do not use R9 in SYSCALL32' commit
  x86/mm: Fix newly introduced printk format warnings
  mm: Fix bugs in region_is_ram()
  x86/mm: Remove region_is_ram() call from ioremap
  x86/mm: Move warning from __ioremap_check_ram() to the call site
  x86/mm/pat, drivers/media/ivtv: Move the PAT warning and replace WARN() with pr_warn()
  x86/mm/pat, drivers/infiniband/ipath: Replace WARN() with pr_warn()
  x86/mm/pat: Adjust default caching mode translation tables
  x86/fpu: Disable dependent CPU features on "noxsave"
  x86/mpx: Do not set ->vm_ops on MPX VMAs
  x86/mm: Add parenthesis for TLB tracepoint size calculation
  efi: Handle memory error structures produced based on old versions of standard
parents 26ae19a3 1a4e8795
Loading
Loading
Loading
Loading
+9 −5
Original line number Diff line number Diff line
@@ -205,7 +205,6 @@ sysexit_from_sys_call:
	movl	RDX(%rsp), %edx		/* arg3 */
	movl	RSI(%rsp), %ecx		/* arg4 */
	movl	RDI(%rsp), %r8d		/* arg5 */
	movl	%ebp, %r9d		/* arg6 */
	.endm

	.macro auditsys_exit exit
@@ -236,6 +235,7 @@ sysexit_from_sys_call:

sysenter_auditsys:
	auditsys_entry_common
	movl	%ebp, %r9d		/* reload 6th syscall arg */
	jmp	sysenter_dispatch

sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
	 * 32-bit zero extended:
	 */
	ASM_STAC
1:	movl	(%r8), %ebp
1:	movl	(%r8), %r9d
	_ASM_EXTABLE(1b, ia32_badarg)
	ASM_CLAC
	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
cstar_do_call:
	/* 32-bit syscall -> 64-bit C ABI argument conversion */
	movl	%edi, %r8d		/* arg5 */
	movl	%ebp, %r9d		/* arg6 */
	/* r9 already loaded */		/* arg6 */
	xchg	%ecx, %esi		/* rsi:arg2, rcx:arg4 */
	movl	%ebx, %edi		/* arg1 */
	movl	%edx, %edx		/* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
	call	*ia32_sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
	movl	RCX(%rsp), %ebp
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -392,7 +391,9 @@ sysretl_from_sys_call:

#ifdef CONFIG_AUDITSYSCALL
cstar_auditsys:
	movl	%r9d, R9(%rsp)		/* register to be clobbered by call */
	auditsys_entry_common
	movl	R9(%rsp), %r9d		/* reload 6th syscall arg */
	jmp	cstar_dispatch

sysretl_audit:
@@ -404,14 +405,16 @@ cstar_tracesys:
	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jz	cstar_auditsys
#endif
	xchgl	%r9d, %ebp
	SAVE_EXTRA_REGS
	xorl	%eax, %eax		/* Do not leak kernel information */
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%rax, R9(%rsp)
	movq	%r9, R9(%rsp)
	movq	%rax, R8(%rsp)
	movq	%rsp, %rdi		/* &pt_regs -> arg1 */
	call	syscall_trace_enter
	movl	R9(%rsp), %r9d

	/* Reload arg registers from stack. (see sysenter_tracesys) */
	movl	RCX(%rsp), %ecx
@@ -421,6 +424,7 @@ cstar_tracesys:
	movl	%eax, %eax		/* zero extension */

	RESTORE_EXTRA_REGS
	xchgl	%ebp, %r9d
	jmp	cstar_do_call
END(entry_SYSCALL_compat)

+6 −0
Original line number Diff line number Diff line
@@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s)

	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
	setup_clear_cpu_cap(X86_FEATURE_AVX);
	setup_clear_cpu_cap(X86_FEATURE_AVX2);
	setup_clear_cpu_cap(X86_FEATURE_AVX512F);
	setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
	setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
	setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
	setup_clear_cpu_cap(X86_FEATURE_MPX);

	return 1;
}
+6 −17
Original line number Diff line number Diff line
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
				(unsigned long int)phys_addr,
				(unsigned long int)last_addr);
		return NULL;
	}

	/* If could not be identified(-1), check page by page */
	if (ram_region < 0) {
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
					  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
+7 −0
Original line number Diff line number Diff line
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * Report a human-readable name for special x86 VMAs.
 *
 * Returns "[mpx]" for VMAs flagged VM_MPX (MPX bounds tables),
 * NULL for all other mappings (no arch-specific name).
 */
const char *arch_vma_name(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_MPX) ? "[mpx]" : NULL;
}
+3 −21
Original line number Diff line number Diff line
@@ -20,20 +20,6 @@
#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>

/*
 * vm_ops->name callback for MPX bounds-table VMAs: always labels them
 * "[mpx]" (presumably surfaced in /proc/<pid>/maps — the caller side is
 * not visible here).
 */
static const char *mpx_mapping_name(struct vm_area_struct *vma)
{
	return "[mpx]";
}

/*
 * vm_ops installed on VMAs created by mpx_mmap(); only .name is set.
 * The ops pointer itself doubles as the MPX-VMA identity tag used by
 * is_mpx_vma().
 */
static struct vm_operations_struct mpx_vma_ops = {
	.name = mpx_mapping_name,
};

/*
 * Nonzero iff @vma is an MPX bounds-table mapping, identified by the
 * vm_ops pointer that mpx_mmap() installs.
 */
static int is_mpx_vma(struct vm_area_struct *vma)
{
	return (vma->vm_ops == &mpx_vma_ops);
}

static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
/*
 * This is really a simplified "vm_mmap". it only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 *
 * Later on, we use the vma->vm_ops to uniquely identify these
 * VMAs.
 */
static unsigned long mpx_mmap(unsigned long len)
{
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
		ret = -ENOMEM;
		goto out;
	}
	vma->vm_ops = &mpx_vma_ops;

	if (vm_flags & VM_LOCKED) {
		up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
		 * so stop immediately and return an error.  This
		 * probably results in a SIGSEGV.
		 */
		if (!is_mpx_vma(vma))
		if (!(vma->vm_flags & VM_MPX))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
	 * lots of tables even though we have no actual table
	 * entries in use.
	 */
	while (next && is_mpx_vma(next))
	while (next && (next->vm_flags & VM_MPX))
		next = next->vm_next;
	while (prev && is_mpx_vma(prev))
	while (prev && (prev->vm_flags & VM_MPX))
		prev = prev->vm_prev;
	/*
	 * We know 'start' and 'end' lie within an area controlled
Loading