Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3e6bdf47 authored by Linus Torvalds
Browse files
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: fix deadlock, make pgd_lock irq-safe
  virtio: fix trivial build bug
  x86: fix mtrr trimming
  x86: delay CPA self-test and repeat it
  x86: fix 64-bit sections
  generic: add __FINITDATA
  x86: remove spurious ifdefs from pageattr.c
  x86: mark the .rodata section also NX
  x86: fix iret exception recovery on 64-bit
  cpuidle: dubious one-bit signed bitfield in cpuidle.h
  x86: fix sparse warnings in powernow-k8.c
  x86: fix sparse error in traps_32.c
  x86: trivial sparse/checkpatch in quirks.c
  x86 ptrace: disallow null cs/ss
  MAINTAINERS: RDC R-321x SoC maintainer
  brk randomization: introduce CONFIG_COMPAT_BRK
  brk: check the lower bound properly
  x86: remove X2 workaround
  x86: make spurious fault handler aware of large mappings
  x86: make traps on entry code be debuggable in user space, 64-bit
parents 3d4d4582 58d5d0d8
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -3224,6 +3224,12 @@ M: mporter@kernel.crashing.org
L:	linux-kernel@vger.kernel.org
S:	Maintained

RDC R-321X SoC
P:	Florian Fainelli
M:	florian.fainelli@telecomint.eu
L:	linux-kernel@vger.kernel.org
S:	Maintained

RDC R6040 FAST ETHERNET DRIVER
P:	Florian Fainelli
M:	florian.fainelli@telecomint.eu
+2 −2
Original line number Diff line number Diff line
@@ -220,9 +220,9 @@ config DEBUG_BOOT_PARAMS
	  This option will cause struct boot_params to be exported via debugfs.

config CPA_DEBUG
	bool "CPA self test code"
	bool "CPA self-test code"
	depends on DEBUG_KERNEL
	help
	  Do change_page_attr self tests at boot.
	  Do change_page_attr() self-tests every 30 seconds.

endmenu
+0 −1
Original line number Diff line number Diff line
@@ -827,7 +827,6 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf

	for (i = 0; i < data->acpi_data.state_count; i++) {
		u32 index;
		u32 hi = 0, lo = 0;

		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
		if (index > data->max_hw_pstate) {
+9 −10
Original line number Diff line number Diff line
@@ -659,7 +659,7 @@ static __init int amd_special_default_mtrr(void)
 */
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
	unsigned long i, base, size, highest_addr = 0, def, dummy;
	unsigned long i, base, size, highest_pfn = 0, def, dummy;
	mtrr_type type;
	u64 trim_start, trim_size;

@@ -682,28 +682,27 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
		mtrr_if->get(i, &base, &size, &type);
		if (type != MTRR_TYPE_WRBACK)
			continue;
		base <<= PAGE_SHIFT;
		size <<= PAGE_SHIFT;
		if (highest_addr < base + size)
			highest_addr = base + size;
		if (highest_pfn < base + size)
			highest_pfn = base + size;
	}

	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
	if (!highest_addr) {
	if (!highest_pfn) {
		printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n");
		WARN_ON(1);
		return 0;
	}

	if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
	if (highest_pfn < end_pfn) {
		printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
			" all of memory, losing %LdMB of RAM.\n",
			(((u64)end_pfn << PAGE_SHIFT) - highest_addr) >> 20);
			" all of memory, losing %luMB of RAM.\n",
			(end_pfn - highest_pfn) >> (20 - PAGE_SHIFT));

		WARN_ON(1);

		printk(KERN_INFO "update e820 for mtrr\n");
		trim_start = highest_addr;
		trim_start = highest_pfn;
		trim_start <<= PAGE_SHIFT;
		trim_size = end_pfn;
		trim_size <<= PAGE_SHIFT;
		trim_size -= trim_start;
+16 −8
Original line number Diff line number Diff line
@@ -582,7 +582,6 @@ retint_restore_args: /* return to kernel space */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0						
iret_label:	
#ifdef CONFIG_PARAVIRT
	INTERRUPT_RETURN
#endif
@@ -593,13 +592,22 @@ ENTRY(native_iret)
	.quad native_iret, bad_iret
	.previous
	.section .fixup,"ax"
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
bad_iret:
	movq $11,%rdi	/* SIGSEGV */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	jmp do_exit
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
@@ -911,7 +919,7 @@ error_kernelspace:
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report an truncated RIP for IRET 
	   exceptions returning to compat mode. Check for these here too. */
	leaq iret_label(%rip),%rbp
	leaq native_iret(%rip),%rbp
	cmpq %rbp,RIP(%rsp) 
	je   error_swapgs
	movl %ebp,%ebp	/* zero extend */
Loading