Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9b3499d7 authored by Linus Torvalds
Browse files

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Two fixes:

   - A PCID related revert that fixes power management and performance
     regressions.

   - The module loader robustization and sanity check commit is rather
     fresh, but it looked like a good idea to apply because of the
     hidden data corruption problem such invalid modules could cause"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/module: Detect and skip invalid relocations
  Revert "x86/mm: Stop calling leave_mm() in idle code"
parents b21172cf eda9cec4
Loading
Loading
Loading
Loading
+2 −0
Original line number Original line Diff line number Diff line
@@ -112,6 +112,8 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}
}


#define acpi_unlazy_tlb(x)

#ifdef CONFIG_ACPI_NUMA
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)  \
#define for_each_possible_early_cpu(cpu)  \
+2 −0
Original line number Original line Diff line number Diff line
@@ -150,6 +150,8 @@ static inline void disable_acpi(void) { }
extern int x86_acpi_numa_init(void);
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
#endif /* CONFIG_ACPI_NUMA */


#define acpi_unlazy_tlb(x)	leave_mm(x)

#ifdef CONFIG_ACPI_APEI
#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
{
+13 −0
Original line number Original line Diff line number Diff line
@@ -172,19 +172,27 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
		case R_X86_64_NONE:
		case R_X86_64_NONE:
			break;
			break;
		case R_X86_64_64:
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			*(u64 *)loc = val;
			*(u64 *)loc = val;
			break;
			break;
		case R_X86_64_32:
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			*(u32 *)loc = val;
			*(u32 *)loc = val;
			if (val != *(u32 *)loc)
			if (val != *(u32 *)loc)
				goto overflow;
				goto overflow;
			break;
			break;
		case R_X86_64_32S:
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			*(s32 *)loc = val;
			*(s32 *)loc = val;
			if ((s64)val != *(s32 *)loc)
			if ((s64)val != *(s32 *)loc)
				goto overflow;
				goto overflow;
			break;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PC32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			val -= (u64)loc;
			*(u32 *)loc = val;
			*(u32 *)loc = val;
#if 0
#if 0
@@ -200,6 +208,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
	}
	}
	return 0;
	return 0;


invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
+14 −3
Original line number Original line Diff line number Diff line
@@ -85,6 +85,7 @@ void leave_mm(int cpu)


	switch_mm(NULL, &init_mm, NULL);
	switch_mm(NULL, &init_mm, NULL);
}
}
EXPORT_SYMBOL_GPL(leave_mm);


void switch_mm(struct mm_struct *prev, struct mm_struct *next,
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
	       struct task_struct *tsk)
@@ -195,12 +196,22 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
			write_cr3(build_cr3(next, new_asid));
			write_cr3(build_cr3(next, new_asid));
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,

					TLB_FLUSH_ALL);
			/*
			 * NB: This gets called via leave_mm() in the idle path
			 * where RCU functions differently.  Tracing normally
			 * uses RCU, so we need to use the _rcuidle variant.
			 *
			 * (There is no good reason for this.  The idle code should
			 *  be rearranged to call this before rcu_idle_enter().)
			 */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
		} else {
		} else {
			/* The new ASID is already up to date. */
			/* The new ASID is already up to date. */
			write_cr3(build_cr3_noflush(next, new_asid));
			write_cr3(build_cr3_noflush(next, new_asid));
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);

			/* See above wrt _rcuidle. */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
		}
		}


		this_cpu_write(cpu_tlbstate.loaded_mm, next);
		this_cpu_write(cpu_tlbstate.loaded_mm, next);
+2 −0
Original line number Original line Diff line number Diff line
@@ -710,6 +710,8 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
static void acpi_idle_enter_bm(struct acpi_processor *pr,
static void acpi_idle_enter_bm(struct acpi_processor *pr,
			       struct acpi_processor_cx *cx, bool timer_bc)
			       struct acpi_processor_cx *cx, bool timer_bc)
{
{
	acpi_unlazy_tlb(smp_processor_id());

	/*
	/*
	 * Must be done before busmaster disable as we might need to
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 * access HPET !
Loading