Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ee5c2ab0 authored by Linus Torvalds
Browse files

Merge branch 'x86-fixes-for-linus' of...

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  xen: don't drop NX bit
  xen: mask unwanted pte bits in __supported_pte_mask
  xen: Use wmb instead of rmb in xen_evtchn_do_upcall().
  x86: fix NULL pointer deref in __switch_to
parents f6837bfa ebb9cfe2
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -1228,6 +1228,11 @@ asmlinkage void __init xen_start_kernel(void)
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (!is_initial_xendomain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	/* set the limit of our address space */
	xen_reserve_top();

+30 −26
Original line number Diff line number Diff line
@@ -179,48 +179,54 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		preempt_enable();
}

pteval_t xen_pte_val(pte_t pte)
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	pteval_t ret = pte.pte;
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
	return val;
}

	return ret;
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

pgdval_t xen_pgd_val(pgd_t pgd)
	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	pgdval_t ret = pgd.pgd;
	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
	return ret;
	return pte_mfn_to_pfn(pte.pte);
}

pte_t xen_make_pte(pteval_t pte)
pgdval_t xen_pgd_val(pgd_t pgd)
{
	if (pte & _PAGE_PRESENT) {
		pte = phys_to_machine(XPADDR(pte)).maddr;
		pte &= ~(_PAGE_PCD | _PAGE_PWT);
	return pte_mfn_to_pfn(pgd.pgd);
}

	return (pte_t){ .pte = pte };
pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	if (pgd & _PAGE_PRESENT)
		pgd = phys_to_machine(XPADDR(pgd)).maddr;

	return (pgd_t){ pgd };
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	pmdval_t ret = native_pmd_val(pmd);
	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
	return ret;
	return pte_mfn_to_pfn(pmd.pmd);
}
#ifdef CONFIG_X86_PAE
void xen_set_pud(pud_t *ptr, pud_t val)
@@ -267,9 +273,7 @@ void xen_pmd_clear(pmd_t *pmdp)

pmd_t xen_make_pmd(pmdval_t pmd)
{
	if (pmd & _PAGE_PRESENT)
		pmd = phys_to_machine(XPADDR(pmd)).maddr;

	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
#else  /* !PAE */
+1 −1
Original line number Diff line number Diff line
@@ -17,7 +17,7 @@ ENTRY(startup_xen)

	__FINIT

.pushsection .bss.page_aligned
.pushsection .text
	.align PAGE_SIZE_asm
ENTRY(hypercall_page)
	.skip 0x1000
+9 −6
Original line number Diff line number Diff line
@@ -176,7 +176,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
	 * we set it now, so we can trap and pass that trap to the Guest if it
	 * uses the FPU. */
	if (cpu->ts)
		lguest_set_ts();
		unlazy_fpu(current);

	/* SYSENTER is an optimized way of doing system calls.  We can't allow
	 * it because it always jumps to privilege level 0.  A normal Guest
@@ -196,6 +196,10 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
	 * trap made the switcher code come back, and an error code which some
	 * traps set.  */

	 /* Restore SYSENTER if it's supposed to be on. */
	 if (boot_cpu_has(X86_FEATURE_SEP))
		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);

	/* If the Guest page faulted, then the cr2 register will tell us the
	 * bad virtual address.  We have to grab this now, because once we
	 * re-enable interrupts an interrupt could fault and thus overwrite
@@ -203,13 +207,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
	if (cpu->regs->trapnum == 14)
		cpu->arch.last_pagefault = read_cr2();
	/* Similarly, if we took a trap because the Guest used the FPU,
	 * we have to restore the FPU it expects to see. */
	 * we have to restore the FPU it expects to see.
	 * math_state_restore() may sleep and we may even move off to
	 * a different CPU. So all the critical stuff should be done
	 * before this.  */
	else if (cpu->regs->trapnum == 7)
		math_state_restore();

	/* Restore SYSENTER if it's supposed to be on. */
	if (boot_cpu_has(X86_FEATURE_SEP))
		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
}

/*H:130 Now we've examined the hypercall code; our Guest can make requests.
+1 −1
Original line number Diff line number Diff line
@@ -529,7 +529,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		rmb();
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {