Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4ded3835 authored by Linus Torvalds
Browse files

Merge branch 'x86-fixes-for-linus' of...

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip:
  x86, fpu: fix CONFIG_PREEMPT=y corruption of application's FPU stack
  suspend-vs-iommu: prevent suspend if we could not resume
  x86: section mismatch fix
  x86: fix Xorg crash with xf86MapVidMem error
  x86: fix pointer type warning in arch/x86/mm/init_64.c:early_memtest
  x86: fix bad pmd ffff810000207xxx(9090909090909090)
  x86: ioremap fix failing nesting check
  x86: fix broken math-emu with lazy allocation of fpu area
  x86: enable preemption in delay
  x86: disable preemption in native_smp_prepare_cpus
  x86: fix APIC warning on 32bit v2
parents e97dcb0e 870568b3
Loading
Loading
Loading
Loading
+14 −2
Original line number Diff line number Diff line
@@ -242,12 +242,19 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)

/*
 * Register one local APIC entry parsed from the ACPI MADT.
 *
 * @id:      physical APIC id of the CPU being registered
 * @enabled: MADT "enabled" flag; disabled entries are only counted
 *
 * NOTE(review): the scraped diff view had left the removed hunk line
 * "generic_processor_info(id, 0);" interleaved here, which would have
 * registered the CPU twice; the function below is the post-commit form
 * with a single registration call.
 */
static void __cpuinit acpi_register_lapic(int id, u8 enabled)
{
	unsigned int ver = 0;

	if (!enabled) {
		/* Disabled in the MADT: count it, but don't bring it up. */
		++disabled_cpus;
		return;
	}

#ifdef CONFIG_X86_32
	/*
	 * On 32-bit, pass along the boot CPU's APIC version when it is
	 * already known; -1U means it has not been detected yet.
	 */
	if (boot_cpu_physical_apicid != -1U)
		ver = apic_version[boot_cpu_physical_apicid];
#endif

	generic_processor_info(id, ver);
}

static int __init
@@ -767,8 +774,13 @@ static void __init acpi_register_lapic_address(unsigned long address)
	mp_lapic_addr = address;

	set_fixmap_nocache(FIX_APIC_BASE, address);
	if (boot_cpu_physical_apicid == -1U)
	if (boot_cpu_physical_apicid == -1U) {
		boot_cpu_physical_apicid  = GET_APIC_ID(read_apic_id());
#ifdef CONFIG_X86_32
		apic_version[boot_cpu_physical_apicid] =
			 GET_APIC_VERSION(apic_read(APIC_LVR));
#endif
	}
}

static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
+29 −15
Original line number Diff line number Diff line
@@ -56,6 +56,11 @@ void __cpuinit mxcsr_feature_mask_init(void)

void __init init_thread_xstate(void)
{
	if (!HAVE_HWFP) {
		xstate_size = sizeof(struct i387_soft_struct);
		return;
	}

	if (cpu_has_fxsr)
		xstate_size = sizeof(struct i387_fxsave_struct);
#ifdef CONFIG_X86_32
@@ -94,7 +99,7 @@ void __cpuinit fpu_init(void)
int init_fpu(struct task_struct *tsk)
{
	if (tsk_used_math(tsk)) {
		if (tsk == current)
		if (HAVE_HWFP && tsk == current)
			unlazy_fpu(tsk);
		return 0;
	}
@@ -109,6 +114,15 @@ int init_fpu(struct task_struct *tsk)
			return -ENOMEM;
	}

#ifdef CONFIG_X86_32
	if (!HAVE_HWFP) {
		memset(tsk->thread.xstate, 0, xstate_size);
		finit();
		set_stopped_child_used_math(tsk);
		return 0;
	}
#endif

	if (cpu_has_fxsr) {
		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;

@@ -330,13 +344,13 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	struct user_i387_ia32_struct env;
	int ret;

	if (!HAVE_HWFP)
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	ret = init_fpu(target);
	if (ret)
		return ret;

	if (!HAVE_HWFP)
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr) {
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->fsave, 0,
@@ -360,15 +374,15 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	struct user_i387_ia32_struct env;
	int ret;

	if (!HAVE_HWFP)
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if (!HAVE_HWFP)
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr) {
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->fsave, 0, -1);
@@ -474,10 +488,9 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
{
	int err;

	if (HAVE_HWFP) {
	struct task_struct *tsk = current;

	if (HAVE_HWFP)
		clear_fpu(tsk);

	if (!used_math()) {
@@ -486,6 +499,7 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
			return err;
	}

	if (HAVE_HWFP) {
		if (cpu_has_fxsr)
			err = restore_i387_fxsave(buf);
		else
+30 −1
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
@@ -548,6 +549,28 @@ static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
	return aper_base;
}

/*
 * sysdev resume hook for the GART: nothing to restore here, so simply
 * report success.  (Suspend is refused in gart_suspend(), so in practice
 * this path is not reached after a real suspend.)
 */
static int gart_resume(struct sys_device *dev)
{
	return 0;
}

/*
 * sysdev suspend hook for the GART.  The GART state cannot be restored
 * on resume, so always return -EINVAL to veto the suspend — matching the
 * merge log entry "suspend-vs-iommu: prevent suspend if we could not
 * resume".
 */
static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return -EINVAL;
}

/*
 * sysdev class wiring the GART into the suspend/resume path; registered
 * from init_k8_gatt().  (Stray blank line inside the initializer removed.)
 */
static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,
};

/* The single sysdev instance representing the GART. */
static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
@@ -558,7 +581,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;
	int i, error;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
@@ -606,6 +629,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)

		pci_write_config_dword(dev, 0x90, ctl);
	}

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- would corrupt data on next suspend");
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
+4 −1
Original line number Diff line number Diff line
@@ -649,8 +649,11 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (next_p->fpu_counter > 5)
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();

	/*
+4 −1
Original line number Diff line number Diff line
@@ -658,8 +658,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (next_p->fpu_counter>5)
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}
Loading