Documentation/kernel-parameters.txt (+5 −0)

@@ -452,6 +452,11 @@ running once the system is up.
 	eata=		[HW,SCSI]
 
+	ec_intr=	[HW,ACPI] ACPI Embedded Controller interrupt mode
+			Format: <int>
+			0: polling mode
+			non-0: interrupt mode (default)
+
 	eda=		[HW,PS2]
 
 	edb=		[HW,PS2]
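Note (not part of the patch): the new ec_intr= option is consumed by the ACPI Embedded Controller driver in drivers/acpi/ec.c, which this diff does not touch. A minimal sketch of an early __setup() handler for such an option is shown below; the identifiers ec_intr_mode_setup and acpi_ec_poll_mode are illustrative, not the driver's actual names.

#include <linux/init.h>
#include <linux/kernel.h>

/* Illustrative flag; the real driver keeps its own mode state. */
static int acpi_ec_poll_mode;	/* non-zero => poll instead of using the EC GPE interrupt */

/*
 * Parse "ec_intr=<int>" from the kernel command line:
 * 0 selects polling mode, any non-zero value selects interrupt mode.
 */
static int __init ec_intr_mode_setup(char *str)
{
	int val;

	if (!get_option(&str, &val))
		return 0;		/* malformed value: ignore the option */

	acpi_ec_poll_mode = (val == 0);
	return 1;			/* option consumed */
}
__setup("ec_intr=", ec_intr_mode_setup);

Booting with ec_intr=0 then forces EC transactions to be polled, typically used as a workaround on machines whose EC interrupt is unreliable.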
arch/i386/kernel/acpi/Makefile (+1 −1)

@@ -3,6 +3,6 @@
 obj-$(CONFIG_X86_IO_APIC)	+= earlyquirk.o
 obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup.o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y				+= cstate.o
+obj-y				+= cstate.o processor.o
 endif
arch/i386/kernel/acpi/boot.c (+3 −3)

@@ -464,7 +464,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
  * success: return IRQ number (>=0)
  * failure: return < 0
  */
-int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 {
 	unsigned int irq;
 	unsigned int plat_gsi = gsi;

@@ -476,14 +476,14 @@ int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
 	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
 		extern void eisa_set_level_irq(unsigned int irq);
 
-		if (edge_level == ACPI_LEVEL_SENSITIVE)
+		if (triggering == ACPI_LEVEL_SENSITIVE)
 			eisa_set_level_irq(gsi);
 	}
 #endif
 
 #ifdef CONFIG_X86_IO_APIC
 	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-		plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
+		plat_gsi = mp_register_gsi(gsi, triggering, polarity);
 	}
 #endif
 	acpi_gsi_to_irq(plat_gsi, &irq);
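Note (not part of the patch): renaming edge_level/active_high_low to triggering/polarity is purely cosmetic and matches the terminology used in ACPI resource descriptors; callers are unaffected. As a sketch (the GSI value and helper name below are made up for illustration), a typical PCI interrupt-routing caller still invokes the function exactly as before:

#include <linux/acpi.h>

/*
 * Sketch of a typical caller: route a level-triggered, active-low GSI
 * reported by _PRT and obtain the Linux IRQ number for it.
 */
static int example_route_pci_gsi(void)
{
	u32 gsi = 20;			/* hypothetical GSI from a _PRT entry */
	int irq;

	irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
	if (irq < 0)
		return irq;		/* registration failed */

	return irq;			/* usable with request_irq() */
}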
arch/i386/kernel/acpi/cstate.c (+0 −58)

@@ -14,64 +14,6 @@
 #include <acpi/processor.h>
 #include <asm/acpi.h>
 
-static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power *pow)
-{
-	struct acpi_object_list *obj_list;
-	union acpi_object *obj;
-	u32 *buf;
-
-	/* allocate and initialize pdc. It will be used later. */
-	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-	if (!obj_list) {
-		printk(KERN_ERR "Memory allocation error\n");
-		return;
-	}
-
-	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-	if (!obj) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj_list);
-		return;
-	}
-
-	buf = kmalloc(12, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj);
-		kfree(obj_list);
-		return;
-	}
-
-	buf[0] = ACPI_PDC_REVISION_ID;
-	buf[1] = 1;
-	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
-
-	obj->type = ACPI_TYPE_BUFFER;
-	obj->buffer.length = 12;
-	obj->buffer.pointer = (u8 *) buf;
-	obj_list->count = 1;
-	obj_list->pointer = obj;
-	pow->pdc = obj_list;
-
-	return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void acpi_processor_power_init_pdc(struct acpi_processor_power *pow, unsigned int cpu)
-{
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-
-	pow->pdc = NULL;
-	if (c->x86_vendor == X86_VENDOR_INTEL)
-		acpi_processor_power_init_intel_pdc(pow);
-
-	return;
-}
-
-EXPORT_SYMBOL(acpi_processor_power_init_pdc);
-
 /*
  * Initialize bm_flags based on the CPU cache properties
  * On SMP it depends on cache configuration
arch/i386/kernel/acpi/processor.c (new file, mode 100644, +75 −0)

@@ -0,0 +1,75 @@
+/*
+ * arch/i386/kernel/acpi/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
+{
+	struct acpi_object_list *obj_list;
+	union acpi_object *obj;
+	u32 *buf;
+
+	/* allocate and initialize pdc. It will be used later. */
+	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+	if (!obj_list) {
+		printk(KERN_ERR "Memory allocation error\n");
+		return;
+	}
+
+	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+	if (!obj) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj_list);
+		return;
+	}
+
+	buf = kmalloc(12, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj);
+		kfree(obj_list);
+		return;
+	}
+
+	buf[0] = ACPI_PDC_REVISION_ID;
+	buf[1] = 1;
+	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
+	if (cpu_has(c, X86_FEATURE_EST))
+		buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+
+	obj->type = ACPI_TYPE_BUFFER;
+	obj->buffer.length = 12;
+	obj->buffer.pointer = (u8 *) buf;
+	obj_list->count = 1;
+	obj_list->pointer = obj;
+	pr->pdc = obj_list;
+
+	return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+	unsigned int cpu = pr->id;
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+
+	pr->pdc = NULL;
+	if (c->x86_vendor == X86_VENDOR_INTEL)
+		init_intel_pdc(pr, c);
+
+	return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
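Note (not part of the patch): the generic ACPI processor driver is expected to call arch_acpi_processor_init_pdc() and, when pr->pdc has been filled in, pass that argument list to the firmware's _PDC method. The sketch below shows the general shape of that hand-off; acpi_evaluate_object() is the standard ACPICA entry point, but the helper name example_set_pdc and the cleanup details are illustrative rather than copied from drivers/acpi.

#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/processor.h>

/*
 * Illustrative sketch: advertise OS processor capabilities to the
 * platform by evaluating _PDC with the buffer that
 * arch_acpi_processor_init_pdc() prepared in pr->pdc.
 */
static acpi_status example_set_pdc(struct acpi_processor *pr)
{
	acpi_status status = AE_OK;

	arch_acpi_processor_init_pdc(pr);	/* arch fills pr->pdc, or leaves it NULL */

	if (pr->pdc) {
		status = acpi_evaluate_object(pr->handle, "_PDC", pr->pdc, NULL);

		/* The argument list was kmalloc'ed by the arch code, so free it here. */
		kfree(pr->pdc->pointer->buffer.pointer);
		kfree(pr->pdc->pointer);
		kfree(pr->pdc);
		pr->pdc = NULL;
	}

	return status;
}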