arch/x86/kernel/acpi/boot.c +0 −2

@@ -514,8 +514,6 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
 	 */
 	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-		extern void eisa_set_level_irq(unsigned int irq);
-
 		if (triggering == ACPI_LEVEL_SENSITIVE)
 			eisa_set_level_irq(gsi);
 	}

arch/x86/kernel/apic_32.c +11 −7

@@ -70,6 +70,10 @@ static int local_apic_timer_disabled;
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
+int first_system_vector = 0xfe;
+
+char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
+
 /*
  * Debug level, exported for io_apic.c
  */

@@ -1351,13 +1355,13 @@ void __init smp_intr_init(void)
 	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 	 * IPI, driven by wakeup.
 	 */
-	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
 	/* IPI for invalidation */
-	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
 
 	/* IPI for generic function call */
-	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 }
 #endif

@@ -1370,15 +1374,15 @@ void __init apic_intr_init(void)
 	smp_intr_init();
 #endif
 
 	/* self generated IPI for local APIC timer */
-	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
 	/* IPI vectors for APIC spurious and error interrupts */
-	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-	set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 
 	/* thermal monitor LVT interrupt */
 #ifdef CONFIG_X86_MCE_P4THERMAL
-	set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 }

arch/x86/kernel/entry_32.S +1 −1

@@ -51,7 +51,7 @@
 #include <asm/percpu.h>
 #include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
-#include "irq_vectors.h"
+#include <asm/irq_vectors.h>
 
 /*
  * We use macros for low-level operations which need to be overridden
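The apic_32.c hunks above swap set_intr_gate() for alloc_intr_gate() and introduce the first_system_vector / system_vectors[] bookkeeping that backs it. The patch does not show alloc_intr_gate() itself (it lives in the descriptor headers), so the user-space model below is only a sketch of the plausible semantics: claim the vector in system_vectors[] exactly once, track the lowest allocated system vector, then install the gate as set_intr_gate() always did.

#include <assert.h>
#include <stdio.h>

#define NR_VECTORS		256
#define SYS_VECTOR_FREE		0
#define SYS_VECTOR_ALLOCED	1

static int first_system_vector = 0xfe;
static char system_vectors[NR_VECTORS];	/* zero-initialized: all SYS_VECTOR_FREE */

static void set_intr_gate(unsigned int n, void (*addr)(void))
{
	/* stand-in for the real IDT write */
	printf("IDT[0x%02x] <- %p\n", n, (void *)addr);
}

static void alloc_intr_gate(unsigned int n, void (*addr)(void))
{
	/* claim the vector exactly once, remembering the lowest one in use */
	assert(system_vectors[n] == SYS_VECTOR_FREE);
	system_vectors[n] = SYS_VECTOR_ALLOCED;
	if ((int)n < first_system_vector)
		first_system_vector = n;
	set_intr_gate(n, addr);
}

static void reschedule_interrupt(void) { }

int main(void)
{
	alloc_intr_gate(0xfc, reschedule_interrupt);	/* e.g. RESCHEDULE_VECTOR */
	printf("first_system_vector = 0x%x\n", first_system_vector);
	return 0;
}

With this bookkeeping in place, a double allocation of the same vector trips the check instead of silently overwriting an IDT entry, which is presumably the point of the rename.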
arch/x86/kernel/genx2apic_uv_x.c +98 −43

@@ -5,7 +5,7 @@
  *
  * SGI UV APIC functions (note: not an Intel compatible APIC)
  *
- * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/threads.h>

@@ -55,37 +55,37 @@ static cpumask_t uv_vector_allocation_domain(int cpu)
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
 {
 	unsigned long val;
-	int nasid;
+	int pnode;
 
-	nasid = uv_apicid_to_nasid(phys_apicid);
+	pnode = uv_apicid_to_pnode(phys_apicid);
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
 	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
 	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
 	    APIC_DM_INIT;
-	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 	mdelay(10);
 
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
 	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
 	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
 	    APIC_DM_STARTUP;
-	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 	return 0;
 }
 
 static void uv_send_IPI_one(int cpu, int vector)
 {
 	unsigned long val, apicid, lapicid;
-	int nasid;
+	int pnode;
 
 	apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */
 	lapicid = apicid & 0x3f;	/* ZZZ macro needed */
-	nasid = uv_apicid_to_nasid(apicid);
+	pnode = uv_apicid_to_pnode(apicid);
 	val =
 	    (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid <<
 					      UVH_IPI_INT_APIC_ID_SHFT) |
 	    (vector << UVH_IPI_INT_VECTOR_SHFT);
-	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
 static void uv_send_IPI_mask(cpumask_t mask, int vector)
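Every nasid in the IPI paths above becomes a pnode, and set_x2apic_extra_bits() (next hunk) drops its ">> 1": the before/after pair implies pnode == nasid >> 1, and the pnode sits directly above the 6 lapic-id bits of the apicid (note the "apicid & 0x3f" mask). uv_apicid_to_pnode() is defined in the UV hub headers, not in this patch, so the model below is an assumption based on that bit layout.

#include <stdio.h>

#define UV_APIC_PNODE_SHIFT 6	/* assumed: pnode bits start above the 6 lapic-id bits */

static int uv_apicid_to_pnode(int apicid)
{
	return apicid >> UV_APIC_PNODE_SHIFT;
}

int main(void)
{
	int apicid = 0x8a;	/* pnode 2, lapic id 0x0a under this layout */
	int pnode = uv_apicid_to_pnode(apicid);

	printf("apicid 0x%x -> pnode %d, x2apic extra bits 0x%x\n",
	       apicid, pnode, pnode << 6);
	return 0;
}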
@@ -159,39 +159,81 @@ struct genapic apic_x2apic_uv_x = {
 	.phys_pkg_id = phys_pkg_id,	/* Fixme ZZZ */
 };
 
-static __cpuinit void set_x2apic_extra_bits(int nasid)
+static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6);
+	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
 }
 
 /*
  * Called on boot cpu.
  */
+static __init int boot_pnode_to_blade(int pnode)
+{
+	int blade;
+
+	for (blade = 0; blade < uv_num_possible_blades(); blade++)
+		if (pnode == uv_blade_info[blade].pnode)
+			return blade;
+	BUG();
+}
+
+struct redir_addr {
+	unsigned long redirect;
+	unsigned long alias;
+};
+
+#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
+
+static __initdata struct redir_addr redir_addrs[] = {
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+};
+
+static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
+{
+	union uvh_si_alias0_overlay_config_u alias;
+	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
+		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
+		if (alias.s.base == 0) {
+			*size = (1UL << alias.s.m_alias);
+			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
+			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
+			return;
+		}
+	}
+	BUG();
+}
+
 static __init void uv_system_init(void)
 {
 	union uvh_si_addr_map_config_u m_n_config;
-	int bytes, nid, cpu, lcpu, nasid, last_nasid, blade;
-	unsigned long mmr_base;
+	union uvh_node_id_u node_id;
+	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
+	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
+	unsigned long mmr_base, present;
 
 	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+	m_val = m_n_config.s.m_skt;
+	n_val = m_n_config.s.n_skt;
 	mmr_base =
 	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
 	    ~UV_MMR_ENABLE;
 	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
 
-	last_nasid = -1;
-	for_each_possible_cpu(cpu) {
-		nid = cpu_to_node(cpu);
-		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
-		if (nasid != last_nasid)
-			uv_possible_blades++;
-		last_nasid = nasid;
-	}
+	for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
+		uv_possible_blades +=
+		  hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
 	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
 	uv_blade_info = alloc_bootmem_pages(bytes);
 
+	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
+
 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
 	uv_node_to_blade = alloc_bootmem_pages(bytes);
 	memset(uv_node_to_blade, 255, bytes);
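Blade discovery no longer walks the possible cpus comparing consecutive nasids; it reads the hub's NODE_PRESENT table, a bitmap with 64 nodes per 64-bit word, and popcounts it, so cpu-less blades are counted too. A self-contained model of that arithmetic, with the table depth and contents invented for the demo:

#include <stdio.h>

#define NODE_PRESENT_TABLE_DEPTH 4	/* assumed depth, for the demo only */

static int hweight64(unsigned long long w)
{
	return __builtin_popcountll(w);	/* same job as the kernel helper */
}

int main(void)
{
	/* pretend MMR reads: nodes 0, 1 and 64, 66 are present */
	unsigned long long present[NODE_PRESENT_TABLE_DEPTH] = {
		0x3ULL, 0x5ULL, 0x0ULL, 0x0ULL,
	};
	int i, j, blades = 0;

	for (i = 0; i < NODE_PRESENT_TABLE_DEPTH; i++)
		blades += hweight64(present[i]);
	printf("found %d blades\n", blades);

	/* and each blade's pnode, derived the same way as in the next hunk */
	for (i = 0; i < NODE_PRESENT_TABLE_DEPTH; i++)
		for (j = 0; j < 64; j++)
			if (present[i] >> j & 1)
				printf("blade pnode %d\n", i * 64 + j);
	return 0;
}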
@@ -200,43 +242,56 @@ static __init void uv_system_init(void)
 	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
 	memset(uv_cpu_to_blade, 255, bytes);
 
-	last_nasid = -1;
-	blade = -1;
-	lcpu = -1;
-	for_each_possible_cpu(cpu) {
-		nid = cpu_to_node(cpu);
-		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
-		if (nasid != last_nasid) {
-			blade++;
-			lcpu = -1;
-			uv_blade_info[blade].nr_posible_cpus = 0;
-			uv_blade_info[blade].nr_online_cpus = 0;
-		}
-		last_nasid = nasid;
-		lcpu++;
+	blade = 0;
+	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
+		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
+		for (j = 0; j < 64; j++) {
+			if (!test_bit(j, &present))
+				continue;
+			uv_blade_info[blade].pnode = (i * 64 + j);
+			uv_blade_info[blade].nr_possible_cpus = 0;
+			uv_blade_info[blade].nr_online_cpus = 0;
+			blade++;
+		}
+	}
 
-		uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt;
-		uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt;
+	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+	gnode_upper = (((unsigned long)node_id.s.node_id) &
+		       ~((1 << n_val) - 1)) << m_val;
+
+	for_each_present_cpu(cpu) {
+		nid = cpu_to_node(cpu);
+		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
+		blade = boot_pnode_to_blade(pnode);
+		lcpu = uv_blade_info[blade].nr_possible_cpus;
+		uv_blade_info[blade].nr_possible_cpus++;
+
+		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
+		uv_cpu_hub_info(cpu)->lowmem_remap_top =
+					lowmem_redir_base + lowmem_redir_size;
+		uv_cpu_hub_info(cpu)->m_val = m_val;
+		uv_cpu_hub_info(cpu)->n_val = m_val;
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
-		uv_cpu_hub_info(cpu)->local_nasid = nasid;
-		uv_cpu_hub_info(cpu)->gnode_upper =
-		    nasid & ~((1 << uv_hub_info->n_val) - 1);
+		uv_cpu_hub_info(cpu)->pnode = pnode;
+		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
 		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
-		uv_blade_info[blade].nasid = nasid;
-		uv_blade_info[blade].nr_posible_cpus++;
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 
-		printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n",
-		       cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid);
-		printk(KERN_DEBUG "UV lcpu %d, blade %d\n", lcpu, blade);
+		printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, pnode %d, nid %d, "
+		       "lcpu %d, blade %d\n",
+		       cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
+		       lcpu, blade);
 	}
 }
 
 /*
  * Called on each cpu to initialize the per_cpu UV data area.
+ *	ZZZ hotplug not supported yet
  */
 void __cpuinit uv_cpu_init(void)
 {

@@ -246,5 +301,5 @@ void __cpuinit uv_cpu_init(void)
 	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
 
 	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
-		set_x2apic_extra_bits(uv_hub_info->local_nasid);
+		set_x2apic_extra_bits(uv_hub_info->pnode);
 }
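uv_system_init() now derives m_val/n_val from the socket configuration and precomputes pnode_mask, gpa_mask and gnode_upper for every cpu's hub info. The arithmetic composes a global address as node bits stacked above m_val offset bits. The sketch below replays that bit layout with invented field widths and register values, purely to show the shape; the real widths come from UVH_SI_ADDR_MAP_CONFIG at boot.

#include <stdio.h>

int main(void)
{
	int m_val = 39, n_val = 11;	/* example socket config, not read from hardware */
	unsigned long node_id = 0x805;	/* example UVH_NODE_ID value */
	unsigned long pnode = 5, offset = 0x1000;

	unsigned long pnode_mask = (1UL << n_val) - 1;
	/* node-id bits above the pnode field, pre-shifted into place */
	unsigned long gnode_upper = (node_id & ~pnode_mask) << m_val;
	/* a global address: upper node bits | pnode | per-node offset */
	unsigned long gpa = gnode_upper | (pnode << m_val) | offset;

	printf("pnode_mask 0x%lx, gnode_upper 0x%lx, gpa 0x%lx\n",
	       pnode_mask, gnode_upper, gpa);
	return 0;
}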
arch/x86/kernel/i8259.c +8 −14

@@ -297,34 +297,28 @@ void init_8259A(int auto_eoi)
 	 * outb_pic - this has to work on a wide range of PC hardware.
 	 */
 	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
-#ifndef CONFIG_X86_64
-	outb_pic(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
-	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
-#else /* CONFIG_X86_64 */
-	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
+
+	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
+	   to 0x20-0x27 on i386 */
 	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
+
 	/* 8259A-1 (the master) has a slave on IR2 */
-	outb_pic(0x04, PIC_MASTER_IMR);
-#endif /* CONFIG_X86_64 */
+	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);
+
 	if (auto_eoi)	/* master does Auto EOI */
 		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
 	else		/* master expects normal EOI */
 		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
 
 	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
-#ifndef CONFIG_X86_64
-	outb_pic(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
-	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
-	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
-#else /* CONFIG_X86_64 */
-	/* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
+
+	/* ICW2: 8259A-2 IR0-7 mapped to IRQ8_VECTOR */
 	outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
 	/* 8259A-2 is a slave on master's IR2 */
 	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
 	/* (slave's support for AEOI in flat mode is to be investigated) */
 	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
-#endif /* CONFIG_X86_64 */
+
 	if (auto_eoi)
 		/*
 		 * In AEOI mode we just have to mask the interrupt
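The i8259.c hunk folds the #ifdef CONFIG_X86_64 branches into one ICW sequence: per the diff's own comments, IRQ0_VECTOR/IRQ8_VECTOR expand to 0x20/0x28 on i386 and 0x30/0x38 on x86-64, and the removed 64-bit literal 0x04 is just 1U << PIC_CASCADE_IR. Note the asymmetry the code preserves: the master's ICW3 is a bitmask of cascaded IRs, while the slave's ICW3 is its cascade ID. The user-space trace below replays the normal-EOI path with standard PC ports; the x86-64 vector values and the ICW4 default of 0x01 (8086 mode) are assumptions, not shown in the patch.

#include <stdio.h>

#define PIC_MASTER_CMD	0x20
#define PIC_MASTER_IMR	0x21
#define PIC_SLAVE_CMD	0xa0
#define PIC_SLAVE_IMR	0xa1
#define PIC_CASCADE_IR	2
#define IRQ0_VECTOR	0x30	/* assumed x86-64 layout */
#define IRQ8_VECTOR	0x38
#define ICW4_8086	0x01	/* assumed MASTER/SLAVE_ICW4_DEFAULT */

static void outb_pic(unsigned char value, unsigned int port)
{
	/* print the write instead of touching hardware */
	printf("outb(0x%02x, 0x%02x)\n", value, port);
}

int main(void)
{
	outb_pic(0x11, PIC_MASTER_CMD);			/* ICW1: edge, cascade, ICW4 needed */
	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);		/* ICW2: base vector */
	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* ICW3: slave mask, bit 2 */
	outb_pic(ICW4_8086, PIC_MASTER_IMR);		/* ICW4: normal EOI */

	outb_pic(0x11, PIC_SLAVE_CMD);
	outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* ICW3: cascade ID, plain 2 */
	outb_pic(ICW4_8086, PIC_SLAVE_IMR);
	return 0;
}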