Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a939098a authored by Glauber Costa, committed by Ingo Molnar
Browse files

x86: move x86_64 gdt closer to i386



i386 and x86_64 used two different schemes for maintaining the gdt.
With this patch, x86_64 initial gdt table is defined in a .c file,
same way as i386 is now. Also, we call it "gdt_page", and the descriptor,
"early_gdt_descr". This way we achieve common naming, which can allow for
more code integration.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 736f12bf
Loading
Loading
Loading
Loading
+5 −43
Original line number Diff line number Diff line
@@ -203,7 +203,7 @@ ENTRY(secondary_startup_64)
	 * addresses where we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)
	lgdt	early_gdt_descr(%rip)

	/* set up data segments. actually 0 would do too */
	movl $__KERNEL_DS,%eax
@@ -391,53 +391,15 @@ NEXT_PAGE(level2_spare_pgt)

	.data
	.align 16
	.globl cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
	.quad   per_cpu__gdt_page

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout 
 */
		 		
	.section .data.page_aligned, "aw"
	.align PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
	
ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS  */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad   0,0,0			/* three TLS descriptors */ 
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:	
	/* asm/segment.h:GDT_ENTRIES must match this */	
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
	
	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
+1 −4
Original line number Diff line number Diff line
@@ -202,11 +202,8 @@ void __cpuinit cpu_init (void)
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	if (cpu)
		memcpy(get_cpu_gdt_table(cpu), cpu_gdt_table, GDT_SIZE);

	cpu_gdt_descr[cpu].size = GDT_SIZE;
	load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
	switch_to_new_gdt();
	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+17 −2
Original line number Diff line number Diff line
@@ -81,8 +81,6 @@
#define ARCH_SETUP
#endif

#include "cpu/cpu.h"

/*
 * Machine setup..
 */
@@ -228,6 +226,23 @@ static inline void copy_edd(void)
}
#endif

/*
 * memory_setup - default boot-time memory-map setup hook.
 *
 * Declared weak so that, when CONFIG_PARAVIRT is enabled, paravirt.c can
 * provide its own override; this default simply delegates to the
 * machine-specific memory setup routine.
 * NOTE(review): __init means this is discarded after boot — callers must
 * only invoke it during early initialization.
 */
void __attribute__((weak)) __init memory_setup(void)
{
       machine_specific_memory_setup();
}

/*
 * switch_to_new_gdt - switch this CPU from the boot GDT to its per-cpu GDT.
 *
 * Until this runs, the current GDT points %fs at the "master" per-cpu
 * area; afterwards the CPU uses the GDT in its own per-cpu gdt_page.
 * Builds a descriptor (base = this CPU's GDT, limit = GDT_SIZE - 1, per
 * the x86 convention that the GDT limit is size-in-bytes minus one) and
 * loads it with lgdt via load_gdt().
 */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	/* Base address of the per-cpu GDT for the currently running CPU. */
	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}

/*
 * setup_arch - architecture-specific boot-time initializations
 *
+3 −9
Original line number Diff line number Diff line
@@ -849,14 +849,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK(&c_idle.work, do_fork_idle);
#ifdef CONFIG_X86_64
	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

#ifdef CONFIG_X86_64
	/* Allocate node local memory for AP pdas */
	if (cpu > 0) {
		boot_error = get_local_pda(cpu);
@@ -898,7 +892,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
#ifdef CONFIG_X86_32
	per_cpu(current_task, cpu) = c_idle.idle;
	init_gdt(cpu);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	c_idle.idle->thread.ip = (unsigned long) start_secondary;
	/* Stack for startup_32 can be just as for start_secondary onwards */
	irq_ctx_init(cpu);
@@ -908,6 +901,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
	initial_code = (unsigned long)start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
#endif
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	stack_start.sp = (void *) c_idle.idle->thread.sp;

	/* start_ip had better be page-aligned! */
@@ -1252,8 +1246,8 @@ void __init native_smp_prepare_boot_cpu(void)
	int me = smp_processor_id();
#ifdef CONFIG_X86_32
	init_gdt(me);
	switch_to_new_gdt();
#endif
	switch_to_new_gdt();
	/* already set me in cpu_online_map in boot_cpu_init() */
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
+0 −5
Original line number Diff line number Diff line
@@ -53,8 +53,3 @@ EXPORT_SYMBOL(init_level4_pgt);
EXPORT_SYMBOL(load_gs_index);

EXPORT_SYMBOL(_proxy_pda);

#ifdef CONFIG_PARAVIRT
/* Virtualized guests may want to use it */
EXPORT_SYMBOL_GPL(cpu_gdt_descr);
#endif
Loading