arch/arm/kernel/vmlinux.lds.S  (+1 −0)

@@ -64,6 +64,7 @@ SECTIONS
 	__initramfs_end = .;
 #endif
 	. = ALIGN(4096);
+	__per_cpu_load = .;
 	__per_cpu_start = .;
 		*(.data.percpu.page_aligned)
 		*(.data.percpu)
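The only change here is the new __per_cpu_load symbol, which marks where the static percpu area sits in the kernel image, so generic setup code can find and copy the original data without arch-specific linker knowledge. A minimal sketch of that consumer side, modeled on the x86 setup_pcpu_embed() code removed later in this series (the helper name copy_static_percpu is illustrative only):

/*
 * Sketch: duplicate the static percpu area for each possible CPU.
 * __per_cpu_load, __per_cpu_start and __per_cpu_end are provided by
 * the linker script; the loop mirrors the old x86 embed allocator.
 */
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];

static void __init copy_static_percpu(void *base, size_t unit_size)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		memcpy(base + cpu * unit_size, __per_cpu_load, static_size);
}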
arch/ia64/kernel/vmlinux.lds.S  (+2 −10)

@@ -213,17 +213,9 @@ SECTIONS
   { *(.data.cacheline_aligned) }
 
   /* Per-cpu data: */
-  percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
-  __phys_per_cpu_start = .;
-  .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
-	{
-		__per_cpu_start = .;
-		*(.data.percpu.page_aligned)
-		*(.data.percpu)
-		*(.data.percpu.shared_aligned)
-		__per_cpu_end = .;
-	}
+  PERCPU_VADDR(PERCPU_ADDR, :percpu)
+  __phys_per_cpu_start = __per_cpu_load;
   . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
 						 * into percpu page size
 						 */
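PERCPU_VADDR(vaddr, phdr) comes from include/asm-generic/vmlinux.lds.h and emits the same .data.percpu output section the hand-rolled block used to, with __per_cpu_load marking the load address. A sketch of the macro's shape as of this series (approximate, not a verbatim copy):

#define PERCPU_VADDR(vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;			\
	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
				- LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__per_cpu_start) = .;		\
		*(.data.percpu.first)				\
		*(.data.percpu.page_aligned)			\
		*(.data.percpu)					\
		*(.data.percpu.shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;		\
	} phdr							\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);

This is also why __phys_per_cpu_start can now be set from __per_cpu_load: the section is linked at the PERCPU_ADDR virtual address but loaded at __per_cpu_load, and ia64 wants the load-time start.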
arch/powerpc/kernel/vmlinux.lds.S  (+1 −8)

@@ -181,14 +181,7 @@ SECTIONS
 		__initramfs_end = .;
 	}
 #endif
-	. = ALIGN(PAGE_SIZE);
-	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
-		__per_cpu_start = .;
-		*(.data.percpu.page_aligned)
-		*(.data.percpu)
-		*(.data.percpu.shared_aligned)
-		__per_cpu_end = .;
-	}
+	PERCPU(PAGE_SIZE)
 
 	. = ALIGN(8);
 	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
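PERCPU(align) is the non-relocated flavor of the same macro, for archs that link the percpu section at its load address. Assuming it simply aligns and then invokes PERCPU_VADDR() with an empty vaddr, PERCPU(PAGE_SIZE) expands to roughly the block it replaces, plus the __per_cpu_load marker:

	. = ALIGN(PAGE_SIZE);
	__per_cpu_load = .;
	.data.percpu : AT(__per_cpu_load - LOAD_OFFSET) {
		__per_cpu_start = .;
		*(.data.percpu.page_aligned)
		*(.data.percpu)
		*(.data.percpu.shared_aligned)
		__per_cpu_end = .;
	}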
arch/x86/include/asm/percpu.h  (+0 −8)

@@ -43,14 +43,6 @@
 #else /* ...!ASSEMBLY */
 
 #include <linux/stringify.h>
-#include <asm/sections.h>
-
-#define __addr_to_pcpu_ptr(addr)					\
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
-		 + (unsigned long)__per_cpu_start)
-#define __pcpu_ptr_to_addr(ptr)						\
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
-		 - (unsigned long)__per_cpu_start)
 
 #ifdef CONFIG_SMP
 #define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
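These conversion macros are not gone; they become the generic default in mm/percpu.c, which an arch can still override with its own definitions. A sketch of the generic fallback (same bodies as the lines removed here, guarded so an arch-provided version wins):

/* mm/percpu.c (sketch): default address <-> percpu pointer conversion */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif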
arch/x86/kernel/setup_percpu.c  (+10 −53)

@@ -233,8 +233,8 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 		"%zu bytes\n", vm.addr, static_size);
 
 	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
-				     PERCPU_FIRST_CHUNK_RESERVE,
-				     PMD_SIZE, dyn_size, vm.addr, NULL);
+				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
+				     PMD_SIZE, vm.addr, NULL);
 	goto out_free_ar;
 
 enomem:

@@ -257,31 +257,13 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
  * Embedding allocator
  *
  * The first chunk is sized to just contain the static area plus
- * module and dynamic reserves, and allocated as a contiguous area
- * using bootmem allocator and used as-is without being mapped into
- * vmalloc area.  This enables the first chunk to piggy back on the
- * linear physical PMD mapping and doesn't add any additional pressure
- * to TLB.  Note that if the needed size is smaller than the minimum
- * unit size, the leftover is returned to the bootmem allocator.
+ * module and dynamic reserves and embedded into linear physical
+ * mapping so that it can use PMD mapping without additional TLB
+ * pressure.
  */
-static void *pcpue_ptr __initdata;
-static size_t pcpue_size __initdata;
-static size_t pcpue_unit_size __initdata;
-
-static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
-{
-	size_t off = (size_t)pageno << PAGE_SHIFT;
-
-	if (off >= pcpue_size)
-		return NULL;
-
-	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
-}
-
 static ssize_t __init setup_pcpu_embed(size_t static_size)
 {
-	unsigned int cpu;
-	size_t dyn_size;
+	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 
 	/*
 	 * If large page isn't supported, there's no benefit in doing

@@ -291,33 +273,8 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
 	if (!cpu_has_pse || pcpu_need_numa())
 		return -EINVAL;
 
-	/* allocate and copy */
-	pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
-			       PERCPU_DYNAMIC_RESERVE);
-	pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
-	dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
-
-	pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
-				       PAGE_SIZE);
-	if (!pcpue_ptr)
-		return -ENOMEM;
-
-	for_each_possible_cpu(cpu) {
-		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
-
-		free_bootmem(__pa(ptr + pcpue_size),
-			     pcpue_unit_size - pcpue_size);
-		memcpy(ptr, __per_cpu_load, static_size);
-	}
-
-	/* we're ready, commit */
-	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
-		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
-
-	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
-				      PERCPU_FIRST_CHUNK_RESERVE,
-				      pcpue_unit_size, dyn_size,
-				      pcpue_ptr, NULL);
+	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
 }
 
 /*

@@ -375,8 +332,8 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
 		pcpu4k_nr_static_pages, static_size);
 
 	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
-				     PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL,
-				     pcpu4k_populate_pte);
+				     PERCPU_FIRST_CHUNK_RESERVE, -1,
+				     -1, NULL, pcpu4k_populate_pte);
 	goto out_free_ar;
 
 enomem:
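Two separate cleanups meet in this file. The first and last hunks only reflect pcpu_setup_first_chunk() now taking @dyn_size before @unit_size. The larger change is that setup_pcpu_embed() loses its hand-rolled bootmem logic to a new generic helper, pcpu_embed_first_chunk(static_size, reserved_size, dyn_size, unit_size), where unit_size == -1 lets the allocator choose. A behavioral sketch reconstructed from the code deleted above (not the verbatim mm/percpu.c implementation; pcpue_get_page stands in for the helper's internal page callback):

/*
 * Sketch: what the generic embed helper does with the logic that used
 * to live in setup_pcpu_embed().  Allocate one contiguous bootmem
 * area, give the unused tail of each unit back, copy the static data,
 * then register the area as the first chunk.
 */
ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
				      ssize_t dyn_size, ssize_t unit_size)
{
	size_t chunk_size = PFN_ALIGN(static_size + reserved_size +
				      (dyn_size >= 0 ? dyn_size : 0));
	size_t usize;
	unsigned int cpu;
	void *base;

	/* -1 means "pick for me"; never go below the allocator minimum */
	usize = unit_size >= 0 ? (size_t)unit_size
			       : max_t(size_t, chunk_size, PCPU_MIN_UNIT_SIZE);

	base = __alloc_bootmem_nopanic(num_possible_cpus() * usize,
				       PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		void *ptr = base + cpu * usize;

		free_bootmem(__pa(ptr + chunk_size), usize - chunk_size);
		memcpy(ptr, __per_cpu_load, static_size);
	}

	/* note the new argument order: dyn_size before unit_size */
	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
				      reserved_size, dyn_size, usize,
				      base, NULL);
}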