Commit aafade24 authored by Andy Lutomirski, committed by H. Peter Anvin

x86-64, vdso: Do not allocate memory for the vDSO

We can map the vDSO straight from kernel data, saving a few page
allocations.  As an added bonus, the deleted code contained a memory
leak.

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Link: http://lkml.kernel.org/r/2c4ed5c2c2e93603790229e0c3403ae506ccc0cb.1311277573.git.luto@mit.edu
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent ae7bd11b
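
The idea in miniature: data that sits page-aligned inside the kernel image
already has backing struct pages, so the page array handed to the mm layer
can be filled with virt_to_page() instead of freshly allocated, copied
pages. A minimal C sketch of the pattern, assuming a page-aligned blob
delimited by linker-provided image_start[]/image_end[] symbols (those
names, the array size, and collect_image_pages() are illustrative, not
from the patch):

#include <linux/mm.h>
#include <asm/page.h>

extern char image_start[], image_end[];	/* page-aligned blob in kernel data */

static struct page *image_pages[16];	/* sized generously for the sketch */

/* Record the struct page behind each page of the image. No allocation,
 * no copying, and no failure path to get wrong.
 */
static void collect_image_pages(void)
{
	int npages = (image_end - image_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	for (i = 0; i < npages; i++)
		image_pages[i] = virt_to_page(image_start + i * PAGE_SIZE);
}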
arch/x86/vdso/vdso.S  +13 −2

+#include <asm/page_types.h>
+#include <linux/linkage.h>
 #include <linux/init.h>
 
-__INITDATA
+__PAGE_ALIGNED_DATA
 
 	.globl vdso_start, vdso_end
+	.align PAGE_SIZE
 vdso_start:
 	.incbin "arch/x86/vdso/vdso.so"
 vdso_end:
 
-__FINIT
+.previous
+
+	.globl vdso_pages
+	.bss
+	.align 8
+	.type vdso_pages, @object
+vdso_pages:
+	.zero (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
+	.size vdso_pages, .-vdso_pages
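
The .zero directive above reserves one 8-byte pointer per page of the
image, rounding the image size up to whole pages: a 5,000-byte vdso.so
spans two 4,096-byte pages, so 16 bytes are reserved. A rough C equivalent
of the reservation, with VDSO_IMAGE_SIZE standing in for the assembler
expression vdso_end - vdso_start (which C cannot evaluate at compile time):

#include <asm/page.h>

#define VDSO_IMAGE_SIZE	8192	/* illustrative stand-in for vdso_end - vdso_start */
#define VDSO_NPAGES	((VDSO_IMAGE_SIZE + PAGE_SIZE - 1) / PAGE_SIZE)

struct page *vdso_pages[VDSO_NPAGES];	/* lands in .bss, zero-initialized */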
arch/x86/vdso/vma.c  +6 −19

@@ -14,13 +14,14 @@
 #include <asm/vgtod.h>
 #include <asm/proto.h>
 #include <asm/vdso.h>
+#include <asm/page.h>
 
 unsigned int __read_mostly vdso_enabled = 1;
 
 extern char vdso_start[], vdso_end[];
 extern unsigned short vdso_sync_cpuid;
 
-static struct page **vdso_pages;
+extern struct page *vdso_pages[];
 static unsigned vdso_size;
 
 static void __init patch_vdso(void *vdso, size_t len)
@@ -54,7 +55,7 @@ static void __init patch_vdso(void *vdso, size_t len)
 	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
 }
 
-static int __init init_vdso_vars(void)
+static int __init init_vdso(void)
 {
 	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
 	int i;
@@ -62,26 +63,12 @@ static int __init init_vdso_vars(void)
 	patch_vdso(vdso_start, vdso_end - vdso_start);
 
 	vdso_size = npages << PAGE_SHIFT;
-	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
-	if (!vdso_pages)
-		goto oom;
-	for (i = 0; i < npages; i++) {
-		struct page *p;
-		p = alloc_page(GFP_KERNEL);
-		if (!p)
-			goto oom;
-		vdso_pages[i] = p;
-		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
-	}
+	for (i = 0; i < npages; i++)
+		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
 
 	return 0;
-
- oom:
-	printk("Cannot allocate vdso\n");
-	vdso_enabled = 0;
-	return -ENOMEM;
 }
-subsys_initcall(init_vdso_vars);
+subsys_initcall(init_vdso);
 
 struct linux_binprm;
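
Two notes on the removed code. The leak mentioned in the commit message is
in the oom path: a failed alloc_page() jumped to oom without freeing either
the kmalloc()ed vdso_pages array or the pages allocated in earlier
iterations. The replacement has no failure path at all. Elsewhere in vma.c
(untouched by this patch) the array is handed to the mm layer; a sketch of
that consumer, assuming the era's install_special_mapping(mm, addr, len,
vm_flags, pages) signature, with flags and error handling trimmed:

/* Sketch: map the vDSO pages into a process's address space. In the
 * real code, addr comes from a randomized placement hint.
 */
static int map_vdso(struct mm_struct *mm, unsigned long addr)
{
	return install_special_mapping(mm, addr, vdso_size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       vdso_pages);
}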