Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a5bba930 authored by Benjamin Herrenschmidt's avatar Benjamin Herrenschmidt Committed by Paul Mackerras
Browse files

[PATCH] powerpc vdso updates



This patch cleans up some locking & error handling in the ppc vdso and
moves the vdso base pointer from the thread struct to the mm context
where it more logically belongs. It brings the powerpc implementation
closer to Ingo's new x86 one and also adds an arch_vma_name() function
allowing [vdso] to be printed in /proc/<pid>/maps if Ingo's x86 vdso patch is
also applied.

Signed-off-by: default avatarBenjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: default avatarPaul Mackerras <paulus@samba.org>
parent 98a90c02
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -757,10 +757,10 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	if (vdso32_rt_sigtramp && current->thread.vdso_base) {
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, frame, 0))
			goto badframe;
		regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		if (save_user_regs(regs, frame, __NR_rt_sigreturn))
			goto badframe;
@@ -1029,10 +1029,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
	    || __put_user(sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && current->thread.vdso_base) {
	if (vdso32_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, &frame->mctx, 0))
			goto badframe;
		regs->link = current->thread.vdso_base + vdso32_sigtramp;
		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
			goto badframe;
+2 −2
Original line number Diff line number Diff line
@@ -394,8 +394,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
	current->thread.fpscr.val = 0;

	/* Set up to return from userspace. */
	if (vdso64_rt_sigtramp && current->thread.vdso_base) {
		regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
	} else {
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
+36 −21
Original line number Diff line number Diff line
@@ -223,6 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
@@ -237,20 +238,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
	vdso_base = VDSO32_MBASE;
#endif

	current->thread.vdso_base = 0;
	current->mm->context.vdso_base = 0;

	/* vDSO has a problem and was disabled, just don't "enable" it for the
	 * process
	 */
	if (vdso_pages == 0)
		return 0;

	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma == NULL)
		return -ENOMEM;

	memset(vma, 0, sizeof(*vma));

	/* Add a page to the vdso size for the data page */
	vdso_pages ++;

@@ -259,17 +253,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
	 * at vdso_base which is the "natural" base for it, but we might fail
	 * and end up putting it elsewhere.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      vdso_pages << PAGE_SHIFT, 0, 0);
	if (vdso_base & ~PAGE_MASK) {
		kmem_cache_free(vm_area_cachep, vma);
		return (int)vdso_base;
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto fail_mmapsem;
	}

	current->thread.vdso_base = vdso_base;

	/* Allocate a VMA structure and fill it up */
	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
	if (vma == NULL) {
		rc = -ENOMEM;
		goto fail_mmapsem;
	}
	vma->vm_mm = mm;
	vma->vm_start = current->thread.vdso_base;
	vma->vm_start = vdso_base;
	vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);

	/*
@@ -287,18 +287,33 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
	vma->vm_ops = &vdso_vmops;

	down_write(&mm->mmap_sem);
	if (insert_vm_struct(mm, vma)) {
	/* Insert new VMA */
	rc = insert_vm_struct(mm, vma);
	if (rc)
		goto fail_vma;

	/* Put vDSO base into mm struct and account for memory usage */
	current->mm->context.vdso_base = vdso_base;
	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	up_write(&mm->mmap_sem);
	return 0;

 fail_vma:
	kmem_cache_free(vm_area_cachep, vma);
		return -ENOMEM;
	}
	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 fail_mmapsem:
	up_write(&mm->mmap_sem);
	return rc;
}

	return 0;
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}



static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
				  unsigned long *size)
{
+1 −1
Original line number Diff line number Diff line
@@ -294,7 +294,7 @@ do { \
	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);			\
	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);			\
	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);			\
	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base)	\
	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base)	\
} while (0)

/* PowerPC64 relocations defined by the ABIs */
+1 −0
Original line number Diff line number Diff line
@@ -360,6 +360,7 @@ typedef struct {
#ifdef CONFIG_HUGETLB_PAGE
	u16 low_htlb_areas, high_htlb_areas;
#endif
	unsigned long vdso_base;
} mm_context_t;


Loading