arch/alpha/include/asm/unistd.h (+1 −0)

@@ -482,6 +482,7 @@
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_KERNEL_EXECVE
 
 /* "Conditional" syscalls.  What we want is
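Defining __ARCH_WANT_KERNEL_EXECVE opts alpha into the generic kernel_execve() in fs/exec.c instead of the arch-private copy removed below. The generic helper runs do_execve() directly against current_pt_regs() and, on success, never returns to its caller: it jumps into the arch-supplied ret_from_kernel_execve() with the register frame as its argument. A sketch of that generic helper, paraphrased from memory of the fs/exec.c of this era (exact casts and comments may differ):

	int kernel_execve(const char *filename,
			  const char *const argv[],
			  const char *const envp[])
	{
		struct pt_regs *p = current_pt_regs();
		int ret;

		ret = do_execve(filename,
				(const char __user *const __user *)argv,
				(const char __user *const __user *)envp, p);
		if (ret < 0)
			return ret;

		/* Success: we will not return to the caller.  Control
		   resumes in userspace via ret_from_kernel_execve(p). */
		ret_from_kernel_execve(p);
	}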
arch/alpha/kernel/entry.S (+13 −0)

@@ -626,6 +626,19 @@ ret_from_kernel_thread:
 	jmp	$31, sys_exit
 .end ret_from_kernel_thread
 
+.globl	ret_from_kernel_execve
+.align	4
+.ent	ret_from_kernel_execve
+ret_from_kernel_execve:
+	mov	$16, $sp
+	/* Avoid the HAE being gratuitously wrong, to avoid restoring it.  */
+	ldq	$2, alpha_mv+HAE_CACHE
+	stq	$2, 152($sp)	/* HAE */
+	mov	$31, $19	/* to disable syscall restarts */
+	br	$31, ret_to_user
+.end ret_from_kernel_execve
+
 /*
  * Special system calls.  Most of these are special in that they either
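On alpha the first function argument arrives in $16, so "mov $16, $sp" repoints the kernel stack at the pt_regs frame the generic helper passed in; ret_to_user then restores userspace state from that frame. Storing alpha_mv.hae_cache into the frame's HAE slot at offset 152 keeps the restore path from seeing a gratuitously wrong HAE, and "mov $31, $19" ($31 is the always-zero register) clears the register the signal code inspects, disabling syscall-restart handling for this frame. A userspace-compilable sketch of where offset 152 comes from, using a hypothetical mirror of the head of alpha's struct pt_regs (field order recalled from arch/alpha/include/asm/ptrace.h; an assumption for illustration, not a copy of the kernel header):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct pt_regs_sketch {
		uint64_t r0, r1, r2, r3, r4,
			 r5, r6, r7, r8;	/*  9 quads, offsets 0..64   */
		uint64_t r19, r20, r21, r22, r23,
			 r24, r25, r26, r27, r28;	/* 10 quads, offsets 72..144 */
		uint64_t hae;			/* 19 * 8 = 152              */
		/* trap_a0..trap_a2, ps, pc, gp, r16..r18 follow */
	};

	int main(void)
	{
		/* Prints 152, matching the "stq $2, 152($sp)" HAE slot above. */
		printf("hae offset: %zu\n", offsetof(struct pt_regs_sketch, hae));
		return 0;
	}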
arch/alpha/kernel/process.c (+0 −19)

@@ -435,22 +435,3 @@ get_wchan(struct task_struct *p)
 	}
 	return pc;
 }
-
-int kernel_execve(const char *path, const char *const argv[], const char *const envp[])
-{
-	/* Avoid the HAE being gratuitously wrong, which would cause us
-	   to do the whole turn off interrupts thing and restore it.  */
-	struct pt_regs regs = {.hae = alpha_mv.hae_cache};
-	int err = do_execve(path, argv, envp, &regs);
-	if (!err) {
-		struct pt_regs *p = current_pt_regs();
-		/* copy regs to normal position and off to userland we go... */
-		*p = regs;
-		__asm__ __volatile__ (
-			"mov %0, $sp;"
-			"br $31, ret_from_sys_call"
-			: : "r"(p));
-	}
-	return err;
-}
-EXPORT_SYMBOL(kernel_execve);
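The deleted arch-private kernel_execve() did the same job in C: it built a pt_regs on its own stack (pre-seeding hae for the same reason as the entry.S stub), ran do_execve() against it, copied the result over the real register frame, and hand-rolled the jump to ret_from_sys_call in inline assembly. The generic helper plus the short entry.S stub replace all of it, and the EXPORT_SYMBOL goes with it. Callers are untouched by the conversion; for instance, init/main.c of this era started init roughly like this (paraphrased from memory; the array sizes here are placeholders, not the kernel's values):

	static const char *argv_init[2] = { NULL, NULL };
	static const char *envp_init[2] = { "HOME=/", NULL };

	static int run_init_process(const char *init_filename)
	{
		argv_init[0] = init_filename;
		/* On success this never returns: the calling kernel thread
		   resumes in userspace inside the new program. */
		return kernel_execve(init_filename, argv_init, envp_init);
	}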