Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8b51304e, authored by Bodo Stroesser, committed by Linus Torvalds
Browse files

[PATCH] uml: increase granularity of host capability checking



This change enables SKAS0/SKAS3 to work with all combinations of /proc/mm and
PTRACE_FAULTINFO being available or not.

It also changes the initialization of proc_mm and ptrace_faultinfo slightly,
to ease forcing SKAS0 on a patched host.  Forcing UML to run without /proc/mm
or PTRACE_FAULTINFO via a command-line parameter can be implemented with a
setup function that resets the related variable.

Signed-off-by: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 60d339f6
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -33,7 +33,7 @@ extern void *protect(struct mm_id * mm_idp, unsigned long addr,
		     unsigned long len, int r, int w, int x, int done,
		     void *data);
extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
extern int new_mm(int from);
extern int new_mm(int from, unsigned long stack);
extern int start_userspace(unsigned long stub_stack);
extern int copy_context_skas0(unsigned long stack, int pid);
extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
+28 −24
Original line number Diff line number Diff line
@@ -77,23 +77,14 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
	struct mm_struct *cur_mm = current->mm;
	struct mm_id *cur_mm_id = &cur_mm->context.skas.id;
	struct mm_id *mm_id = &mm->context.skas.id;
	unsigned long stack;
	int from, ret;
	unsigned long stack = 0;
	int from, ret = -ENOMEM;

	if(proc_mm){
		if((cur_mm != NULL) && (cur_mm != &init_mm))
			from = cur_mm->context.skas.id.u.mm_fd;
		else from = -1;
	if(!proc_mm || !ptrace_faultinfo){
		stack = get_zeroed_page(GFP_KERNEL);
		if(stack == 0)
			goto out;

		ret = new_mm(from);
		if(ret < 0){
			printk("init_new_context_skas - new_mm failed, "
			       "errno = %d\n", ret);
			return ret;
		}
		mm_id->u.mm_fd = ret;
	}
	else {
		/* This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
@@ -103,20 +94,30 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if(ret)
			goto out;

		ret = -ENOMEM;
		stack = get_zeroed_page(GFP_KERNEL);
		if(stack == 0)
			goto out;
		mm_id->stack = stack;
			goto out_free;

		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
		if(ret)
			goto out_free;

		mm->nr_ptes--;
	}
	mm_id->stack = stack;

	if(proc_mm){
		if((cur_mm != NULL) && (cur_mm != &init_mm))
			from = cur_mm_id->u.mm_fd;
		else from = -1;

		ret = new_mm(from, stack);
		if(ret < 0){
			printk("init_new_context_skas - new_mm failed, "
			       "errno = %d\n", ret);
			goto out_free;
		}
		mm_id->u.mm_fd = ret;
	}
	else {
		if((cur_mm != NULL) && (cur_mm != &init_mm))
			mm_id->u.pid = copy_context_skas0(stack,
							  cur_mm_id->u.pid);
@@ -126,6 +127,7 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
	return 0;

 out_free:
	if(mm_id->stack != 0)
		free_page(mm_id->stack);
 out:
	return ret;
@@ -137,8 +139,10 @@ void destroy_context_skas(struct mm_struct *mm)

	if(proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else {
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

	if(!proc_mm || !ptrace_faultinfo){
		free_page(mmu->id.stack);
		free_page(mmu->last_page_table);
	}
+61 −8
Original line number Diff line number Diff line
@@ -138,6 +138,8 @@ static void handle_trap(int pid, union uml_pt_regs *regs, int local_using_sysemu
}

extern int __syscall_stub_start;
int stub_code_fd = -1;
__u64 stub_code_offset;

static int userspace_tramp(void *stack)
{
@@ -152,31 +154,31 @@ static int userspace_tramp(void *stack)
		/* This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		__u64 offset;

		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) UML_CONFIG_STUB_CODE, page_size(),
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE,
			      stub_code_fd, stub_code_offset);
		if(addr == MAP_FAILED){
			printk("mapping mmap stub failed, errno = %d\n",
			printk("mapping stub code failed, errno = %d\n",
			       errno);
			exit(1);
		}

		if(stack != NULL){
			int fd;
			__u64 offset;

			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) UML_CONFIG_STUB_DATA, page_size(),
				    PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if(addr == MAP_FAILED){
				printk("mapping segfault stack failed, "
				printk("mapping stub stack failed, "
				       "errno = %d\n", errno);
				exit(1);
			}
		}
	}
	if(!ptrace_faultinfo && (stack != NULL)){
	if(!ptrace_faultinfo){
		unsigned long v = UML_CONFIG_STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;
@@ -202,6 +204,10 @@ int start_userspace(unsigned long stub_stack)
	unsigned long sp;
	int pid, status, n, flags;

	if ( stub_code_fd == -1 )
		stub_code_fd = phys_mapping(to_phys(&__syscall_stub_start),
					    &stub_code_offset);

	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if(stack == MAP_FAILED)
@@ -363,6 +369,53 @@ int copy_context_skas0(unsigned long new_stack, int pid)
	return pid;
}

/*
 * Used only when proc_mm is available while PTRACE_FAULTINFO isn't.
 * Opening /proc/mm creates a new mm_context which lacks the stub pages,
 * so we map them into the new address space by writing MM_MMAP requests
 * to the /proc/mm file descriptor.
 *
 * fd    - open /proc/mm descriptor for the target mm_context
 * code  - virtual address at which to map the stub code page
 * data  - virtual address at which to map the stub data (stack) page
 * stack - physical-side address of the stub stack page, or 0 if none
 */
void map_stub_pages(int fd, unsigned long code,
		    unsigned long data, unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;

	/* Map the stub code page: executable, private, backed by the
	 * mapping cached in stub_code_fd/stub_code_offset at
	 * start_userspace() time.
	 */
	mmop = ((struct proc_mm_op) { .op        = MM_MMAP,
				      .u         =
				      { .mmap    =
					{ .addr    = code,
					  .len     = PAGE_SIZE,
					  .prot    = PROT_EXEC,
					  .flags   = MAP_FIXED | MAP_PRIVATE,
					  .fd      = stub_code_fd,
					  .offset  = stub_code_offset
	} } });
	n = os_write_file(fd, &mmop, sizeof(mmop));
	if(n != sizeof(mmop))
		panic("map_stub_pages : /proc/mm map for code failed, "
		      "err = %d\n", -n);

	/* Optionally map the stub stack page: read/write and MAP_SHARED,
	 * so writes are visible through the underlying physical mapping.
	 */
	if ( stack ) {
		__u64 map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op        = MM_MMAP,
				  .u         =
				  { .mmap    =
				    { .addr    = data,
				      .len     = PAGE_SIZE,
				      .prot    = PROT_READ | PROT_WRITE,
				      .flags   = MAP_FIXED | MAP_SHARED,
				      .fd      = map_fd,
				      .offset  = map_offset
		} } });
		n = os_write_file(fd, &mmop, sizeof(mmop));
		if(n != sizeof(mmop))
			panic("map_stub_pages : /proc/mm map for data failed, "
			      "err = %d\n", -n);
	}
}

void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
		void (*handler)(int))
{
+6 −1
Original line number Diff line number Diff line
@@ -129,7 +129,9 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
	return(0);
}

int new_mm(int from)
extern void map_stub_pages(int fd, unsigned long code,
			   unsigned long data, unsigned long stack);
int new_mm(int from, unsigned long stack)
{
	struct proc_mm_op copy;
	int n, fd;
@@ -148,6 +150,9 @@ int new_mm(int from)
			       "err = %d\n", -n);
	}

	if(!ptrace_faultinfo)
		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);

	return(fd);
}