Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 28b2ee20 authored by Rik van Riel's avatar Rik van Riel Committed by Linus Torvalds
Browse files

access_process_vm device memory infrastructure



In order to be able to debug things like the X server and programs using
the PPC Cell SPUs, the debugger needs to be able to access device memory
through ptrace and /proc/pid/mem.

This patch:

Add the generic_access_phys access function and put the hooks in place
to allow access_process_vm to access device or PPC Cell SPU memory.

[riel@redhat.com: Add documentation for the vm_ops->access function]
Signed-off-by: default avatarRik van Riel <riel@redhat.com>
Signed-off-by: default avatarBenjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Acked-by: default avatarPeter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 0d71d10a
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -510,6 +510,7 @@ prototypes:
	void (*close)(struct vm_area_struct*);
	int (*fault)(struct vm_area_struct*, struct vm_fault *);
	int (*page_mkwrite)(struct vm_area_struct *, struct page *);
	int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);

locking rules:
		BKL	mmap_sem	PageLocked(page)
@@ -517,6 +518,7 @@ open: no yes
close:		no	yes
fault:		no	yes
page_mkwrite:	no	yes		no
access:		no	yes

	->page_mkwrite() is called when a previously read-only page is
about to become writeable. The file system is responsible for
@@ -525,6 +527,11 @@ taking to lock out truncate, the page range should be verified to be
within i_size. The page mapping should also be checked that it is not
NULL.

	->access() is called when get_user_pages() fails in
access_process_vm(), typically used to debug a process through
/proc/pid/mem or ptrace.  This function is needed only for
VM_IO | VM_PFNMAP VMAs.

================================================================================
			Dubious stuff

+3 −0
Original line number Diff line number Diff line
@@ -31,6 +31,9 @@ config KRETPROBES
	def_bool y
	depends on KPROBES && HAVE_KRETPROBES

config HAVE_IOREMAP_PROT
	def_bool n

config HAVE_KPROBES
	def_bool n

+1 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@ config X86
	select HAVE_UNSTABLE_SCHED_CLOCK
	select HAVE_IDE
	select HAVE_OPROFILE
	select HAVE_IOREMAP_PROT
	select HAVE_KPROBES
	select HAVE_KRETPROBES
	select HAVE_DYNAMIC_FTRACE
+8 −0
Original line number Diff line number Diff line
@@ -330,6 +330,14 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
	return (void __iomem *)ret;
}

/*
 * ioremap_prot - remap physical memory with caller-supplied protection bits.
 * @phys_addr: physical address to map
 * @size: length of the mapping in bytes
 * @prot_val: page-protection value; only the cache-attribute bits
 *            (_PAGE_CACHE_MASK) are honored here
 *
 * Thin wrapper around __ioremap_caller(); exists so generic code (e.g. the
 * ->access() path used by access_process_vm for VM_IO/VM_PFNMAP VMAs) can
 * request a mapping that matches a VMA's existing cache attributes.
 * NOTE(review): bits of prot_val outside _PAGE_CACHE_MASK are silently
 * dropped — callers presumably pass a full pgprot value; confirm at call sites.
 */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
+2 −0
Original line number Diff line number Diff line
@@ -110,6 +110,8 @@ static inline void *phys_to_virt(unsigned long address)
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
Loading