Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 10c43d2e authored by Pekka Paalanen, committed by Thomas Gleixner
Browse files

x86: explicit call to mmiotrace in do_page_fault()



The custom page fault handler list is replaced with a single function
pointer. All related functions and variables are renamed for
mmiotrace.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: pq@iki.fi
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 63ffa3e4
Loading
Loading
Loading
Loading
+6 −8
Original line number Original line Diff line number Diff line
@@ -168,20 +168,18 @@ config IOMMU_LEAK
	  Add a simple leak tracer to the IOMMU code. This is useful when you
	  Add a simple leak tracer to the IOMMU code. This is useful when you
	  are debugging a buggy device driver that leaks IOMMU mappings.
	  are debugging a buggy device driver that leaks IOMMU mappings.


config PAGE_FAULT_HANDLERS
config MMIOTRACE_HOOKS
	bool "Custom page fault handlers"
	bool
	depends on DEBUG_KERNEL
	default n
	help
	  Allow the use of custom page fault handlers. A kernel module may
	  register a function that is called on every page fault. Custom
	  handlers are used by some debugging and reverse engineering tools.


config MMIOTRACE
config MMIOTRACE
	tristate "Memory mapped IO tracing"
	tristate "Memory mapped IO tracing"
	depends on DEBUG_KERNEL && PAGE_FAULT_HANDLERS && RELAY && DEBUG_FS
	depends on DEBUG_KERNEL && RELAY && DEBUG_FS
	select MMIOTRACE_HOOKS
	default n
	default n
	help
	help
	  This will build a kernel module called mmiotrace.
	  This will build a kernel module called mmiotrace.
	  Making this a built-in is heavily discouraged.


	  Mmiotrace traces Memory Mapped I/O access and is meant for debugging
	  Mmiotrace traces Memory Mapped I/O access and is meant for debugging
	  and reverse engineering. The kernel module offers wrapped
	  and reverse engineering. The kernel module offers wrapped
+7 −7
Original line number Original line Diff line number Diff line
@@ -51,10 +51,6 @@ static LIST_HEAD(kmmio_probes);


static struct kmmio_context kmmio_ctx[NR_CPUS];
static struct kmmio_context kmmio_ctx[NR_CPUS];


static struct pf_handler kmmio_pf_hook = {
	.handler = kmmio_page_fault
};

static struct notifier_block nb_die = {
static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
	.notifier_call = kmmio_die_notifier
};
};
@@ -77,7 +73,8 @@ void cleanup_kmmio(void)
	 * kmmio_page_table, kmmio_probes
	 * kmmio_page_table, kmmio_probes
	 */
	 */
	if (handler_registered) {
	if (handler_registered) {
		unregister_page_fault_handler(&kmmio_pf_hook);
		if (mmiotrace_unregister_pf(&kmmio_page_fault))
			BUG();
		synchronize_rcu();
		synchronize_rcu();
	}
	}
	unregister_die_notifier(&nb_die);
	unregister_die_notifier(&nb_die);
@@ -343,7 +340,10 @@ int register_kmmio_probe(struct kmmio_probe *p)
	}
	}


	if (!handler_registered) {
	if (!handler_registered) {
		register_page_fault_handler(&kmmio_pf_hook);
		if (mmiotrace_register_pf(&kmmio_page_fault))
			printk(KERN_ERR "mmiotrace: Cannot register page "
					"fault handler.\n");
		else
			handler_registered++;
			handler_registered++;
	}
	}


+34 −32
Original line number Original line Diff line number Diff line
@@ -49,53 +49,55 @@
#define PF_RSVD		(1<<3)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
#define PF_INSTR	(1<<4)


#ifdef CONFIG_PAGE_FAULT_HANDLERS
#ifdef CONFIG_MMIOTRACE_HOOKS
static HLIST_HEAD(pf_handlers); /* protected by RCU */
static pf_handler_func mmiotrace_pf_handler; /* protected by RCU */
static DEFINE_SPINLOCK(pf_handlers_writer);
static DEFINE_SPINLOCK(mmiotrace_handler_lock);


void register_page_fault_handler(struct pf_handler *new_pfh)
int mmiotrace_register_pf(pf_handler_func new_pfh)
{
{
	int ret = 0;
	unsigned long flags;
	unsigned long flags;
	spin_lock_irqsave(&pf_handlers_writer, flags);
	spin_lock_irqsave(&mmiotrace_handler_lock, flags);
	hlist_add_head_rcu(&new_pfh->hlist, &pf_handlers);
	if (mmiotrace_pf_handler)
	spin_unlock_irqrestore(&pf_handlers_writer, flags);
		ret = -EBUSY;
	else
		mmiotrace_pf_handler = new_pfh;
	spin_unlock_irqrestore(&mmiotrace_handler_lock, flags);
	return ret;
}
}
EXPORT_SYMBOL_GPL(register_page_fault_handler);
EXPORT_SYMBOL_GPL(mmiotrace_register_pf);


/**
/**
 * unregister_page_fault_handler:
 * mmiotrace_unregister_pf:
 * The caller must ensure @old_pfh is not in use anymore before freeing it.
 * The caller must ensure @old_pfh is not in use anymore before freeing it.
 * This function does not guarantee it. The list of handlers is protected by
 * This function does not guarantee it. The handler function pointer is
 * RCU, so you can do this by e.g. calling synchronize_rcu().
 * protected by RCU, so you can do this by e.g. calling synchronize_rcu().
 */
 */
void unregister_page_fault_handler(struct pf_handler *old_pfh)
int mmiotrace_unregister_pf(pf_handler_func old_pfh)
{
{
	int ret = 0;
	unsigned long flags;
	unsigned long flags;
	spin_lock_irqsave(&pf_handlers_writer, flags);
	spin_lock_irqsave(&mmiotrace_handler_lock, flags);
	hlist_del_rcu(&old_pfh->hlist);
	if (mmiotrace_pf_handler != old_pfh)
	spin_unlock_irqrestore(&pf_handlers_writer, flags);
		ret = -EPERM;
	else
		mmiotrace_pf_handler = NULL;
	spin_unlock_irqrestore(&mmiotrace_handler_lock, flags);
	return ret;
}
}
EXPORT_SYMBOL_GPL(unregister_page_fault_handler);
EXPORT_SYMBOL_GPL(mmiotrace_unregister_pf);
#endif
#endif /* CONFIG_MMIOTRACE_HOOKS */


/* returns non-zero if do_page_fault() should return */
/* returns non-zero if do_page_fault() should return */
static int handle_custom_pf(struct pt_regs *regs, unsigned long error_code,
static inline int call_mmiotrace(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
					unsigned long address)
{
{
#ifdef CONFIG_PAGE_FAULT_HANDLERS
#ifdef CONFIG_MMIOTRACE_HOOKS
	int ret = 0;
	int ret = 0;
	struct pf_handler *cur;
	struct hlist_node *ncur;

	if (hlist_empty(&pf_handlers))
		return 0;

	rcu_read_lock();
	rcu_read_lock();
	hlist_for_each_entry_rcu(cur, ncur, &pf_handlers, hlist) {
	if (mmiotrace_pf_handler)
		ret = cur->handler(regs, error_code, address);
		ret = mmiotrace_pf_handler(regs, error_code, address);
		if (ret)
			break;
	}
	rcu_read_unlock();
	rcu_read_unlock();
	return ret;
	return ret;
#else
#else
@@ -655,7 +657,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)


	if (notify_page_fault(regs))
	if (notify_page_fault(regs))
		return;
		return;
	if (handle_custom_pf(regs, error_code, address))
	if (call_mmiotrace(regs, error_code, address))
		return;
		return;


	/*
	/*
+5 −7
Original line number Original line Diff line number Diff line
@@ -35,13 +35,11 @@ extern void show_regs(struct pt_regs *regs);
extern unsigned long oops_begin(void);
extern unsigned long oops_begin(void);
extern void oops_end(unsigned long, struct pt_regs *, int signr);
extern void oops_end(unsigned long, struct pt_regs *, int signr);


struct pf_handler {
typedef int (*pf_handler_func)(struct pt_regs *regs,
	struct hlist_node hlist;
				unsigned long error_code,
	int (*handler)(struct pt_regs *regs, unsigned long error_code,
				unsigned long address);
				unsigned long address);
};


extern void register_page_fault_handler(struct pf_handler *new_pfh);
extern int mmiotrace_register_pf(pf_handler_func new_pfh);
extern void unregister_page_fault_handler(struct pf_handler *old_pfh);
extern int mmiotrace_unregister_pf(pf_handler_func old_pfh);


#endif
#endif