Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 446c92b2 authored by Uwe Kleine-König, committed by Russell King
Browse files

[ARM] 5421/1: ftrace: fix crash due to tracing of __naked functions

This is a fix for the following crash observed in 2.6.29-rc3:
http://lkml.org/lkml/2009/1/29/150



On ARM it doesn't make sense to trace a naked function because then
mcount is called without stack and frame pointer being set up and there
is no chance to restore the lr register to the value before mcount was
called.

Reported-by: Matthias Kaehlcke <matthias@kaehlcke.net>
Tested-by: Matthias Kaehlcke <matthias@kaehlcke.net>

Cc: Abhishek Sagar <sagar.abhishek@gmail.com>
Cc: Steven Rostedt <rostedt@home.goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 9311c593
Loading
Loading
Loading
Loading
+2 −2
Original line number Original line Diff line number Diff line
@@ -88,7 +88,7 @@ void set_fiq_handler(void *start, unsigned int length)
 * disable irqs for the duration.  Note - these functions are almost
 * disable irqs for the duration.  Note - these functions are almost
 * entirely coded in assembly.
 * entirely coded in assembly.
 */
 */
void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
void __naked set_fiq_regs(struct pt_regs *regs)
{
{
	register unsigned long tmp;
	register unsigned long tmp;
	asm volatile (
	asm volatile (
@@ -106,7 +106,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
	: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
	: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
}


void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs)
void __naked get_fiq_regs(struct pt_regs *regs)
{
{
	register unsigned long tmp;
	register unsigned long tmp;
	asm volatile (
	asm volatile (
+1 −1
Original line number Original line Diff line number Diff line
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/highmem.h>


static void __attribute__((naked))
static void __naked
feroceon_copy_user_page(void *kto, const void *kfrom)
feroceon_copy_user_page(void *kto, const void *kfrom)
{
{
	asm("\
	asm("\
+1 −1
Original line number Original line Diff line number Diff line
@@ -15,7 +15,7 @@
 *
 *
 * FIXME: do we need to handle cache stuff...
 * FIXME: do we need to handle cache stuff...
 */
 */
static void __attribute__((naked))
static void __naked
v3_copy_user_page(void *kto, const void *kfrom)
v3_copy_user_page(void *kto, const void *kfrom)
{
{
	asm("\n\
	asm("\n\
+1 −1
Original line number Original line Diff line number Diff line
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(minicache_lock);
 * instruction.  If your processor does not supply this, you have to write your
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 * own copy_user_highpage that does the right thing.
 */
 */
static void __attribute__((naked))
static void __naked
mc_copy_user_page(void *from, void *to)
mc_copy_user_page(void *from, void *to)
{
{
	asm volatile(
	asm volatile(
+1 −1
Original line number Original line Diff line number Diff line
@@ -22,7 +22,7 @@
 * instruction.  If your processor does not supply this, you have to write your
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 * own copy_user_highpage that does the right thing.
 */
 */
static void __attribute__((naked))
static void __naked
v4wb_copy_user_page(void *kto, const void *kfrom)
v4wb_copy_user_page(void *kto, const void *kfrom)
{
{
	asm("\
	asm("\
Loading