Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb2f3486 authored by Chris Zankel's avatar Chris Zankel
Browse files

Merge tag 'xtensa-for-next-20160111' of git://github.com/jcmvbkbc/linux-xtensa

Xtensa improvements for 4.5:

- control whether perf IRQ is treated as NMI from Kconfig;
- implement ioremap for regions outside KIO segment.
parents d1208404 5bb8def5
Loading
Loading
Loading
Loading
+16 −0
Original line number Original line Diff line number Diff line
@@ -139,6 +139,22 @@ config XTENSA_VARIANT_HAVE_PERF_EVENTS


	  If unsure, say N.
	  If unsure, say N.


config XTENSA_FAKE_NMI
	bool "Treat PMM IRQ as NMI"
	depends on XTENSA_VARIANT_HAVE_PERF_EVENTS
	default n
	help
	  If PMM IRQ is the only IRQ at EXCM level it is safe to
	  treat it as NMI, which improves accuracy of profiling.

	  If there are other interrupts at or above PMM IRQ priority level
	  but not above the EXCM level, PMM IRQ still may be treated as NMI,
	  but only if these IRQs are not used. There will be a build warning
	  saying that this is not safe, and a bugcheck if one of these IRQs
	  actually fires.

	  If unsure, say N.

config XTENSA_UNALIGNED_USER
config XTENSA_UNALIGNED_USER
	bool "Unaligned memory access in user space"
	bool "Unaligned memory access in user space"
	help
	help
+13 −3
Original line number Original line Diff line number Diff line
@@ -25,9 +25,12 @@


#ifdef CONFIG_MMU
#ifdef CONFIG_MMU


void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
void xtensa_iounmap(volatile void __iomem *addr);

/*
/*
 * Return the virtual address for the specified bus memory.
 * Return the virtual address for the specified bus memory.
 * Note that we currently don't support any address outside the KIO segment.
 */
 */
static inline void __iomem *ioremap_nocache(unsigned long offset,
static inline void __iomem *ioremap_nocache(unsigned long offset,
		unsigned long size)
		unsigned long size)
@@ -36,7 +39,7 @@ static inline void __iomem *ioremap_nocache(unsigned long offset,
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
	else
	else
		BUG();
		return xtensa_ioremap_nocache(offset, size);
}
}


static inline void __iomem *ioremap_cache(unsigned long offset,
static inline void __iomem *ioremap_cache(unsigned long offset,
@@ -46,7 +49,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
	else
	else
		BUG();
		return xtensa_ioremap_cache(offset, size);
}
}
#define ioremap_cache ioremap_cache
#define ioremap_cache ioremap_cache


@@ -60,6 +63,13 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)


static inline void iounmap(volatile void __iomem *addr)
static inline void iounmap(volatile void __iomem *addr)
{
{
	unsigned long va = (unsigned long) addr;

	if (!(va >= XCHAL_KIO_CACHED_VADDR &&
	      va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
	    !(va >= XCHAL_KIO_BYPASS_VADDR &&
	      va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
		xtensa_iounmap(addr);
}
}


#define virt_to_bus     virt_to_phys
#define virt_to_bus     virt_to_phys
+5 −7
Original line number Original line Diff line number Diff line
@@ -78,22 +78,20 @@
#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)


#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)


#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)


/* LOCKLEVEL defines the interrupt level that masks all
/* LOCKLEVEL defines the interrupt level that masks all
 * general-purpose interrupts.
 * general-purpose interrupts.
 */
 */
#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
	defined(XCHAL_PROFILING_INTERRUPT) && \
#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
	PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
	XCHAL_EXCM_LEVEL > 1 && \
	IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
#else
#else
#define LOCKLEVEL XCHAL_EXCM_LEVEL
#define LOCKLEVEL XCHAL_EXCM_LEVEL
#endif
#endif

#define TOPLEVEL XCHAL_EXCM_LEVEL
#define TOPLEVEL XCHAL_EXCM_LEVEL
#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)


+3 −6
Original line number Original line Diff line number Diff line
@@ -12,19 +12,16 @@
#include <asm/processor.h>
#include <asm/processor.h>
#include <linux/stringify.h>
#include <linux/stringify.h>


#define _INTLEVEL(x)	XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x)	_INTLEVEL(x)

#if XCHAL_NUM_TIMERS > 0 && \
#if XCHAL_NUM_TIMERS > 0 && \
	INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
	XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER     0
# define LINUX_TIMER     0
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
#elif XCHAL_NUM_TIMERS > 1 && \
#elif XCHAL_NUM_TIMERS > 1 && \
	INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
	XTENSA_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER     1
# define LINUX_TIMER     1
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
#elif XCHAL_NUM_TIMERS > 2 && \
#elif XCHAL_NUM_TIMERS > 2 && \
	INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
	XTENSA_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER     2
# define LINUX_TIMER     2
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
#else
#else
+27 −0
Original line number Original line Diff line number Diff line
@@ -205,6 +205,32 @@ extern void do_IRQ(int, struct pt_regs *);


#if XTENSA_FAKE_NMI
#if XTENSA_FAKE_NMI


#define IS_POW2(v) (((v) & ((v) - 1)) == 0)

#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fire."

/*
 * Verify that a fake NMI taken for the profiling (PMM) IRQ did not
 * arrive together with some other IRQ sharing its level.  Per the
 * #warning above, such an IRQ triggers a bugcheck.
 */
static inline void check_valid_nmi(void)
{
	/* Pending and enabled interrupt sets from the special registers. */
	unsigned intread = get_sr(interrupt);
	unsigned intenable = get_sr(intenable);

	/*
	 * ANDBELOW_MASK(l) ^ INTLEVEL_MASK(l) leaves the IRQs strictly
	 * below the profiling level; XORing in
	 * BIT(XCHAL_PROFILING_INTERRUPT) adds back the profiling IRQ
	 * itself.  Anything pending AND enabled outside that allowed set
	 * means a masked-by-NMI IRQ has fired -> bugcheck.
	 */
	BUG_ON(intread & intenable &
	       ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
		 XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
		 BIT(XCHAL_PROFILING_INTERRUPT)));
}

#else

/*
 * Safe configuration (the #if above holds: PMM is at EXCM level and is
 * the only IRQ at its level), so no runtime validation is needed.
 */
static inline void check_valid_nmi(void)
{
}

#endif

irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);


DEFINE_PER_CPU(unsigned long, nmi_count);
DEFINE_PER_CPU(unsigned long, nmi_count);
@@ -219,6 +245,7 @@ void do_nmi(struct pt_regs *regs)
	old_regs = set_irq_regs(regs);
	old_regs = set_irq_regs(regs);
	nmi_enter();
	nmi_enter();
	++*this_cpu_ptr(&nmi_count);
	++*this_cpu_ptr(&nmi_count);
	check_valid_nmi();
	xtensa_pmu_irq_handler(0, NULL);
	xtensa_pmu_irq_handler(0, NULL);
	nmi_exit();
	nmi_exit();
	set_irq_regs(old_regs);
	set_irq_regs(old_regs);
Loading