Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9bd50df6 authored by Philippe Gerum, committed by Bryan Wu
Browse files

Blackfin arch: Update adeos blackfin arch patch to 1.9-00



Signed-off-by: Philippe Gerum <rpm@xenomai.org>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
parent 97d4b35f
Loading
Loading
Loading
Loading
+36 −64
Original line number Diff line number Diff line
@@ -35,9 +35,9 @@
#include <asm/atomic.h>
#include <asm/traps.h>

#define IPIPE_ARCH_STRING     "1.8-00"
#define IPIPE_ARCH_STRING     "1.9-00"
#define IPIPE_MAJOR_NUMBER    1
#define IPIPE_MINOR_NUMBER    8
#define IPIPE_MINOR_NUMBER    9
#define IPIPE_PATCH_NUMBER    0

#ifdef CONFIG_SMP
@@ -83,9 +83,9 @@ struct ipipe_sysinfo {
				"%2 = CYCLES2\n"		\
				"CC = %2 == %0\n"		\
				"if ! CC jump 1b\n"		\
				: "=r" (((unsigned long *)&t)[1]),	\
				  "=r" (((unsigned long *)&t)[0]),	\
				  "=r" (__cy2)				\
				: "=d,a" (((unsigned long *)&t)[1]),	\
				  "=d,a" (((unsigned long *)&t)[0]),	\
				  "=d,a" (__cy2)				\
				: /*no input*/ : "CC");			\
	t;								\
	})
@@ -118,35 +118,40 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,

#define __ipipe_disable_irq(irq)	(irq_desc[irq].chip->mask(irq))

#define __ipipe_lock_root()					\
	set_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
/*
 * Tick device validation hook: on this architecture every clock event
 * device name is accepted as an I-pipe tick source.
 */
static inline int __ipipe_check_tickdev(const char *devname)
{
	(void)devname;	/* name is irrelevant here; all devices qualify */
	return 1;
}

#define __ipipe_unlock_root()					\
	clear_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
/*
 * Lock the root stage: set IPIPE_SYNCDEFER_FLAG in the root domain's
 * per-CPU pipeline status word, so that syncing of the root interrupt
 * log is deferred until __ipipe_unlock_root() clears the bit (see
 * ___ipipe_sync_pipeline(), which bails out while the flag is set).
 */
static inline void __ipipe_lock_root(void)
{
	set_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
}

/*
 * Unlock the root stage: clear IPIPE_SYNCDEFER_FLAG from the root
 * domain's per-CPU pipeline status word, re-allowing root interrupt
 * log syncing previously deferred by __ipipe_lock_root().
 */
static inline void __ipipe_unlock_root(void)
{
	clear_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
}

void __ipipe_enable_pipeline(void);

#define __ipipe_hook_critical_ipi(ipd) do { } while (0)

#define __ipipe_sync_pipeline(syncmask)					\
	do {								\
		struct ipipe_domain *ipd = ipipe_current_domain;	\
		if (likely(ipd != ipipe_root_domain || !test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags))) \
			__ipipe_sync_stage(syncmask);			\
	} while (0)
#define __ipipe_sync_pipeline  ___ipipe_sync_pipeline
void ___ipipe_sync_pipeline(unsigned long syncmask);

void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);

int __ipipe_get_irq_priority(unsigned irq);

int __ipipe_get_irqthread_priority(unsigned irq);

void __ipipe_stall_root_raw(void);

void __ipipe_unstall_root_raw(void);

void __ipipe_serial_debug(const char *fmt, ...);

asmlinkage void __ipipe_call_irqtail(unsigned long addr);

DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);

extern unsigned long __ipipe_core_clock;
@@ -162,42 +167,25 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)

#define __ipipe_run_irqtail()  /* Must be a macro */			\
	do {								\
		asmlinkage void __ipipe_call_irqtail(void);		\
		unsigned long __pending;				\
		CSYNC();						\
		__pending = bfin_read_IPEND();				\
		if (__pending & 0x8000) {				\
			__pending &= ~0x8010;				\
			if (__pending && (__pending & (__pending - 1)) == 0) \
				__ipipe_call_irqtail();			\
				__ipipe_call_irqtail(__ipipe_irq_tail_hook); \
		}							\
	} while (0)

#define __ipipe_run_isr(ipd, irq)					\
	do {								\
		if (ipd == ipipe_root_domain) {				\
			/*						\
			 * Note: the I-pipe implements a threaded interrupt model on \
			 * this arch for Linux external IRQs. The interrupt handler we \
			 * call here only wakes up the associated IRQ thread. \
			 */						\
			if (ipipe_virtual_irq_p(irq)) {			\
				/* No irqtail here; virtual interrupts have no effect \
				   on IPEND so there is no need for processing \
				   deferral. */				\
				local_irq_enable_nohead(ipd);		\
			local_irq_enable_hw();				\
			if (ipipe_virtual_irq_p(irq))			\
				ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
				local_irq_disable_nohead(ipd);		\
			} else						\
				/*					\
				 * No need to run the irqtail here either; \
				 * we can't be preempted by hw IRQs, so	\
				 * non-Linux IRQs cannot stack over the short \
				 * thread wakeup code. Which in turn means \
				 * that no irqtail condition could be pending \
				 * for domains above Linux in the pipeline. \
				 */					\
			else						\
				ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
			local_irq_disable_hw();				\
		} else {						\
			__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
			local_irq_enable_nohead(ipd);			\
@@ -217,42 +205,24 @@ void ipipe_init_irq_threads(void);

int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);

#define IS_SYSIRQ(irq)		((irq) > IRQ_CORETMR && (irq) <= SYS_IRQS)
#define IS_GPIOIRQ(irq)		((irq) >= GPIO_IRQ_BASE && (irq) < NR_IRQS)

#ifdef CONFIG_GENERIC_CLOCKEVENTS
#define IRQ_SYSTMR		IRQ_CORETMR
#define IRQ_PRIOTMR		IRQ_CORETMR
#else
#define IRQ_SYSTMR		IRQ_TIMER0
#define IRQ_PRIOTMR		CONFIG_IRQ_TIMER0
#endif

#if defined(CONFIG_BF531) || defined(CONFIG_BF532) || defined(CONFIG_BF533)
#define PRIO_GPIODEMUX(irq)	CONFIG_PFA
#elif defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
#define PRIO_GPIODEMUX(irq)	CONFIG_IRQ_PROG_INTA
#elif defined(CONFIG_BF52x)
#define PRIO_GPIODEMUX(irq)	((irq) == IRQ_PORTF_INTA ? CONFIG_IRQ_PORTF_INTA : \
				 (irq) == IRQ_PORTG_INTA ? CONFIG_IRQ_PORTG_INTA : \
				 (irq) == IRQ_PORTH_INTA ? CONFIG_IRQ_PORTH_INTA : \
				 -1)
#elif defined(CONFIG_BF561)
#define PRIO_GPIODEMUX(irq)	((irq) == IRQ_PROG0_INTA ? CONFIG_IRQ_PROG0_INTA : \
				 (irq) == IRQ_PROG1_INTA ? CONFIG_IRQ_PROG1_INTA : \
				 (irq) == IRQ_PROG2_INTA ? CONFIG_IRQ_PROG2_INTA : \
				 -1)
#ifdef CONFIG_BF561
#define bfin_write_TIMER_DISABLE(val)	bfin_write_TMRS8_DISABLE(val)
#define bfin_write_TIMER_ENABLE(val)	bfin_write_TMRS8_ENABLE(val)
#define bfin_write_TIMER_STATUS(val)	bfin_write_TMRS8_STATUS(val)
#define bfin_read_TIMER_STATUS()	bfin_read_TMRS8_STATUS()
#elif defined(CONFIG_BF54x)
#define PRIO_GPIODEMUX(irq)	((irq) == IRQ_PINT0 ? CONFIG_IRQ_PINT0 : \
				 (irq) == IRQ_PINT1 ? CONFIG_IRQ_PINT1 : \
				 (irq) == IRQ_PINT2 ? CONFIG_IRQ_PINT2 : \
				 (irq) == IRQ_PINT3 ? CONFIG_IRQ_PINT3 : \
				 -1)
/*
 * Map the generic timer accessors onto the first register bank of this
 * part. Note: the read accessor takes no argument — the original
 * definition erroneously declared a (val) parameter, which made it
 * inconsistent with the parameterless BF561 form of
 * bfin_read_TIMER_STATUS() and broke every argument-less call site.
 */
#define bfin_write_TIMER_DISABLE(val)	bfin_write_TIMER_DISABLE0(val)
#define bfin_write_TIMER_ENABLE(val)	bfin_write_TIMER_ENABLE0(val)
#define bfin_write_TIMER_STATUS(val)	bfin_write_TIMER_STATUS0(val)
#define bfin_read_TIMER_STATUS()	bfin_read_TIMER_STATUS0()
#else
# error "no PRIO_GPIODEMUX() for this part"
#endif

#define __ipipe_root_tick_p(regs)	((regs->ipend & 0x10) != 0)
@@ -275,4 +245,6 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);

#endif /* !CONFIG_IPIPE */

#define ipipe_update_tick_evtdev(evtdev)	do { } while (0)

#endif	/* !__ASM_BLACKFIN_IPIPE_H */
+4 −8
Original line number Diff line number Diff line
/*   -*- linux-c -*-
 *   include/asm-blackfin/_baseipipe.h
 *   include/asm-blackfin/ipipe_base.h
 *
 *   Copyright (C) 2007 Philippe Gerum.
 *
@@ -27,8 +27,9 @@
#define IPIPE_NR_XIRQS		NR_IRQS
#define IPIPE_IRQ_ISHIFT	5	/* 2^5 for 32bits arch. */

/* Blackfin-specific, global domain flags */
#define IPIPE_ROOTLOCK_FLAG	1	/* Lock pipeline for root */
/* Blackfin-specific, per-cpu pipeline status */
#define IPIPE_SYNCDEFER_FLAG	15	/* bit index in the status word */
/*
 * Bug fix: the mask must shift by the FLAG, not by itself. The original
 * self-referential define left the unexpanded IPIPE_SYNCDEFER_MASK token
 * in the expansion (the preprocessor does not recurse), producing an
 * undefined identifier at every use site.
 */
#define IPIPE_SYNCDEFER_MASK	(1L << IPIPE_SYNCDEFER_FLAG)

 /* Blackfin traps -- i.e. exception vector numbers */
#define IPIPE_NR_FAULTS		52 /* We leave a gap after VEC_ILL_RES. */
@@ -48,11 +49,6 @@

#ifndef __ASSEMBLY__

#include <linux/bitops.h>

extern int test_bit(int nr, const void *addr);


extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */

static inline void __ipipe_stall_root(void)
+27 −9
Original line number Diff line number Diff line
@@ -65,16 +65,34 @@ void __ipipe_restore_root(unsigned long flags);
	do {							 \
		(x) = __ipipe_test_root() ?			 \
			__all_masked_irq_flags : bfin_irq_flags; \
		barrier();					 \
	} while (0)

#define local_irq_save(x)					 \
	do {						 	 \
		(x) = __ipipe_test_and_stall_root();	\
		(x) = __ipipe_test_and_stall_root() ?		 \
			__all_masked_irq_flags : bfin_irq_flags; \
		barrier();					 \
	} while (0)

#define local_irq_restore(x)	__ipipe_restore_root(x)
#define local_irq_disable()	__ipipe_stall_root()
#define local_irq_enable()	__ipipe_unstall_root()
/*
 * Restore the root domain's virtual interrupt state from a value
 * previously produced by local_irq_save()/local_save_flags(), which
 * yield __all_masked_irq_flags when the root stage was stalled and
 * bfin_irq_flags otherwise. Hence x == __all_masked_irq_flags tells
 * __ipipe_restore_root() to re-stall the root stage.
 */
static inline void local_irq_restore(unsigned long x)
{
	barrier();	/* keep prior memory accesses inside the protected section */
	__ipipe_restore_root(x == __all_masked_irq_flags);
}

#define local_irq_disable()			\
	do {					\
		__ipipe_stall_root();		\
		barrier();			\
	} while (0)

/*
 * Virtually re-enable interrupts for the root domain by unstalling the
 * root stage. The compiler barrier comes first so memory accesses from
 * the critical section cannot be reordered past the enable point.
 */
static inline void local_irq_enable(void)
{
	barrier();
	__ipipe_unstall_root();
}

#define irqs_disabled()		__ipipe_test_root()

#define local_save_flags_hw(x) \
+2 −0
Original line number Diff line number Diff line
@@ -122,6 +122,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE              4
#define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
#define TIF_FREEZE              6       /* is freezing for suspend */
#define TIF_IRQ_SYNC            7       /* sync pipeline stage */

/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -130,6 +131,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
#define _TIF_FREEZE             (1<<TIF_FREEZE)
#define _TIF_IRQ_SYNC           (1<<TIF_IRQ_SYNC)

#define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */

+46 −130
Original line number Diff line number Diff line
@@ -35,14 +35,8 @@
#include <asm/atomic.h>
#include <asm/io.h>

static int create_irq_threads;

DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);

static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);

static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);

asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);

static void __ipipe_no_irqtail(void);
@@ -93,6 +87,7 @@ void __ipipe_enable_pipeline(void)
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain, *next_domain;
	struct list_head *head, *pos;
	int m_ack, s = -1;
@@ -104,7 +99,6 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
	 * interrupt.
	 */
	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);

	this_domain = ipipe_current_domain;

	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
@@ -114,49 +108,28 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
			if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
				s = __test_and_set_bit(IPIPE_STALL_FLAG,
						       &ipipe_root_cpudom_var(status));
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
				s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
			__ipipe_dispatch_wired(next_domain, irq);
				goto finalize;
			return;
			goto out;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		/*
		 * For each domain handling the incoming IRQ, mark it
		 * as pending in its log.
		 */
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			/*
			 * Domains that handle this IRQ are polled for
			 * acknowledging it by decreasing priority
			 * order. The interrupt must be made pending
			 * _first_ in the domain's status flags before
			 * the PIC is unlocked.
			 */
			__ipipe_set_irq_pending(next_domain, irq);

			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}

		/*
		 * If the domain does not want the IRQ to be passed
		 * down the interrupt pipe, exit the loop now.
		 */
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;

		pos = next_domain->p_link.next;
	}

@@ -166,18 +139,24 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline. We also enforce the
	 * additional root stage lock (blackfin-specific). */
	 * additional root stage lock (blackfin-specific).
	 */
	if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
		s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);

	if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
		s = __test_and_set_bit(IPIPE_STALL_FLAG,
				       &ipipe_root_cpudom_var(status));
finalize:
	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    ipipe_head_cpudom_var(irqpend_himask) == 0)
		goto out;

	__ipipe_walk_pipeline(head);

out:
	if (!s)
		__clear_bit(IPIPE_STALL_FLAG,
			    &ipipe_root_cpudom_var(status));
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
}

int __ipipe_check_root(void)
@@ -187,7 +166,7 @@ int __ipipe_check_root(void)

void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	struct irq_desc *desc = irq_desc + irq;
	struct irq_desc *desc = irq_to_desc(irq);
	int prio = desc->ic_prio;

	desc->depth = 0;
@@ -199,7 +178,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);

void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	struct irq_desc *desc = irq_desc + irq;
	struct irq_desc *desc = irq_to_desc(irq);
	int prio = desc->ic_prio;

	if (ipd != &ipipe_root &&
@@ -236,14 +215,17 @@ int __ipipe_syscall_root(struct pt_regs *regs)
{
	unsigned long flags;

	/* We need to run the IRQ tail hook whenever we don't
	/*
	 * We need to run the IRQ tail hook whenever we don't
	 * propagate a syscall to higher domains, because we know that
	 * important operations might be pending there (e.g. Xenomai
	 * deferred rescheduling). */
	 * deferred rescheduling).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
	if (regs->orig_p0 < NR_syscalls) {
		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
		hook();
		if ((current->flags & PF_EVNOTIFY) == 0)
			return 0;
	}

@@ -312,112 +294,46 @@ int ipipe_trigger_irq(unsigned irq)
{
	unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
	if (irq >= IPIPE_NR_IRQS ||
	    (ipipe_virtual_irq_p(irq)
	     && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
		return -EINVAL;
#endif

	local_irq_save_hw(flags);

	__ipipe_handle_irq(irq, NULL);

	local_irq_restore_hw(flags);

	return 1;
}

/* Move Linux IRQ to threads. */

static int do_irqd(void *__desc)
{
	struct irq_desc *desc = __desc;
	unsigned irq = desc - irq_desc;
	int thrprio = desc->thr_prio;
	int thrmask = 1 << thrprio;
	int cpu = smp_processor_id();
	cpumask_t cpumask;

	sigfillset(&current->blocked);
	current->flags |= PF_NOFREEZE;
	cpumask = cpumask_of_cpu(cpu);
	set_cpus_allowed(current, cpumask);
	ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);

	while (!kthread_should_stop()) {
		local_irq_disable();
		if (!(desc->status & IRQ_SCHEDULED)) {
			set_current_state(TASK_INTERRUPTIBLE);
resched:
			local_irq_enable();
			schedule();
			local_irq_disable();
		}
		__set_current_state(TASK_RUNNING);
		/*
		 * If higher priority interrupt servers are ready to
		 * run, reschedule immediately. We need this for the
		 * GPIO demux IRQ handler to unmask the interrupt line
		 * _last_, after all GPIO IRQs have run.
		 */
		if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
			goto resched;
		if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
			per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
		desc->status &= ~IRQ_SCHEDULED;
		desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
		local_irq_enable();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void kick_irqd(unsigned irq, void *cookie)
asmlinkage void __ipipe_sync_root(void)
{
	struct irq_desc *desc = irq_desc + irq;
	int thrprio = desc->thr_prio;
	int thrmask = 1 << thrprio;
	int cpu = smp_processor_id();

	if (!(desc->status & IRQ_SCHEDULED)) {
		desc->status |= IRQ_SCHEDULED;
		per_cpu(pending_irqthread_mask, cpu) |= thrmask;
		++per_cpu(pending_irq_count[thrprio], cpu);
		wake_up_process(desc->thread);
	}
}
	unsigned long flags;

int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
{
	if (desc->thread || !create_irq_threads)
		return 0;
	BUG_ON(irqs_disabled());

	desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
	if (desc->thread == NULL) {
		printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
		return -ENOMEM;
	}
	local_irq_save_hw(flags);

	wake_up_process(desc->thread);
	clear_thread_flag(TIF_IRQ_SYNC);

	desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
	ipipe_root_domain->irqs[irq].handler = &kick_irqd;
	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
		__ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);

	return 0;
	local_irq_restore_hw(flags);
}

void __init ipipe_init_irq_threads(void)
void ___ipipe_sync_pipeline(unsigned long syncmask)
{
	unsigned irq;
	struct irq_desc *desc;
	struct ipipe_domain *ipd = ipipe_current_domain;

	create_irq_threads = 1;

	for (irq = 0; irq < NR_IRQS; irq++) {
		desc = irq_desc + irq;
		if (desc->action != NULL ||
			(desc->status & IRQ_NOREQUEST) != 0)
			ipipe_start_irq_thread(irq, desc);
	if (ipd == ipipe_root_domain) {
		if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
			return;
	}

	__ipipe_sync_stage(syncmask);
}

EXPORT_SYMBOL(show_stack);
Loading