Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bc1a298f authored by Chris Metcalf
Browse files

tile: support CONFIG_PREEMPT



This change adds support for CONFIG_PREEMPT (full kernel preemption).
In addition to the core support, this change includes a number
of places where we fix up uses of smp_processor_id() and per-cpu
variables.  I also eliminate the PAGE_HOME_HERE and PAGE_HOME_UNKNOWN
values for page homing, as it turns out they weren't being used.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent 1182b69c
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -301,6 +301,8 @@ config PAGE_OFFSET

source "mm/Kconfig"

source "kernel/Kconfig.preempt"

config CMDLINE_BOOL
	bool "Built-in kernel command line"
	default n
+0 −8
Original line number Diff line number Diff line
@@ -44,16 +44,8 @@ struct zone;
 */
#define PAGE_HOME_INCOHERENT -3

#if CHIP_HAS_CBOX_HOME_MAP()
/* Home for the page is distributed via hash-for-home. */
#define PAGE_HOME_HASH -4
#endif

/* Homing is unknown or unspecified.  Not valid for page_home(). */
#define PAGE_HOME_UNKNOWN -5

/* Home on the current cpu.  Not valid for page_home(). */
#define PAGE_HOME_HERE -6

/* Support wrapper to use instead of explicit hv_flush_remote(). */
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
+18 −3
Original line number Diff line number Diff line
@@ -124,6 +124,12 @@
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)

#ifdef CONFIG_DEBUG_PREEMPT
/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#endif

/* Disable interrupts. */
#define arch_local_irq_disable() \
	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
@@ -132,9 +138,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define arch_local_irq_disable_all() \
	interrupt_mask_set_mask(-1ULL)

/*
 * Read the set of maskable interrupts.
 * We avoid the preemption warning here via __this_cpu_ptr since even
 * if irqs are already enabled, it's harmless to read the wrong cpu's
 * enabled mask.
 */
#define arch_local_irqs_enabled() \
	(*__this_cpu_ptr(&interrupts_enabled_mask))

/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
	interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
	interrupt_mask_reset_mask(arch_local_irqs_enabled())

/* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \
@@ -161,7 +176,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);

/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
	(__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
	this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))

/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
@@ -171,7 +186,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);

/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
	(__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
	this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))

/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
+2 −0
Original line number Diff line number Diff line
@@ -58,6 +58,8 @@ void foo(void)
	       offsetof(struct thread_info, status));
	DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET,
	       offsetof(struct thread_info, homecache_cpu));
	DEFINE(THREAD_INFO_PREEMPT_COUNT_OFFSET,
	       offsetof(struct thread_info, preempt_count));
	DEFINE(THREAD_INFO_STEP_STATE_OFFSET,
	       offsetof(struct thread_info, step_state));
#ifdef __tilegx__
+9 −9
Original line number Diff line number Diff line
@@ -272,9 +272,9 @@ static void hardwall_setup_func(void *info)
	struct hardwall_info *r = info;
	struct hardwall_type *hwt = r->type;

	int cpu = smp_processor_id();
	int x = cpu % smp_width;
	int y = cpu / smp_width;
	int cpu = smp_processor_id();  /* on_each_cpu disables preemption */
	int x = cpu_x(cpu);
	int y = cpu_y(cpu);
	int bits = 0;
	if (x == r->ulhc_x)
		bits |= W_PROTECT;
@@ -317,6 +317,7 @@ static void hardwall_protect_rectangle(struct hardwall_info *r)
	on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
}

/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
{
	struct hardwall_info *rect;
@@ -325,7 +326,6 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
	struct siginfo info;
	int cpu = smp_processor_id();
	int found_processes;
	unsigned long flags;
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
@@ -346,7 +346,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
	BUG_ON(hwt->disabled);

	/* This tile trapped a network access; find the rectangle. */
	spin_lock_irqsave(&hwt->lock, flags);
	spin_lock(&hwt->lock);
	list_for_each_entry(rect, &hwt->list, list) {
		if (cpumask_test_cpu(cpu, &rect->cpumask))
			break;
@@ -401,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
		pr_notice("hardwall: no associated processes!\n");

 done:
	spin_unlock_irqrestore(&hwt->lock, flags);
	spin_unlock(&hwt->lock);

	/*
	 * We have to disable firewall interrupts now, or else when we
@@ -661,7 +661,7 @@ static int hardwall_deactivate(struct hardwall_type *hwt,
		return -EINVAL;

	printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
	       task->pid, task->comm, hwt->name, smp_processor_id());
	       task->pid, task->comm, hwt->name, raw_smp_processor_id());
	return 0;
}

@@ -803,8 +803,8 @@ static void reset_xdn_network_state(struct hardwall_type *hwt)
	/* Reset UDN coordinates to their standard value */
	{
		unsigned int cpu = smp_processor_id();
		unsigned int x = cpu % smp_width;
		unsigned int y = cpu / smp_width;
		unsigned int x = cpu_x(cpu);
		unsigned int y = cpu_y(cpu);
		__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
	}

Loading