
Commit 1181a244 authored by Linus Torvalds

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sparc64: Fix cpumask related build failure
  smp_call_function_single(): be slightly less stupid, fix
  smp_call_function_single(): be slightly less stupid
  rcu: fix bug in rcutorture system-shutdown code
parents b7437916 9d079337
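
Three of the four fixes touch smp_call_function_single(), which runs a function on one chosen CPU and optionally waits for it to finish. For reference, a typical caller looks roughly like the sketch below (a hypothetical example, not part of this merge); after this series the same code builds on both SMP and UP kernels, because the prototype is now declared outside #ifdef CONFIG_SMP and the UP implementation lives in the new kernel/up.c.

#include <linux/kernel.h>
#include <linux/smp.h>

/* Hypothetical per-CPU work item: read something on the target CPU. */
static void sample_counter(void *info)
{
	unsigned long *val = info;

	*val = 42;		/* stand-in for a per-CPU read */
}

static int query_cpu(int cpu)
{
	unsigned long val = 0;
	int ret;

	/* wait == 1: do not return until sample_counter() has run on 'cpu'. */
	ret = smp_call_function_single(cpu, sample_counter, &val, 1);
	if (ret)
		return ret;

	printk(KERN_INFO "cpu %d counter: %lu\n", cpu, val);
	return 0;
}
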
arch/sparc/include/asm/topology_64.h
+4 −0
@@ -47,6 +47,10 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
	(pcibus_to_node(bus) == -1 ? \
	 CPU_MASK_ALL : \
	 node_to_cpumask(pcibus_to_node(bus)))
#define cpumask_of_pcibus(bus)	\
	(pcibus_to_node(bus) == -1 ? \
	 CPU_MASK_ALL_PTR : \
	 cpumask_of_node(pcibus_to_node(bus)))

#define SD_NODE_INIT (struct sched_domain) {		\
	.min_interval		= 8,			\
include/linux/smp.h
+3 −10
@@ -24,6 +24,9 @@ struct call_single_data {
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
				int wait);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
@@ -79,8 +82,6 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
	return 0;
}

int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
				int wait);
void __smp_call_function_single(int cpuid, struct call_single_data *data);

/*
@@ -140,14 +141,6 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus()			1
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_single(cpuid, func, info, wait) \
({ \
	WARN_ON(cpuid != 0);	\
	local_irq_disable();	\
	(func)(info);		\
	local_irq_enable();	\
	0;			\
})
#define smp_call_function_mask(mask, func, info, wait) \
			(up_smp_call_function(func, info))
#define smp_call_function_many(mask, func, info, wait) \
kernel/Makefile
+5 −1
@@ -40,7 +40,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
ifeq ($(CONFIG_USE_GENERIC_SMP_HELPERS),y)
obj-y += smp.o
else
obj-y += up.o
endif
obj-$(CONFIG_SMP) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
kernel/rcutorture.c
+68 −45
@@ -136,28 +136,46 @@ static int stutter_pause_test = 0;
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;

#define FULLSTOP_SHUTDOWN 1	/* Bail due to system shutdown/panic. */
#define FULLSTOP_CLEANUP  2	/* Orderly shutdown. */
static int fullstop;		/* stop generating callbacks at test end. */
DEFINE_MUTEX(fullstop_mutex);	/* protect fullstop transitions and */
				/*  spawning of kthreads. */
/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
DEFINE_MUTEX(fullstop_mutex);	/* Protect fullstop transitions and spawning */
				/*  of kthreads. */

/*
 * Detect and respond to a signal-based shutdown.
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	if (fullstop)
		return NOTIFY_DONE;
	mutex_lock(&fullstop_mutex);
	if (!fullstop)
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		printk(KERN_NOTICE
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
@@ -219,13 +237,14 @@ rcu_random(struct rcu_random_state *rrsp)
}

static void
rcu_stutter_wait(void)
rcu_stutter_wait(char *title)
{
	while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}

@@ -287,7 +306,7 @@ rcu_torture_cb(struct rcu_head *p)
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop) {
	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
@@ -619,10 +638,11 @@ rcu_torture_writer(void *arg)
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
@@ -643,11 +663,12 @@ rcu_torture_fakewriter(void *arg)
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
@@ -752,12 +773,13 @@ rcu_torture_reader(void *arg)
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irqcapable)
		del_timer_sync(&t);
	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
@@ -854,7 +876,8 @@ rcu_torture_stats(void *arg)
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
	} while (!kthread_should_stop() && !fullstop);
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}
@@ -866,52 +889,49 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
 */
static void rcu_torture_shuffle_tasks(void)
{
	cpumask_var_t tmp_mask;
	cpumask_t tmp_mask;
	int i;

	if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
		BUG();

	cpumask_setall(tmp_mask);
	cpus_setall(tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1)
		goto out;
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
		cpu_clear(rcu_idle_cpu, tmp_mask);

	set_cpus_allowed_ptr(current, tmp_mask);
	set_cpus_allowed_ptr(current, &tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     tmp_mask);
						     &tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     tmp_mask);
						     &tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, tmp_mask);
		set_cpus_allowed_ptr(writer_task, &tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, tmp_mask);
		set_cpus_allowed_ptr(stats_task, &tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

out:
	put_online_cpus();
	free_cpumask_var(tmp_mask);
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -925,7 +945,8 @@ rcu_torture_shuffle(void *arg)
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
	} while (!kthread_should_stop() && !fullstop);
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}
@@ -940,10 +961,11 @@ rcu_torture_stutter(void *arg)
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop() && !fullstop)
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
	} while (!kthread_should_stop() && !fullstop);
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}
@@ -970,15 +992,16 @@ rcu_torture_cleanup(void)
	int i;

	mutex_lock(&fullstop_mutex);
	if (!fullstop) {
		/* If being signaled, let it happen, then exit. */
	if (fullstop == FULLSTOP_SHUTDOWN) {
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_interruptible(10 * HZ);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_CLEANUP;
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_nb);
	if (stutter_task) {
@@ -1078,7 +1101,7 @@ rcu_torture_init(void)
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = 0;
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

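The rcutorture hunks above replace the old two-state fullstop flag with a small state machine that mediates between 'rmmod rcutorture' and a system shutdown. Sketched outside the diff (same FULLSTOP_* names as the patch, but with a simplified stand-in worker loop rather than rcutorture's real kthreads), the pattern is:

#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/sched.h>

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown while the test is running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of the test module. */

static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);

/*
 * Reboot notifier: flag the shutdown so kthreads park instead of touching
 * soon-to-be-freed module text or data.
 */
static int example_shutdown_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_shutdown_notify,
};

/* Park the calling kthread forever once shutdown has been flagged. */
static void example_shutdown_absorb(void)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN)
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
}

/* Simplified worker loop in the style of the rcutorture kthreads. */
static int example_worker(void *arg)
{
	do {
		/* ... do one unit of test work ... */
		schedule_timeout_interruptible(HZ);
		example_shutdown_absorb();
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

As in the patch, the notifier block would be registered with register_reboot_notifier() at module init and removed with unregister_reboot_notifier() during cleanup, and rcu_torture_cleanup() bails out early if it finds fullstop already set to FULLSTOP_SHUTDOWN.
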
kernel/up.c

0 → 100644
+20 −0
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	WARN_ON(cpu != 0);

	local_irq_disable();
	(func)(info);
	local_irq_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
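
On a !CONFIG_SMP build, the kernel/Makefile change above makes up.o (this file) supply smp_call_function_single() in place of the header macro that the smp.h hunk removes. A hypothetical caller (not from the patch) then behaves like a plain local call made with interrupts disabled:

#include <linux/kernel.h>
#include <linux/smp.h>

static void bump(void *info)
{
	/* On the UP path this runs immediately, with IRQs disabled. */
	(*(int *)info)++;
}

static void up_demo(void)
{
	int count = 0;

	/* Only CPU 0 exists on UP; any other id trips the WARN_ON() above. */
	smp_call_function_single(0, bump, &count, 1);
	printk(KERN_INFO "count is now %d\n", count);	/* prints 1 */
}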