Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 37eaf8c7 authored by Linus Torvalds
Browse files
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  stop_machine: fix up ftrace.c
  stop_machine: Wean existing callers off stop_machine_run()
  stop_machine(): stop_machine_run() changed to use cpu mask
  Hotplug CPU: don't check cpu_online after take_cpu_down
  Simplify stop_machine
  stop_machine: add ALL_CPUS option
  module: fix build warning with !CONFIG_KALLSYMS
parents 58f25071 784e2d76
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -197,7 +197,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
	args.new = BREAKPOINT_INSTRUCTION;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	stop_machine_run(swap_instruction, &args, NR_CPUS);
	stop_machine(swap_instruction, &args, NULL);
	kcb->kprobe_status = status;
}

@@ -212,7 +212,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
	args.new = p->opcode;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	stop_machine_run(swap_instruction, &args, NR_CPUS);
	stop_machine(swap_instruction, &args, NULL);
	kcb->kprobe_status = status;
}

@@ -331,7 +331,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
		 * No kprobe at this address. The fault has not been
		 * caused by a kprobe breakpoint. The race of breakpoint
		 * vs. kprobe remove does not exist because on s390 we
		 * use stop_machine_run to arm/disarm the breakpoints.
		 * use stop_machine to arm/disarm the breakpoints.
		 */
		goto no_kprobe;

+3 −3
Original line number Diff line number Diff line
@@ -241,7 +241,7 @@ static int __init intel_rng_hw_init(void *_intel_rng_hw)
	struct intel_rng_hw *intel_rng_hw = _intel_rng_hw;
	u8 mfc, dvc;

	/* interrupts disabled in stop_machine_run call */
	/* interrupts disabled in stop_machine call */

	if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK))
		pci_write_config_byte(intel_rng_hw->dev,
@@ -365,10 +365,10 @@ static int __init mod_init(void)
	 * location with the Read ID command, all activity on the system
	 * must be stopped until the state is back to normal.
	 *
	 * Use stop_machine_run because IPIs can be blocked by disabling
	 * Use stop_machine because IPIs can be blocked by disabling
	 * interrupts.
	 */
	err = stop_machine_run(intel_rng_hw_init, intel_rng_hw, NR_CPUS);
	err = stop_machine(intel_rng_hw_init, intel_rng_hw, NULL);
	pci_dev_put(dev);
	iounmap(intel_rng_hw->mem);
	kfree(intel_rng_hw);
+33 −17
Original line number Diff line number Diff line
@@ -5,41 +5,43 @@
   (and more).  So the "read" side to such a lock is anything which
   disables preempt. */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <asm/system.h>

#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)

/* Deprecated, but useful for transition. */
#define ALL_CPUS ~0U

/**
 * stop_machine_run: freeze the machine on all CPUs and run this function
 * stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS).
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Description: This causes a thread to be scheduled on every other cpu,
 * each of which disables interrupts, and finally interrupts are disabled
 * on the current CPU.  The result is that no one is holding a spinlock
 * or inside any other preempt-disabled region when @fn() runs.
 * Description: This causes a thread to be scheduled on every cpu,
 * each of which disables interrupts.  The result is that no one is
 * holding a spinlock or inside any other preempt-disabled region when
 * @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel. */
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);
int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);

/**
 * __stop_machine_run: freeze the machine on all CPUs and run this function
 * __stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn
 * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS).
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Description: This is a special version of the above, which returns the
 * thread which has run @fn(): kthread_stop will return the return value
 * of @fn().  Used by hotplug cpu.
 * Description: This is a special version of the above, which assumes cpus
 * won't come or go while it's being called.  Used by hotplug cpu.
 */
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu);

int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
#else

static inline int stop_machine_run(int (*fn)(void *), void *data,
				   unsigned int cpu)
static inline int stop_machine(int (*fn)(void *), void *data,
			       const cpumask_t *cpus)
{
	int ret;
	local_irq_disable();
@@ -48,4 +50,18 @@ static inline int stop_machine_run(int (*fn)(void *), void *data,
	return ret;
}
#endif /* CONFIG_SMP */

static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data,
						unsigned int cpu)
{
	/* If they don't care which cpu fn runs on, just pick one. */
	if (cpu == NR_CPUS)
		return stop_machine(fn, data, NULL);
	else if (cpu == ~0U)
		return stop_machine(fn, data, &cpu_possible_map);
	else {
		cpumask_t cpus = cpumask_of_cpu(cpu);
		return stop_machine(fn, data, &cpus);
	}
}
#endif /* _LINUX_STOP_MACHINE */
+5 −11
Original line number Diff line number Diff line
@@ -216,7 +216,6 @@ static int __ref take_cpu_down(void *_param)
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
@@ -249,21 +248,18 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
	cpus_setall(tmp);
	cpu_clear(cpu, tmp);
	set_cpus_allowed_ptr(current, &tmp);
	tmp = cpumask_of_cpu(cpu);

	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);

	if (IS_ERR(p) || cpu_online(cpu)) {
	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
		goto out_allowed;
	}
		goto out_thread;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
@@ -279,8 +275,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed_ptr(current, &old_allowed);
out_release:
+17 −16
Original line number Diff line number Diff line
@@ -325,18 +325,6 @@ static unsigned long find_symbol(const char *name,
	return -ENOENT;
}

/* lookup symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_symbol(const char *name,
	const struct kernel_symbol *start,
	const struct kernel_symbol *stop)
{
	const struct kernel_symbol *ks = start;
	for (; ks < stop; ks++)
		if (strcmp(ks->name, name) == 0)
			return ks;
	return NULL;
}

/* Search for module by name: must hold module_mutex. */
static struct module *find_module(const char *name)
{
@@ -690,7 +678,7 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
	if (flags & O_NONBLOCK) {
		struct stopref sref = { mod, flags, forced };

		return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
		return stop_machine(__try_stop_module, &sref, NULL);
	} else {
		/* We don't need to stop the machine for this. */
		mod->state = MODULE_STATE_GOING;
@@ -1428,7 +1416,7 @@ static int __unlink_module(void *_mod)
static void free_module(struct module *mod)
{
	/* Delete from various lists */
	stop_machine_run(__unlink_module, mod, NR_CPUS);
	stop_machine(__unlink_module, mod, NULL);
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_remove(mod);
@@ -1703,6 +1691,19 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
}

#ifdef CONFIG_KALLSYMS

/* lookup symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_symbol(const char *name,
	const struct kernel_symbol *start,
	const struct kernel_symbol *stop)
{
	const struct kernel_symbol *ks = start;
	for (; ks < stop; ks++)
		if (strcmp(ks->name, name) == 0)
			return ks;
	return NULL;
}

static int is_exported(const char *name, const struct module *mod)
{
	if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
@@ -2196,7 +2197,7 @@ static struct module *load_module(void __user *umod,
	/* Now sew it into the lists so we can get lockdep and oops
         * info during argument parsing.  No one should access us, since
         * strong_try_module_get() will fail. */
	stop_machine_run(__link_module, mod, NR_CPUS);
	stop_machine(__link_module, mod, NULL);

	/* Size of section 0 is 0, so this works well if no params */
	err = parse_args(mod->name, mod->args,
@@ -2230,7 +2231,7 @@ static struct module *load_module(void __user *umod,
	return mod;

 unlink:
	stop_machine_run(__unlink_module, mod, NR_CPUS);
	stop_machine(__unlink_module, mod, NULL);
	module_arch_cleanup(mod);
 cleanup:
	kobject_del(&mod->mkobj.kobj);
Loading