Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a9d9baa1 authored by Ashok Raj's avatar Ashok Raj Committed by Linus Torvalds
Browse files

[PATCH] clean up lock_cpu_hotplug() in cpufreq



There are some callers in the cpufreq hotplug notify path where the
lowest-level function calls lock_cpu_hotplug().  The lock is already held during
cpu_up() and cpu_down() calls when the notify calls are broadcast to
registered clients.

Ideally, if possible, we could preempt_disable() at the highest caller and
make sure we don't sleep in the path down in the cpufreq->driver_target() calls,
but the calls are so intertwined that they are cumbersome to clean up.

Hence we consistently use lock_cpu_hotplug() and unlock_cpu_hotplug() in
all places.

 - Removed export of cpucontrol semaphore and made it static.
 - removed explicit uses of up/down with lock_cpu_hotplug()
   so we can keep track of the callers in the same thread context and
   just keep refcounts without calling a down() that causes a deadlock.
 - Removed current_in_hotplug() uses
 - Removed PF_HOTPLUG_CPU in sched.h introduced for the current_in_hotplug()
   temporary workaround.

Tested with insmod of cpufreq_stat.ko, and logical CPU online/offline
to make sure we don't have any hang situations.

Signed-off-by: default avatarAshok Raj <ashok.raj@intel.com>
Cc: Zwane Mwaikambo <zwane@linuxpower.ca>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent e0f39591
Loading
Loading
Loading
Loading
+2 −10
Original line number Diff line number Diff line
@@ -1113,20 +1113,12 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
{
	int retval = -EINVAL;

	/*
	 * If we are already in context of hotplug thread, we dont need to
	 * acquire the hotplug lock. Otherwise acquire cpucontrol to prevent
	 * hotplug from removing this cpu that we are working on.
	 */
	if (!current_in_cpu_hotplug())
	lock_cpu_hotplug();

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	if (!current_in_cpu_hotplug())
	unlock_cpu_hotplug();

	return retval;
+3 −4
Original line number Diff line number Diff line
@@ -65,10 +65,9 @@ extern struct sysdev_class cpu_sysdev_class;

#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
extern struct semaphore cpucontrol;
#define lock_cpu_hotplug()	down(&cpucontrol)
#define unlock_cpu_hotplug()	up(&cpucontrol)
#define lock_cpu_hotplug_interruptible() down_interruptible(&cpucontrol)
extern void lock_cpu_hotplug(void);
extern void unlock_cpu_hotplug(void);
extern int lock_cpu_hotplug_interruptible(void);
#define hotcpu_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
+0 −1
Original line number Diff line number Diff line
@@ -908,7 +908,6 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
#define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
#define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
#define PF_RANDOMIZE	0x00800000	/* randomize virtual address space */
#define PF_HOTPLUG_CPU	0x01000000	/* Currently performing CPU hotplug */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
+49 −34
Original line number Diff line number Diff line
@@ -16,47 +16,76 @@
#include <asm/semaphore.h>

/* This protects CPUs going up and down... */
DECLARE_MUTEX(cpucontrol);
EXPORT_SYMBOL_GPL(cpucontrol);
static DECLARE_MUTEX(cpucontrol);

static struct notifier_block *cpu_chain;

#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *lock_cpu_hotplug_owner;
static int lock_cpu_hotplug_depth;

static int __lock_cpu_hotplug(int interruptible)
{
	int ret = 0;

	if (lock_cpu_hotplug_owner != current) {
		if (interruptible)
			ret = down_interruptible(&cpucontrol);
		else
			down(&cpucontrol);
	}

	/*
 * Used to check by callers if they need to acquire the cpucontrol
 * or not to protect a cpu from being removed. Its sometimes required to
 * call these functions both for normal operations, and in response to
 * a cpu being added/removed. If the context of the call is in the same
 * thread context as a CPU hotplug thread, we dont need to take the lock
 * since its already protected
 * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
	 * Set only if we succeed in locking
	 */
	if (!ret) {
		lock_cpu_hotplug_depth++;
		lock_cpu_hotplug_owner = current;
	}

int current_in_cpu_hotplug(void)
	return ret;
}

void lock_cpu_hotplug(void)
{
	return (current->flags & PF_HOTPLUG_CPU);
	__lock_cpu_hotplug(0);
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);

EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
void unlock_cpu_hotplug(void)
{
	if (--lock_cpu_hotplug_depth == 0) {
		lock_cpu_hotplug_owner = NULL;
		up(&cpucontrol);
	}
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);

int lock_cpu_hotplug_interruptible(void)
{
	return __lock_cpu_hotplug(1);
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	if ((ret = down_interruptible(&cpucontrol)) != 0)
	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
		return ret;
	ret = notifier_chain_register(&cpu_chain, nb);
	up(&cpucontrol);
	unlock_cpu_hotplug();
	return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	down(&cpucontrol);
	lock_cpu_hotplug();
	notifier_chain_unregister(&cpu_chain, nb);
	up(&cpucontrol);
	unlock_cpu_hotplug();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
		goto out;
	}

	/*
	 * Leave a trace in current->flags indicating we are already in
	 * process of performing CPU hotplug. Callers can check if cpucontrol
	 * is already acquired by current thread, and if so not cause
	 * a dead lock by not acquiring the lock
	 */
	current->flags |= PF_HOTPLUG_CPU;
	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
						(void *)(long)cpu);
	if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ int cpu_down(unsigned int cpu)
out_allowed:
	set_cpus_allowed(current, old_allowed);
out:
	current->flags &= ~PF_HOTPLUG_CPU;
	unlock_cpu_hotplug();
	return err;
}
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
	int ret;
	void *hcpu = (void *)(long)cpu;

	if ((ret = down_interruptible(&cpucontrol)) != 0)
	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
		return ret;

	if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
		goto out;
	}

	/*
	 * Leave a trace in current->flags indicating we are already in
	 * process of performing CPU hotplug.
	 */
	current->flags |= PF_HOTPLUG_CPU;
	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
	if (ret == NOTIFY_BAD) {
		printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ int __devinit cpu_up(unsigned int cpu)
	if (ret != 0)
		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
	current->flags &= ~PF_HOTPLUG_CPU;
	up(&cpucontrol);
	unlock_cpu_hotplug();
	return ret;
}