
Commit 55db493b authored by Linus Torvalds

Merge branch 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  cpumask: rename tsk_cpumask to tsk_cpus_allowed
  cpumask: don't recommend set_cpus_allowed hack in Documentation/cpu-hotplug.txt
  cpumask: avoid dereferencing struct cpumask
  cpumask: convert drivers/idle/i7300_idle.c to cpumask_var_t
  cpumask: use modern cpumask style in drivers/scsi/fcoe/fcoe.c
  cpumask: avoid deprecated function in mm/slab.c
  cpumask: use cpu_online in kernel/perf_event.c
parents efc8e7f4 a4636818
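
The unifying theme of the series is the modern cpumask API: operate on struct cpumask through pointers instead of copying NR_CPUS-sized masks by value on the stack. A minimal before/after sketch of the pattern (illustrative only, not taken from any single patch):

	/* old style: full NR_CPUS-bit mask on the stack, ops by value */
	cpumask_t mask = CPU_MASK_NONE;
	cpu_set(cpu, mask);
	if (cpus_weight(mask) == num_online_cpus())
		/* ... */;

	/* modern style: pointer-based ops; with CONFIG_CPUMASK_OFFSTACK=y
	 * the mask is heap-allocated, so allocation can fail */
	cpumask_var_t mask;
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_set_cpu(cpu, mask);
	if (cpumask_weight(mask) == num_online_cpus())
		/* ... */;
	free_cpumask_var(mask);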
Documentation/cpu-hotplug.txt +17 −32
@@ -315,42 +315,27 @@ A: The following are what is required for CPU hotplug infrastructure to work
 
 Q: I need to ensure that a particular cpu is not removed when there is some
    work specific to this cpu is in progress.
-A: First switch the current thread context to preferred cpu
+A: There are two ways.  If your code can be run in interrupt context, use
+   smp_call_function_single(), otherwise use work_on_cpu().  Note that
+   work_on_cpu() is slow, and can fail due to out of memory:
 
 	int my_func_on_cpu(int cpu)
 	{
-		cpumask_t saved_mask, new_mask = CPU_MASK_NONE;
-		int curr_cpu, err = 0;
-
-		saved_mask = current->cpus_allowed;
-		cpu_set(cpu, new_mask);
-		err = set_cpus_allowed(current, new_mask);
-
-		if (err)
-			return err;
-
-		/*
-		 * If we got scheduled out just after the return from
-		 * set_cpus_allowed() before running the work, this ensures
-		 * we stay locked.
-		 */
-		curr_cpu = get_cpu();
-
-		if (curr_cpu != cpu) {
-			err = -EAGAIN;
-			goto ret;
-		} else {
-			/*
-			 * Do work : But cant sleep, since get_cpu() disables preempt
-			 */
-		}
-		ret:
-			put_cpu();
-			set_cpus_allowed(current, saved_mask);
+		int err;
+		get_online_cpus();
+		if (!cpu_online(cpu))
+			err = -EINVAL;
+		else
+#if NEEDS_BLOCKING
+			err = work_on_cpu(cpu, __my_func_on_cpu, NULL);
+#else
+			smp_call_function_single(cpu, __my_func_on_cpu, &err,
+						 true);
+#endif
+		put_online_cpus();
 		return err;
 	}
 
 
 Q: How do we determine how many CPUs are available for hotplug.
 A: There is no clear spec defined way from ACPI that can give us that
    information today. Based on some input from Natalie of Unisys,
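
The example above leaves __my_func_on_cpu undefined, and the two helpers in fact expect different callback types: smp_call_function_single() runs a void (*)(void *) with interrupts disabled, while work_on_cpu() runs a long (*)(void *) from a workqueue where sleeping is allowed. Hypothetical callbacks matching each branch (not part of the patch):

	/* for the smp_call_function_single() branch: must not sleep */
	static void __my_func_on_cpu(void *info)
	{
		int *err = info;

		*err = 0;	/* do the non-blocking per-cpu work here */
	}

	/* for the work_on_cpu() branch instead: may sleep, result
	 * is returned as a long */
	static long __my_func_on_cpu(void *unused)
	{
		return 0;	/* do the blocking per-cpu work here */
	}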
arch/x86/kernel/cpu/cpufreq/powernow-k8.c +1 −1
@@ -1136,7 +1136,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
 		return -ENOMEM;
 
-	cpumask_copy(oldmask, tsk_cpumask(current));
+	cpumask_copy(oldmask, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
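
For context, the changed line sits inside the usual save/pin/restore affinity dance around work that must execute on a specific cpu. A condensed sketch of that surrounding pattern (reconstructed; only the copy and pin lines appear in the hunk):

	cpumask_var_t oldmask;

	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(oldmask, tsk_cpus_allowed(current));	/* save    */
	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));	/* pin     */
	/* ... frequency transition that must run on pol->cpu ... */
	set_cpus_allowed_ptr(current, oldmask);			/* restore */
	free_cpumask_var(oldmask);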
drivers/idle/i7300_idle.c +9 −6
@@ -81,7 +81,7 @@ static u8 i7300_idle_thrtctl_saved;
 static u8 i7300_idle_thrtlow_saved;
 static u32 i7300_idle_mc_saved;
 
-static cpumask_t idle_cpumask;
+static cpumask_var_t idle_cpumask;
 static ktime_t start_ktime;
 static unsigned long avg_idle_us;
 
@@ -459,9 +459,9 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
 	spin_lock_irqsave(&i7300_idle_lock, flags);
 	if (val == IDLE_START) {
 
-		cpu_set(smp_processor_id(), idle_cpumask);
+		cpumask_set_cpu(smp_processor_id(), idle_cpumask);
 
-		if (cpus_weight(idle_cpumask) != num_online_cpus())
+		if (cpumask_weight(idle_cpumask) != num_online_cpus())
 			goto end;
 
 		now_ktime = ktime_get();
@@ -478,8 +478,8 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
 		i7300_idle_ioat_start();
 
 	} else if (val == IDLE_END) {
-		cpu_clear(smp_processor_id(), idle_cpumask);
-		if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
+		cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
+		if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
 			/* First CPU coming out of idle */
 			u64 idle_duration_us;
 
@@ -553,7 +553,6 @@ struct debugfs_file_info {
 static int __init i7300_idle_init(void)
 {
 	spin_lock_init(&i7300_idle_lock);
-	cpus_clear(idle_cpumask);
 	total_us = 0;
 
 	if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
@@ -565,6 +564,9 @@ static int __init i7300_idle_init(void)
 	if (i7300_idle_ioat_init())
 		return -ENODEV;
 
+	if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
 	debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
 	if (debugfs_dir) {
 		int i = 0;
@@ -589,6 +591,7 @@ static int __init i7300_idle_init(void)
 static void __exit i7300_idle_exit(void)
 {
 	idle_notifier_unregister(&i7300_idle_nb);
+	free_cpumask_var(idle_cpumask);
 
 	if (debugfs_dir) {
 		int i = 0;
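
The conversion trades the static cpumask_t (and its explicit cpus_clear() at init) for an allocated cpumask_var_t: zalloc_cpumask_var() hands back a zeroed mask, so the old clear becomes redundant, and free_cpumask_var() in the exit path releases the heap allocation made when CONFIG_CPUMASK_OFFSTACK=y (it is a no-op otherwise). The lifecycle in isolation, as a sketch with hypothetical names:

	static cpumask_var_t mask;

	static int __init example_init(void)
	{
		/* allocates and zeroes; can fail only when masks are off-stack */
		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;
		return 0;
	}

	static void __exit example_exit(void)
	{
		free_cpumask_var(mask);
	}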
drivers/scsi/fcoe/fcoe.c +1 −1
@@ -1260,7 +1260,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 				"CPU.\n");
 
 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
-		cpu = first_cpu(cpu_online_map);
+		cpu = cpumask_first(cpu_online_mask);
 		fps = &per_cpu(fcoe_percpu, cpu);
 		spin_lock_bh(&fps->fcoe_rx_list.lock);
 		if (!fps->thread) {
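
first_cpu(cpu_online_map) read the global cpumask_t by value; the replacement takes the const pointer cpu_online_mask instead. The fallback-CPU idiom in isolation, as a sketch (the error path is hypothetical):

	/* lowest-numbered online cpu; returns >= nr_cpu_ids if none */
	unsigned int cpu = cpumask_first(cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -ENODEV;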
include/linux/sched.h +1 −1
@@ -1553,7 +1553,7 @@ struct task_struct {
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
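
The accessor exists so callers never touch p->cpus_allowed directly, keeping them insulated from a future change in how the mask is stored (hence "future-safe"). Typical use, as a sketch:

	/* may task p run on this cpu? */
	if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		/* cpu is in p's affinity mask */;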