Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d7a0dab8 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'core-smp-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core SMP updates from Ingo Molnar:
 "The two main changes are generic vCPU pinning and physical CPU SMP-call
  support, for Xen to be able to perform certain calls on specific
  physical CPUs - by Juergen Gross"

* 'core-smp-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smp: Allocate smp_call_on_cpu() workqueue on stack too
  hwmon: Use smp_call_on_cpu() for dell-smm i8k
  dcdbas: Make use of smp_call_on_cpu()
  xen: Add xen_pin_vcpu() to support calling functions on a dedicated pCPU
  smp: Add function to execute a function synchronously on a CPU
  virt, sched: Add generic vCPU pinning support
  xen: Sync xen header
parents 4b978934 8db54949
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -8860,6 +8860,7 @@ S: Supported
F:	Documentation/virtual/paravirt_ops.txt
F:	arch/*/kernel/paravirt*
F:	arch/*/include/asm/paravirt.h
F:	include/linux/hypervisor.h

PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
M:	Tim Waugh <tim@cyberelk.net>
+4 −0
Original line number Diff line number Diff line
@@ -43,6 +43,9 @@ struct hypervisor_x86 {

	/* X2APIC detection (run once per boot) */
	bool		(*x2apic_available)(void);

	/* pin current vcpu to specified physical cpu (run rarely) */
	void		(*pin_vcpu)(int);
};

extern const struct hypervisor_x86 *x86_hyper;
@@ -56,6 +59,7 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
extern void init_hypervisor(struct cpuinfo_x86 *c);
extern void init_hypervisor_platform(void);
extern bool hypervisor_x2apic_available(void);
extern void hypervisor_pin_vcpu(int cpu);
#else
static inline void init_hypervisor(struct cpuinfo_x86 *c) { }
static inline void init_hypervisor_platform(void) { }
+11 −0
Original line number Diff line number Diff line
@@ -86,3 +86,14 @@ bool __init hypervisor_x2apic_available(void)
	       x86_hyper->x2apic_available &&
	       x86_hyper->x2apic_available();
}

/*
 * Request the detected hypervisor to pin the current vCPU to physical
 * CPU @cpu.  A no-op when no hypervisor is present; warns once when the
 * hypervisor does not implement the pin_vcpu callback.
 */
void hypervisor_pin_vcpu(int cpu)
{
	if (!x86_hyper)
		return;

	if (!x86_hyper->pin_vcpu) {
		WARN_ONCE(1, "vcpu pinning requested but not supported!\n");
		return;
	}

	x86_hyper->pin_vcpu(cpu);
}
+40 −0
Original line number Diff line number Diff line
@@ -1925,6 +1925,45 @@ static void xen_set_cpu_features(struct cpuinfo_x86 *c)
	}
}

static void xen_pin_vcpu(int cpu)
{
	/* Set once pinning fails in a non-recoverable way; suppresses
	 * all further pinning attempts for the lifetime of the kernel. */
	static bool disable_pinning;
	struct sched_pin_override pin_override;
	int ret;

	if (disable_pinning)
		return;

	pin_override.pcpu = cpu;
	ret = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin_override);

	/* Ignore errors when removing override. */
	if (cpu < 0)
		return;

	if (ret == 0)
		return;

	if (ret == -ENOSYS) {
		/* Hypervisor lacks the pin-override op entirely. */
		pr_warn("Unable to pin on physical cpu %d. In case of problems consider vcpu pinning.\n",
			cpu);
		disable_pinning = true;
	} else if (ret == -EPERM) {
		/* Domain is not privileged to pin; do not retry. */
		WARN(1, "Trying to pin vcpu without having privilege to do so\n");
		disable_pinning = true;
	} else if (ret == -EINVAL || ret == -EBUSY) {
		/* Transient/configuration problem: keep pinning enabled. */
		pr_warn("Physical cpu %d not available for pinning. Check Xen cpu configuration.\n",
			cpu);
	} else {
		/* Unexpected return code from the hypervisor. */
		WARN(1, "rc %d while trying to pin vcpu\n", ret);
		disable_pinning = true;
	}
}

const struct hypervisor_x86 x86_hyper_xen = {
	.name			= "Xen",
	.detect			= xen_platform,
@@ -1933,6 +1972,7 @@ const struct hypervisor_x86 x86_hyper_xen = {
#endif
	.x2apic_available	= xen_x2apic_para_available,
	.set_cpu_features       = xen_set_cpu_features,
	.pin_vcpu               = xen_pin_vcpu,
};
EXPORT_SYMBOL(x86_hyper_xen);

+26 −25
Original line number Diff line number Diff line
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -238,33 +239,14 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
	return count;
}

/**
 * dcdbas_smi_request: generate SMI request
 *
 * Called with smi_data_lock.
 */
int dcdbas_smi_request(struct smi_cmd *smi_cmd)
static int raise_smi(void *par)
{
	cpumask_var_t old_mask;
	int ret = 0;

	if (smi_cmd->magic != SMI_CMD_MAGIC) {
		dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
			 __func__);
		return -EBADR;
	}
	struct smi_cmd *smi_cmd = par;

	/* SMI requires CPU 0 */
	if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(old_mask, &current->cpus_allowed);
	set_cpus_allowed_ptr(current, cpumask_of(0));
	if (smp_processor_id() != 0) {
		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
			__func__);
		ret = -EBUSY;
		goto out;
		return -EBUSY;
	}

	/* generate SMI */
@@ -280,9 +262,28 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
		: "memory"
	);

out:
	set_cpus_allowed_ptr(current, old_mask);
	free_cpumask_var(old_mask);
	return 0;
}
/**
 * dcdbas_smi_request: generate SMI request
 *
 * Called with smi_data_lock.
 */
int dcdbas_smi_request(struct smi_cmd *smi_cmd)
{
	int status;

	/* Reject commands that do not carry the expected magic value. */
	if (smi_cmd->magic != SMI_CMD_MAGIC) {
		dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
			 __func__);
		return -EBADR;
	}

	/*
	 * The SMI must be raised from CPU 0.  Run raise_smi() there via
	 * smp_call_on_cpu(), holding the hotplug lock so CPU 0 cannot go
	 * offline while the call is in flight.
	 */
	get_online_cpus();
	status = smp_call_on_cpu(0, raise_smi, smi_cmd, true);
	put_online_cpus();

	return status;
}

Loading