
Commit d936d2d4 authored by Linus Torvalds

Merge tag 'stable/for-linus-3.11-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen bug-fixes from Konrad Rzeszutek Wilk:
 - On ARM, calls to get_cpu/put_cpu were not balanced.
 - Fix to make tboot + Xen + Linux work correctly.
 - Fix event channel VCPU binding issues.
 - Fix a vCPU online race where IPIs are sent to a not-yet-online vCPU.

* tag 'stable/for-linus-3.11-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/smp: initialize IPI vectors before marking CPU online
  xen/events: mask events when changing their VCPU binding
  xen/events: initialize local per-cpu mask for all possible events
  x86/xen: do not identity map UNUSABLE regions in the machine E820
  xen/arm: missing put_cpu in xen_percpu_init
parents 0903391a fc78d343
arch/arm/xen/enlighten.c  +1 −0
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused)
 	per_cpu(xen_vcpu, cpu) = vcpup;
 
 	enable_percpu_irq(xen_events_irq, 0);
+	put_cpu();
 }
 
 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
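
For context, xen_percpu_init() runs with the CPU pinned via get_cpu(), which disables preemption until a matching put_cpu(); the one-line fix restores that pairing. Below is a minimal userspace sketch of the invariant, with get_cpu()/put_cpu() mocked by a depth counter (illustrative stand-ins, not the kernel implementation):

/*
 * Hypothetical model of the get_cpu()/put_cpu() pairing the ARM fix
 * restores; the real primitives disable and re-enable preemption,
 * modelled here with a simple depth counter.
 */
#include <assert.h>
#include <stdio.h>

static int preempt_depth;	/* models the kernel's preempt count */

static int get_cpu(void)	/* mock: "disable preemption", return CPU id */
{
	preempt_depth++;
	return 0;		/* pretend we always run on CPU 0 */
}

static void put_cpu(void)	/* mock: "re-enable preemption" */
{
	assert(preempt_depth > 0);
	preempt_depth--;
}

static void xen_percpu_init_model(void)
{
	int cpu = get_cpu();	/* pairs with put_cpu() below */

	printf("initializing Xen per-cpu state for CPU %d\n", cpu);
	/* ... register vcpu info, enable the events IRQ ... */

	put_cpu();		/* the line the fix adds; without it,
				 * "preemption" stays disabled forever */
}

int main(void)
{
	xen_percpu_init_model();
	assert(preempt_depth == 0);	/* balanced again after the call */
	return 0;
}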
arch/x86/xen/setup.c  +22 −0
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
 	e820_add_region(start, end - start, type);
 }
 
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+	struct e820entry *entry;
+	unsigned int i;
+
+	for (i = 0, entry = list; i < map_size; i++, entry++) {
+		if (entry->type == E820_UNUSABLE)
+			entry->type = E820_RAM;
+	}
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
 	}
 	BUG_ON(rc);
 
+	/*
+	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+	 * regions, so if we're using the machine memory map leave the
+	 * region as RAM as it is in the pseudo-physical map.
+	 *
+	 * UNUSABLE regions in domUs are not handled and will need
+	 * a patch in the future.
+	 */
+	if (xen_initial_domain())
+		xen_ignore_unusable(map, memmap.nr_entries);
+
 	/* Make sure the Xen-supplied memory map is well-ordered. */
 	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
 
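The added xen_ignore_unusable() walk rewrites E820_UNUSABLE entries to E820_RAM before the map is sanitized, so dom0 keeps those ranges as RAM in its pseudo-physical map rather than asking Xen for 1:1 mappings it would refuse. A minimal userspace sketch of the same walk over a mock map (types follow the usual E820 numbering, but the map contents are invented for illustration):

#include <stdio.h>
#include <stddef.h>

enum { E820_RAM = 1, E820_RESERVED = 2, E820_UNUSABLE = 5 };

struct e820entry {
	unsigned long long addr;
	unsigned long long size;
	int type;
};

/* Reclassify UNUSABLE regions as RAM, mirroring the patch above. */
static void ignore_unusable(struct e820entry *list, size_t map_size)
{
	for (size_t i = 0; i < map_size; i++)
		if (list[i].type == E820_UNUSABLE)
			list[i].type = E820_RAM;
}

int main(void)
{
	struct e820entry map[] = {	/* invented example map */
		{ 0x0,      0x9fc00,  E820_RAM },
		{ 0x9fc00,  0x400,    E820_RESERVED },
		{ 0x100000, 0x100000, E820_UNUSABLE },
	};
	size_t n = sizeof(map) / sizeof(map[0]);

	ignore_unusable(map, n);
	for (size_t i = 0; i < n; i++)
		printf("%#llx-%#llx type %d\n", map[i].addr,
		       map[i].addr + map[i].size, map[i].type);
	return 0;
}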
arch/x86/xen/smp.c  +9 −2
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
-	rc = native_cpu_up(cpu, tidle);
-	WARN_ON(xen_smp_intr_init(cpu));
+	/*
+	 * xen_smp_intr_init() needs to run before native_cpu_up()
+	 * so that IPI vectors are set up on the booting CPU before
+	 * it is marked online in native_cpu_up().
+	 */
+	rc = xen_smp_intr_init(cpu);
+	WARN_ON(rc);
+	if (!rc)
+		rc = native_cpu_up(cpu, tidle);
 	return rc;
 }
 
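The reordering matters because other CPUs consult only the online mask before sending IPIs: once native_cpu_up() publishes a CPU as online, its IPI vectors must already be installed. A hypothetical single-file model of that ordering constraint (all names here are illustrative, not kernel APIs):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	bool ipi_ready;	/* set by the xen_smp_intr_init() analogue */
	bool online;	/* set by the native_cpu_up() analogue */
};

static void send_ipi(const struct cpu_state *cpu)
{
	if (cpu->online)		/* senders check only "online"... */
		assert(cpu->ipi_ready);	/* ...so this must already hold */
}

static int cpu_up_fixed(struct cpu_state *cpu)
{
	cpu->ipi_ready = true;	/* set up IPIs first, as the patch does */
	cpu->online = true;	/* only then publish the CPU as online */
	return 0;
}

int main(void)
{
	struct cpu_state cpu = { false, false };

	cpu_up_fixed(&cpu);
	send_ipi(&cpu);		/* safe: ipi_ready preceded online */
	printf("ok\n");
	return 0;
}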
drivers/xen/events.c  +12 −1
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void)
 
 	for_each_possible_cpu(i)
 		memset(per_cpu(cpu_evtchn_mask, i),
-		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+		       (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
 }
 
 static inline void clear_evtchn(int port)
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
+	struct shared_info *s = HYPERVISOR_shared_info;
 	struct evtchn_bind_vcpu bind_vcpu;
 	int evtchn = evtchn_from_irq(irq);
+	int masked;
 
 	if (!VALID_EVTCHN(evtchn))
 		return -1;
@@ -1510,6 +1512,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 	bind_vcpu.port = evtchn;
 	bind_vcpu.vcpu = tcpu;
 
+	/*
+	 * Mask the event while changing the VCPU binding to prevent
+	 * it being delivered on an unexpected VCPU.
+	 */
+	masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+
 	/*
 	 * If this fails, it usually just indicates that we're dealing with a
 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
 		bind_evtchn_to_cpu(evtchn, tcpu);
 
+	if (!masked)
+		unmask_evtchn(evtchn);
+
 	return 0;
 }
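
The pattern in rebind_irq_to_cpu() is mask, rebind, then conditionally unmask: sync_test_and_set_bit() records whether the channel was already masked, so the code unmasks only a channel it masked itself and a deliberately masked channel stays masked. A userspace sketch of that pattern, with plain non-atomic bit helpers standing in for the kernel's sync_* operations:

#include <stdbool.h>
#include <stdio.h>

static unsigned long evtchn_mask;	/* one mask bit per mock channel */

static bool test_and_set_bit(int nr, unsigned long *addr)
{
	bool old = *addr & (1UL << nr);

	*addr |= 1UL << nr;
	return old;	/* true if the bit was already set */
}

static void clear_bit(int nr, unsigned long *addr)
{
	*addr &= ~(1UL << nr);
}

static void rebind_to_cpu(int evtchn, int tcpu)
{
	/* Mask so the event cannot fire on an unexpected VCPU mid-rebind. */
	bool masked = test_and_set_bit(evtchn, &evtchn_mask);

	printf("binding channel %d to vcpu %d\n", evtchn, tcpu);

	/* Undo only a mask we set ourselves. */
	if (!masked)
		clear_bit(evtchn, &evtchn_mask);
}

int main(void)
{
	rebind_to_cpu(3, 1);	/* channel was unmasked: ends unmasked */
	return 0;
}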