Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 29567292 authored by Linus Torvalds
Browse files

Merge tag 'for-linus-4.7-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel.

* tag 'for-linus-4.7-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: use same main loop for counting and remapping pages
  xen/events: Don't move disabled irqs
  xen/x86: actually allocate legacy interrupts on PV guests
  Xen: don't warn about 2-byte wchar_t in efi
  xen/gntdev: reduce copy batch size to 16
  xen/x86: don't lose event interrupts
parents ecaba718 dd14be92
Loading
Loading
Loading
Loading
+5 −2
Original line number Diff line number Diff line
@@ -491,8 +491,11 @@ int __init pci_xen_initial_domain(void)
#endif
	__acpi_register_gsi = acpi_register_gsi_xen;
	__acpi_unregister_gsi = NULL;
	/* Pre-allocate legacy irqs */
	for (irq = 0; irq < nr_legacy_irqs(); irq++) {
	/*
	 * Pre-allocate the legacy IRQs.  Use NR_LEGACY_IRQS here
	 * because we don't have a PIC and thus nr_legacy_irqs() is zero.
	 */
	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
		int trigger, polarity;

		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
+26 −39
Original line number Diff line number Diff line
@@ -393,6 +393,9 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
@@ -438,17 +441,29 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
	return remap_pfn;
}

static void __init xen_set_identity_and_remap(unsigned long nr_pages)
/*
 * Callback for xen_foreach_remap_area(): accumulate the number of pages
 * in the non-RAM region [start_pfn, end_pfn) that fall below nr_pages
 * (and therefore will need remapping) into the running total
 * @remap_pages.  Regions starting at or beyond nr_pages contribute
 * nothing.
 */
static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pages)
{
	if (start_pfn >= nr_pages)
		return remap_pages;

	/* Clamp the region's end to nr_pages before counting. */
	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}

static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	unsigned long ret_val = 0;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
@@ -466,14 +481,13 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						start_pfn, end_pfn, nr_pages,
						last_pfn);
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	pr_info("Released %ld page(s)\n", xen_released_pages);
	return ret_val;
}

/*
@@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(void)
	}
}

/*
 * Count the extra page frames needed for remapping by walking the Xen
 * e820 map once: holes between entries and non-RAM entries both count,
 * clipped to max_pfn.  (This is the standalone-walk variant; the commit
 * replaces it with a per-region callback driven by the shared e820
 * iteration loop.)
 */
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	unsigned long extra = 0;
	unsigned long start_pfn, end_pfn;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/* end_pfn tracks the (page-rounded) end of the previous entry. */
	end_pfn = 0;
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		start_pfn = PFN_DOWN(entry->addr);
		/* Adjacent regions on non-page boundaries handling! */
		end_pfn = min(end_pfn, start_pfn);

		/* Entry begins past the limit: count the final hole and stop. */
		if (start_pfn >= max_pfn)
			return extra + max_pfn - end_pfn;

		/* Add any holes in map to result. */
		extra += start_pfn - end_pfn;

		end_pfn = PFN_UP(entry->addr + entry->size);
		end_pfn = min(end_pfn, max_pfn);

		/* Non-RAM regions get a 1:1 map and are remapped elsewhere. */
		if (entry->type != E820_RAM)
			extra += end_pfn - start_pfn;
	}

	return extra;
}

bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820entry *entry;
@@ -804,7 +789,7 @@ char * __init xen_memory_setup(void)
	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_count_remap_pages(max_pfn);
	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;
@@ -922,7 +907,9 @@ char * __init xen_memory_setup(void)
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(max_pfn);
	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}
+3 −3
Original line number Diff line number Diff line
@@ -290,11 +290,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
	WARN_ON(!clockevent_state_oneshot(evt));

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;
	/* Get an event anyway, even if the timeout is already expired */
	single.flags = 0;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);
	BUG_ON(ret != 0);

	return ret;
}
+1 −0
Original line number Diff line number Diff line
@@ -8,6 +8,7 @@ nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_features.o			:= $(nostackp)

CFLAGS_efi.o				+= -fshort-wchar
LDFLAGS					+= $(call ld-option, --no-wchar-size-warning)

dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
+4 −2
Original line number Diff line number Diff line
@@ -487,7 +487,8 @@ static void eoi_pirq(struct irq_data *data)
	if (!VALID_EVTCHN(evtchn))
		return;

	if (unlikely(irqd_is_setaffinity_pending(data))) {
	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data))) {
		int masked = test_and_set_mask(evtchn);

		clear_evtchn(evtchn);
@@ -1370,7 +1371,8 @@ static void ack_dynirq(struct irq_data *data)
	if (!VALID_EVTCHN(evtchn))
		return;

	if (unlikely(irqd_is_setaffinity_pending(data))) {
	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data))) {
		int masked = test_and_set_mask(evtchn);

		clear_evtchn(evtchn);
Loading