Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 505569d2 authored by Linus Torvalds
Browse files

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Misc fixes: two vdso fixes, two kbuild fixes and a boot failure fix
  with certain odd memory mappings"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, vdso: Use asm volatile in __getcpu
  x86/build: Clean auto-generated processor feature files
  x86: Fix mkcapflags.sh bash-ism
  x86: Fix step size adjustment during initial memory mapping
  x86_64, vdso: Fix the vdso address randomization algorithm
parents 5ab551d6 2aba73a6
[Changed-file list did not render in this capture; the five diff hunks follow below.]
+1 −0
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@ targets += cpustr.h
$(obj)/cpustr.h: $(obj)/mkcpustr FORCE
	$(call if_changed,cpustr)
endif
clean-files += cpustr.h

# ---------------------------------------------------------------------------

+4 −2
Original line number Diff line number Diff line
@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.
	 * works on all CPUs.  This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 */
	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

	return p;
}
+1 −0
Original line number Diff line number Diff line
@@ -66,3 +66,4 @@ targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
	$(call if_changed,mkcapflags)
endif
clean-files += capflags.c
+1 −1
Original line number Diff line number Diff line
@@ -28,7 +28,7 @@ function dump_array()
		# If the /* comment */ starts with a quote string, grab that.
		VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
		[ -z "$VALUE" ] && VALUE="\"$NAME\""
		[ "$VALUE" == '""' ] && continue
		[ "$VALUE" = '""' ] && continue

		# Name is uppercase, VALUE is all lowercase
		VALUE="$(echo "$VALUE" | tr A-Z a-z)"
+17 −20
Original line number Diff line number Diff line
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Explain why we shift by 5 and why we don't have to worry about
	 * 'step_size << 5' overflowing:
	 *
	 * initial mapped size is PMD_SIZE (2M).
	 * Initial mapped size is PMD_SIZE (2M).
	 * We can not set step_size to be PUD_SIZE (1G) yet.
	 * In worse case, when we cross the 1G boundary, and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map 1G range with PTE. Use 5 as shift for now.
	 * to map 1G range with PTE. Hence we use one less than the
	 * difference of page table level shifts.
	 *
	 * Don't need to worry about overflow, on 32bit, when step_size
	 * is 0, round_down() returns 0 for start, and that turns it
	 * into 0x100000000ULL.
	 * Don't need to worry about overflow in the top-down case, on 32bit,
	 * when step_size is 0, round_down() returns 0 for start, and that
	 * turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
	 * needs to be taken into consideration by the code below.
	 */
	return step_size << 5;
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;
	unsigned long new_mapped_ram_size;

	/* xen has big range in reserved near end of ram, skip it at first.*/
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
				start = map_start;
		} else
			start = map_start;
		new_mapped_ram_size = init_range_memory_mapping(start,
		mapped_ram_size += init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		/* only increase step_size after big range get mapped */
		if (new_mapped_ram_size > mapped_ram_size)
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}

	if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, new_mapped_ram_size, start;
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size need to be small so pgt_buf from BRK could cover it */
	unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
	 * for page table.
	 */
	while (start < map_end) {
		if (map_end - start > step_size) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else
		} else {
			next = map_end;
		}

		new_mapped_ram_size = init_range_memory_mapping(start, next);
		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (new_mapped_ram_size > mapped_ram_size)
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}
}

Loading