Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 675a0813 authored by Harvey Harrison's avatar Harvey Harrison Committed by Ingo Molnar
Browse files

x86: unify mmap_{32|64}.c



mmap_is_ia32 is always true on X86_32, or when emulating IA32 on X86_64

Randomization not supported on X86_32 in legacy layout.  Both layouts allow
randomization on X86_64.

Signed-off-by: default avatarHarvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
Signed-off-by: default avatarThomas Gleixner <tglx@linutronix.de>
parent f3f20de8
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -2,7 +2,7 @@
# Makefile for the linux i386-specific parts of the memory manager.
# Makefile for the linux i386-specific parts of the memory manager.
#
#


obj-y	:= init_32.o pgtable_32.o fault_32.o ioremap_32.o extable_32.o pageattr_32.o mmap_32.o
obj-y	:= init_32.o pgtable_32.o fault_32.o ioremap_32.o extable_32.o pageattr_32.o mmap.o


obj-$(CONFIG_NUMA) += discontig_32.o
obj-$(CONFIG_NUMA) += discontig_32.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+1 −2
Original line number Original line Diff line number Diff line
@@ -2,9 +2,8 @@
# Makefile for the linux x86_64-specific parts of the memory manager.
# Makefile for the linux x86_64-specific parts of the memory manager.
#
#


obj-y	 := init_64.o fault_64.o ioremap_64.o extable_64.o pageattr_64.o mmap_64.o
obj-y	 := init_64.o fault_64.o ioremap_64.o extable_64.o pageattr_64.o mmap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa_64.o
obj-$(CONFIG_NUMA) += numa_64.o
obj-$(CONFIG_K8_NUMA) += k8topology_64.o
obj-$(CONFIG_K8_NUMA) += k8topology_64.o
obj-$(CONFIG_ACPI_NUMA) += srat_64.o
obj-$(CONFIG_ACPI_NUMA) += srat_64.o
obj-$(CONFIG_IA32_EMULATION) += mmap_32.o
+51 −44
Original line number Original line Diff line number Diff line
/*
/*
 *  linux/arch/x86-64/mm/mmap.c
 * Flexible mmap layout support
 *
 *  flexible mmap layout support
 *
 *
 * Based on code by Ingo Molnar and Andi Kleen, copyrighted
 * Based on code by Ingo Molnar and Andi Kleen, copyrighted
 * as follows:
 * as follows:
@@ -24,7 +22,6 @@
 * You should have received a copy of the GNU General Public License
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
 */


#include <linux/personality.h>
#include <linux/personality.h>
@@ -32,7 +29,6 @@
#include <linux/random.h>
#include <linux/random.h>
#include <linux/limits.h>
#include <linux/limits.h>
#include <linux/sched.h>
#include <linux/sched.h>
#include <asm/ia32.h>


/*
/*
 * Top of mmap area (just below the process stack).
 * Top of mmap area (just below the process stack).
@@ -42,20 +38,14 @@
#define MIN_GAP (128*1024*1024)
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (TASK_SIZE/6*5)
#define MAX_GAP (TASK_SIZE/6*5)


static unsigned long mmap_base(void)
/*
{
 * True on X86_32 or when emulating IA32 on X86_64
	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
 */

static int mmap_is_ia32(void)
	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return TASK_SIZE - (gap & PAGE_MASK);
}

static int mmap_is_32(void)
{
{
#ifdef CONFIG_X86_32
	return 1;
#endif
#ifdef CONFIG_IA32_EMULATION
#ifdef CONFIG_IA32_EMULATION
	if (test_thread_flag(TIF_IA32))
	if (test_thread_flag(TIF_IA32))
		return 1;
		return 1;
@@ -74,43 +64,60 @@ static int mmap_is_legacy(void)
	return sysctl_legacy_va_layout;
	return sysctl_legacy_va_layout;
}
}


static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	/*
	/*
 * This function, called very early during the creation of a new
	*  8 bits of randomness in 32bit mmaps, 20 address space bits
 * process VM image, sets up which VM layout function to use:
	* 28 bits of randomness in 64bit mmaps, 40 address space bits
	*/
	*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	int rnd = 0;
	if (current->flags & PF_RANDOMIZE) {
	if (current->flags & PF_RANDOMIZE) {
		if (mmap_is_ia32())
			rnd = (long)get_random_int() % (1<<8);
		else
			rnd = (long)(get_random_int() % (1<<28));
	}
	return rnd << PAGE_SHIFT;
}

static unsigned long mmap_base(void)
{
	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
}

/*
/*
		 * Add 28bit randomness which is about 40bits of address space
 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
		 * because mmap base has to be page aligned.
 * does, but not when emulating X86_32
		 * or ~1/128 of the total user VM
		 * (total user address space is 47bits)
 */
 */
		rnd = get_random_int() & 0xfffffff;
static unsigned long mmap_legacy_base(void)
{
	if (mmap_is_ia32())
		return TASK_UNMAPPED_BASE;
	else
		return TASK_UNMAPPED_BASE + mmap_rnd();
}
}


/*
/*
	 * Fall back to the standard layout if the personality
 * This function, called very early during the creation of a new
	 * bit is set, or if the expected stack growth is unlimited:
 * process VM image, sets up which VM layout function to use:
 */
 */
	if (mmap_is_32()) {
void arch_pick_mmap_layout(struct mm_struct *mm)
#ifdef CONFIG_IA32_EMULATION
{
		/* ia32_pick_mmap_layout has its own. */
	if (mmap_is_legacy()) {
		return ia32_pick_mmap_layout(mm);
		mm->mmap_base = mmap_legacy_base();
#endif
	} else if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
		mm->unmap_area = arch_unmap_area;
	} else {
	} else {
		mm->mmap_base = mmap_base();
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
		if (current->flags & PF_RANDOMIZE)
			rnd = -rnd;
	}
	}
	if (current->flags & PF_RANDOMIZE)
		mm->mmap_base += ((long)rnd) << PAGE_SHIFT;
}
}

arch/x86/mm/mmap_32.c

deleted100644 → 0
+0 −81
Original line number Original line Diff line number Diff line
/*
 *  linux/arch/i386/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave an at least ~128 MB hole.
 */
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (TASK_SIZE/6*5)

/*
 * Compute the top of the mmap area for the top-down layout: TASK_SIZE
 * minus the (clamped) stack rlimit, lowered by a small random offset
 * when address-space randomization is enabled for this task.
 *
 * NOTE: the mm argument is unused here; it is kept for the caller's
 * calling convention.
 */
static inline unsigned long mmap_base(struct mm_struct *mm)
{
	unsigned long stack_gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
	unsigned long rnd = 0;

	/* Clamp the gap between MIN_GAP and MAX_GAP. */
	if (stack_gap < MIN_GAP)
		stack_gap = MIN_GAP;
	else if (stack_gap > MAX_GAP)
		stack_gap = MAX_GAP;

	/* Up to 1MB of randomness below the stack gap. */
	if (current->flags & PF_RANDOMIZE)
		rnd = get_random_int() % (1024*1024);

	return PAGE_ALIGN(TASK_SIZE - stack_gap - rnd);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
#ifdef CONFIG_X86_32
void arch_pick_mmap_layout(struct mm_struct *mm)
#else
void ia32_pick_mmap_layout(struct mm_struct *mm)
#endif
{
	/*
	 * Called very early during creation of a new process VM image;
	 * selects legacy (bottom-up) or flexible (top-down) mmap layout.
	 *
	 * Legacy is forced by the sysctl, by the ADDR_COMPAT_LAYOUT
	 * personality bit, or when the stack may grow without limit.
	 */
	int want_legacy = sysctl_legacy_va_layout ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;

	if (!want_legacy) {
		mm->mmap_base = mmap_base(mm);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	} else {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	}
}