
Commit dec4ad86 authored by David Gibson, committed by Linus Torvalds

hugepage: fix broken check for offset alignment in hugepage mappings



For hugepage mappings, the file offset, like the address and size, needs to
be aligned to the size of a hugepage.
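
As an illustration (not part of the patch, and assuming 2 MB hugepages with 4 KB base pages, e.g. x86), the offset handed to mmap() has to satisfy a simple mask test against the hugepage size:

#include <stdio.h>

/* Illustrative constant only: assume 2 MB hugepages. */
#define HPAGE_SIZE	(2UL * 1024 * 1024)

/* A file offset is usable for a hugepage mapping only if it is a
 * whole multiple of the hugepage size. */
static int hugepage_aligned(unsigned long off)
{
	return (off & (HPAGE_SIZE - 1)) == 0;
}

int main(void)
{
	printf("0x200000 -> %d\n", hugepage_aligned(0x200000));	/* aligned */
	printf("0x201000 -> %d\n", hugepage_aligned(0x201000));	/* misaligned */
	return 0;
}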

In commit 68589bc3, the check for this was
moved into prepare_hugepage_range() along with the address and size checks.
 But since BenH's rework of the get_unmapped_area() paths leading up to
commit 4b1d8929, prepare_hugepage_range()
is only called for MAP_FIXED mappings, not for other mappings.  This means
we're no longer ever checking for an aligned offset - I've confirmed that
mmap() will (apparently) succeed with a misaligned offset on both powerpc
and i386 at least.
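
A minimal reproducer sketch for the failure mode described above (the /dev/hugepages mount point, the file name, and the 2 MB hugepage size are assumptions for illustration, not taken from the report): on an affected kernel the mmap() below apparently succeeds even though the offset is only one base page and therefore misaligned; with the check in place it should fail with EINVAL.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed hugepage size */

int main(void)
{
	/* Assumed hugetlbfs mount point and test file name. */
	int fd = open("/dev/hugepages/align-test", O_CREAT | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* An offset of 4 KB is not a multiple of the hugepage size, so a
	 * correct kernel should reject this mapping with EINVAL. */
	void *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 4096);

	if (p == MAP_FAILED)
		perror("mmap (expected failure on fixed kernels)");
	else
		printf("mmap succeeded at %p despite the misaligned offset\n", p);

	close(fd);
	return 0;
}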

This patch restores the check, removing it from prepare_hugepage_range()
and putting it back into hugetlbfs_file_mmap().  I'm putting it there,
rather than in the get_unmapped_area() path, so it only needs to go in one
place, rather than separately in the half-dozen or so arch-specific
implementations of hugetlb_get_unmapped_area().
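
The restored check (see the hugetlbfs_file_mmap() hunk below) works on vma->vm_pgoff, which counts base pages rather than bytes, so the hugepage mask is shifted down by PAGE_SHIFT before masking.  A standalone sketch of that arithmetic, with 4 KB base pages and 2 MB hugepages assumed purely for illustration:

#include <stdio.h>

/* Illustrative constants: 4 KB base pages, 2 MB hugepages. */
#define PAGE_SHIFT	12
#define HPAGE_SHIFT	21
#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

/* Mirrors the check added to hugetlbfs_file_mmap(): vm_pgoff is in
 * PAGE_SIZE units, so it must be a multiple of HPAGE_SIZE/PAGE_SIZE
 * (512 with these constants) to be hugepage aligned. */
static int pgoff_misaligned(unsigned long pgoff)
{
	return (pgoff & ~(HPAGE_MASK >> PAGE_SHIFT)) != 0;
}

int main(void)
{
	printf("pgoff 512: %s\n", pgoff_misaligned(512) ? "reject" : "ok");	/* 2 MB offset */
	printf("pgoff 513: %s\n", pgoff_misaligned(513) ? "reject" : "ok");	/* 2 MB + 4 KB */
	return 0;
}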

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4a58448b
+1 −1
@@ -367,7 +367,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
+2 −4
@@ -75,10 +75,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
@@ -151,7 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 
 	/* Handle MAP_FIXED */
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
+1 −1
@@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
+10 −5
@@ -82,14 +82,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	int ret;
 
 	/*
-	 * vma alignment has already been checked by prepare_hugepage_range.
-	 * If you add any error returns here, do so after setting VM_HUGETLB,
-	 * so is_vm_hugetlb_page tests below unmap_region go the right way
-	 * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+	 * vma address alignment (but not the pgoff alignment) has
+	 * already been checked by prepare_hugepage_range.  If you add
+	 * any error returns here, do so after setting VM_HUGETLB, so
+	 * is_vm_hugetlb_page tests below unmap_region go the right
+	 * way when do_mmap_pgoff unwinds (may be important on powerpc
+	 * and ia64).
 	 */
 	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
 	vma->vm_ops = &hugetlb_vm_ops;
 
+	if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT))
+		return -EINVAL;
+
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
 	mutex_lock(&inode->i_mutex);
@@ -132,7 +137,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
+3 −7
@@ -66,11 +66,8 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
-						pgoff_t pgoff)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
@@ -78,8 +75,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
 	return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len,
-						pgoff_t pgoff);
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
@@ -117,7 +113,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)		0
 #define hugetlb_report_node_meminfo(n, buf)	0
 #define follow_huge_pmd(mm, addr, pmd, write)	NULL
-#define prepare_hugepage_range(addr,len,pgoff)	(-EINVAL)
+#define prepare_hugepage_range(addr,len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })