Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f36b7534 authored by Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "13 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, thp: do not cause memcg oom for thp
  mm/vmscan: wake up flushers for legacy cgroups too
  Revert "mm: page_alloc: skip over regions of invalid pfns where possible"
  mm/shmem: do not wait for lock_page() in shmem_unused_huge_shrink()
  mm/thp: do not wait for lock_page() in deferred_split_scan()
  mm/khugepaged.c: convert VM_BUG_ON() to collapse fail
  x86/mm: implement free pmd/pte page interfaces
  mm/vmalloc: add interfaces to free unmapped page table
  h8300: remove extraneous __BIG_ENDIAN definition
  hugetlbfs: check for pgoff value overflow
  lockdep: fix fs_reclaim warning
  MAINTAINERS: update Mark Fasheh's e-mail
  mm/mempolicy.c: avoid use uninitialized preferred_node
parents 8401c72c 9d3c3354
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -10334,7 +10334,7 @@ F: drivers/oprofile/
F:	include/linux/oprofile.h

ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M:	Mark Fasheh <mfasheh@versity.com>
M:	Mark Fasheh <mark@fasheh.com>
M:	Joel Becker <jlbec@evilplan.org>
L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
W:	http://ocfs2.wiki.kernel.org
+10 −0
Original line number Diff line number Diff line
@@ -972,3 +972,13 @@ int pmd_clear_huge(pmd_t *pmdp)
	pmd_clear(pmdp);
	return 1;
}

/*
 * Architecture stub for the unmapped-page-table free interface.
 *
 * Reports success (non-zero) only when the PUD entry is already clear,
 * i.e. this architecture never actually tears down a live pmd table here.
 * NOTE(review): the exact value returned is whatever pud_none() yields;
 * callers are assumed to treat it as a boolean — confirm at call sites.
 */
int pud_free_pmd_page(pud_t *pud)
{
	return pud_none(*pud);
}

/*
 * Architecture stub for the unmapped-page-table free interface.
 *
 * Reports success (non-zero) only when the PMD entry is already clear;
 * no pte page is ever freed by this implementation.
 */
int pmd_free_pte_page(pmd_t *pmd)
{
	return pmd_none(*pmd);
}
+0 −1
Original line number Diff line number Diff line
@@ -2,7 +2,6 @@
#ifndef __H8300_BYTEORDER_H__
#define __H8300_BYTEORDER_H__

#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
#include <linux/byteorder/big_endian.h>

#endif
+48 −0
Original line number Diff line number Diff line
@@ -702,4 +702,52 @@ int pmd_clear_huge(pmd_t *pmd)

	return 0;
}

/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE(review): relies entirely on the stated context — nothing here
 * flushes the TLB itself; verify all callers purge before calling.
 */
int pud_free_pmd_page(pud_t *pud)
{
	pmd_t *pmd;
	int i;

	/* Nothing mapped at this entry: trivially successful. */
	if (pud_none(*pud))
		return 1;

	pmd = (pmd_t *)pud_page_vaddr(*pud);

	/*
	 * Free every pte page hanging off this pmd table first.  On a
	 * failure mid-loop we return 0 with the pud still set — pte pages
	 * already freed by earlier iterations are NOT restored, so callers
	 * must treat 0 as fatal for this mapping.
	 */
	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_free_pte_page(&pmd[i]))
			return 0;

	/* All children gone: clear the entry, then release the pmd page. */
	pud_clear(pud);
	free_page((unsigned long)pmd);

	return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE(review): only pmd_none() is checked — a pmd pointing at a huge
 * page rather than a pte table is assumed impossible per the context
 * (range already unmapped); confirm against callers.
 */
int pmd_free_pte_page(pmd_t *pmd)
{
	pte_t *pte;

	/* Nothing mapped at this entry: trivially successful. */
	if (pmd_none(*pmd))
		return 1;

	/* Clear the entry first, then release the pte page it referenced. */
	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);
	free_page((unsigned long)pte);

	return 1;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
+14 −3
Original line number Diff line number Diff line
@@ -108,6 +108,16 @@ static void huge_pagevec_release(struct pagevec *pvec)
	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (+ 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
@@ -127,12 +137,13 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * Offset passed to mmap (before page shift) could have been
	 * negative when represented as a (l)off_t.
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a (l)off_t when converted to byte offset.
	 */
	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
	if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
		return -EINVAL;

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

Loading