
Commit 00b3a331 authored by Laurent Dufour, committed by Linus Torvalds

mm: remove odd HAVE_PTE_SPECIAL

Remove the additional define HAVE_PTE_SPECIAL and rely directly on
CONFIG_ARCH_HAS_PTE_SPECIAL.

There is no functional change introduced by this patch.

Link: http://lkml.kernel.org/r/1523533733-25437-1-git-send-email-ldufour@linux.vnet.ibm.com


Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Christophe LEROY <christophe.leroy@c-s.fr>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3010a5ea
mm/memory.c +6 −9
@@ -817,17 +817,12 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  * PFNMAP mappings in order to support COWable mappings.
  *
  */
-#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-# define HAVE_PTE_SPECIAL 1
-#else
-# define HAVE_PTE_SPECIAL 0
-#endif
 struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte, bool with_public_device)
 {
 	unsigned long pfn = pte_pfn(pte);
 
-	if (HAVE_PTE_SPECIAL) {
+	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
 		if (likely(!pte_special(pte)))
 			goto check_pfn;
 		if (vma->vm_ops && vma->vm_ops->find_special_page)
@@ -862,7 +857,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 	}
 
-	/* !HAVE_PTE_SPECIAL case follows: */
+	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
 
 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 		if (vma->vm_flags & VM_MIXEDMAP) {
@@ -881,6 +876,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 
 	if (is_zero_pfn(pfn))
 		return NULL;
+
 check_pfn:
 	if (unlikely(pfn > highest_memmap_pfn)) {
 		print_bad_pte(vma, addr, pte, NULL);
@@ -904,7 +900,7 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 	/*
 	 * There is no pmd_special() but there may be special pmds, e.g.
 	 * in a direct-access (dax) mapping, so let's just replicate the
-	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
 	 */
 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 		if (vma->vm_flags & VM_MIXEDMAP) {
@@ -1933,7 +1929,8 @@ static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
 	 * without pte special, it would there be refcounted as a normal page.
 	 */
-	if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
+	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
 		struct page *page;
 
 		/*
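
The new form relies on the kernel's IS_ENABLED() macro from <linux/kconfig.h>: Kconfig defines an enabled option to 1 (and leaves a disabled one undefined), and IS_ENABLED() turns that into the compile-time constant 1 or 0, usable in an ordinary C `if`. Unlike the removed #ifdef block, both branches stay visible to the compiler and get type-checked, while the constant condition still lets the dead branch be optimized away. Below is a minimal standalone sketch of the preprocessor trick; the helper names (_IS_ENABLED1/_IS_ENABLED2) are illustrative rather than the upstream ones, and the real macro additionally handles CONFIG_FOO_MODULE for =m options, which this sketch omits.

#include <stdio.h>

/* When pasted after a token that expanded to 1, this puts "0," in
 * front of the argument list seen by __take_second_arg below. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val

/* Evaluates to 1 if 'option' is a macro defined to 1, else to 0.
 * The extra indirection level expands 'option' before the paste. */
#define IS_ENABLED(option)	_IS_ENABLED1(option)
/* Defined option: val is the token 1, paste gives __ARG_PLACEHOLDER_1.
 * Undefined option: paste gives a single junk token. */
#define _IS_ENABLED1(val)	_IS_ENABLED2(__ARG_PLACEHOLDER_##val)
/* Defined:   __take_second_arg(0, 1, 0)   -> 1
 * Undefined: __take_second_arg(junk 1, 0) -> 0 */
#define _IS_ENABLED2(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define CONFIG_ARCH_HAS_PTE_SPECIAL 1	/* as if set by Kconfig */

int main(void)
{
	/* Both arms are parsed and type-checked; the compiler drops the
	 * dead one because the condition is a compile-time constant. */
	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		printf("arch implements pte_special(): use the fast path\n");
	else
		printf("fall back to VM_PFNMAP/VM_MIXEDMAP heuristics\n");

	if (IS_ENABLED(CONFIG_SOME_DISABLED_OPTION))	/* undefined -> 0 */
		printf("never printed\n");
	return 0;
}

This is also why the shim can go without any functional change: the old #ifdef-selected 0/1 constant and IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) produce the same generated code, but the latter removes one level of indirection and reads directly as the Kconfig symbol it tests.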