Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6afb5157 authored by Haicheng Li, committed by H. Peter Anvin
Browse files

x86, mm: Separate x86_64 vmalloc_sync_all() into separate functions



No behavior change.

Move some of vmalloc_sync_all() code into a new function
sync_global_pgds() that will be useful for memory hotplug.

Signed-off-by: Haicheng Li <haicheng.li@linux.intel.com>
LKML-Reference: <4C6E4ECD.1090607@linux.intel.com>
Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 61c77326
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -102,6 +102,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
	native_set_pgd(pgd, native_make_pgd(0));
}

extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
+1 −23
Original line number Diff line number Diff line
@@ -326,29 +326,7 @@ static void dump_pagetable(unsigned long address)

/*
 * Propagate the kernel's vmalloc-area PGD entries into the PGD of
 * every process.  NOTE(review): this span is a unified-diff view of
 * the commit -- the loop below is the code being REMOVED, and the
 * single sync_global_pgds() call at the end is its REPLACEMENT; both
 * appear together here only because of the diff rendering.
 */
void vmalloc_sync_all(void)
{
	unsigned long address;

	/* Walk the vmalloc range one PGD-sized slot at a time. */
	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
	     address += PGDIR_SIZE) {

		/* Kernel reference page table entry for this slot. */
		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		/* Nothing mapped at this slot in the reference PGD. */
		if (pgd_none(*pgd_ref))
			continue;

		/* pgd_lock guards pgd_list, the list of per-mm PGD pages. */
		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				/* Entry already present: it must agree with the reference. */
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
	/* Replacement line: the walk above now lives in sync_global_pgds(). */
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
+30 −0
Original line number Diff line number Diff line
@@ -97,6 +97,36 @@ static int __init nonx32_setup(char *str)
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory was added/removed make sure all the processes MM have
 * suitable PGD entries in the local PGD level page.
 *
 * @start: first virtual address to synchronize (inclusive)
 * @end:   last virtual address to synchronize (inclusive)
 *
 * For each PGD-sized slot in [start, end], copy the kernel reference
 * entry into every per-mm PGD that does not have one yet, and BUG if
 * an existing entry disagrees with the reference.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		/* Kernel reference entry for this address. */
		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		/* pgd_lock guards pgd_list, the list of per-mm PGD pages. */
		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				/* Existing entry must match the kernel reference. */
				BUG_ON(pgd_page_vaddr(*pgd)
					!= pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.