Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e618c957 authored by Jeremy Fitzhardinge's avatar Jeremy Fitzhardinge Committed by Ingo Molnar
Browse files

x86: unify PAE/non-PAE pgd_ctor



The PAE and non-PAE pgd constructors are more or less
identical, and can be unified into a single function.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: William Irwin <wli@holomorphy.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent c66315e0
Loading
Loading
Loading
Loading
+20 −34
Original line number Diff line number Diff line
@@ -219,50 +219,39 @@ static inline void pgd_list_del(pgd_t *pgd)
	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)


#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
static void pgd_ctor(void *pgd)
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* !PAE, no pagetable sharing */
	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* must happen under lock */
	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
					__pa(swapper_pg_dir) >> PAGE_SHIFT,
					USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	/* PAE, kernel PMD may be shared */

	if (SHARED_KERNEL_PMD) {
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	} else {
		unsigned long flags;

		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}
}
#endif	/* PTRS_PER_PMD */

static void pgd_dtor(void *pgd)
{
@@ -276,9 +265,6 @@ static void pgd_dtor(void *pgd)
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.