Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f1d1a842 authored by Christoph Lameter's avatar Christoph Lameter Committed by Linus Torvalds
Browse files

SLUB: i386 support



SLUB cannot run on i386 at this point because i386 uses the page->private and
page->index fields of slab pages for the pgd cache.

Make SLUB run on i386 by replacing the pgd slab cache with a quicklist.
Limit the changes as much as possible: leave the improvised linked list in
place, etc. This has been working here for a couple of weeks now.

Acked-by: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8df767dd
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -55,6 +55,10 @@ config ZONE_DMA
	bool
	default y

config QUICKLIST
	bool
	default y

config SBUS
	bool

@@ -79,10 +83,6 @@ config ARCH_MAY_HAVE_PC_FDC
	bool
	default y

config ARCH_USES_SLAB_PAGE_STRUCT
	bool
	default y

config DMI
	bool
	default y
+1 −0
Original line number Diff line number Diff line
@@ -186,6 +186,7 @@ void cpu_idle(void)
			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			check_pgt_cache();
			rmb();
			idle = pm_idle;

+1 −1
Original line number Diff line number Diff line
@@ -421,7 +421,7 @@ void flush_tlb_mm (struct mm_struct * mm)
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	check_pgt_cache();
	preempt_enable();
}

+0 −7
Original line number Diff line number Diff line
@@ -740,7 +740,6 @@ int remove_memory(u64 start, u64 size)
EXPORT_SYMBOL_GPL(remove_memory);
#endif

struct kmem_cache *pgd_cache;
struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
@@ -764,12 +763,6 @@ void __init pgtable_cache_init(void)
			pgd_size = PAGE_SIZE;
		}
	}
	pgd_cache = kmem_cache_create("pgd",
				pgd_size,
				pgd_size,
				SLAB_PANIC,
				pgd_ctor,
				(!SHARED_KERNEL_PMD) ? pgd_dtor : NULL);
}

/*
+17 −9
Original line number Diff line number Diff line
@@ -13,6 +13,7 @@
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
@@ -205,8 +206,6 @@ void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
@@ -232,9 +231,11 @@ static inline void pgd_list_del(pgd_t *pgd)
		set_page_private(next, (unsigned long)pprev);
}



#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
void pgd_ctor(void *pgd)
{
	unsigned long flags;

@@ -256,7 +257,7 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
void pgd_ctor(void *pgd)
{
	/* PAE, kernel PMD may be shared */

@@ -275,11 +276,12 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
}
#endif	/* PTRS_PER_PMD */

void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	BUG_ON(SHARED_KERNEL_PMD);
	if (SHARED_KERNEL_PMD)
		return;

	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
	spin_lock_irqsave(&pgd_lock, flags);
@@ -321,7 +323,7 @@ static void pmd_cache_free(pmd_t *pmd, int idx)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;
@@ -344,7 +346,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
		pmd_cache_free(pmd, i);
	}
	kmem_cache_free(pgd_cache, pgd);
	quicklist_free(0, pgd_dtor, pgd);
	return NULL;
}

@@ -361,5 +363,11 @@ void pgd_free(pgd_t *pgd)
			pmd_cache_free(pmd, i);
		}
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
	quicklist_free(0, pgd_dtor, pgd);
}

/*
 * Trim the (per-CPU) pgd quicklist, returning surplus pages to the page
 * allocator and running pgd_dtor on each page released.  Called from the
 * idle loop and from flush_tlb_mm() (see the hunks above in this commit).
 * NOTE(review): 25 and 16 appear to be the high-water mark and free-batch
 * size passed to quicklist_trim() — confirm against include/linux/quicklist.h.
 */
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}
Loading