
Commit d6ee09a2 authored by Ingo Molnar

x86: simplify pageattr_64.c



simplify pageattr_64.c.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent a5f55035
+55 −113
@@ -9,6 +9,14 @@
#include <linux/slab.h>
#include <linux/mm.h>

void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
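
One property of the clflush_cache_range() helper added above is worth noting: the loop strides from addr in x86_clflush_size steps, so it only covers every cache line if addr is line-aligned; an unaligned range can leave its last line unflushed. A small userspace sketch of the stride math (illustrative only; the 64-byte line size and example addresses are assumptions, not from this commit):

	#include <stdio.h>

	int main(void)
	{
		unsigned long addr = 0x1038, size = 0x40, line = 64;
		unsigned long i;

		/* Mirrors the loop in clflush_cache_range(). */
		for (i = 0; i < size; i += line)
			printf("flush line 0x%lx\n", (addr + i) & ~(line - 1));

		/* Prints only 0x1000, although the range also touches line 0x1040. */
		return 0;
	}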
@@ -47,12 +55,6 @@ split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
	base = alloc_pages(GFP_KERNEL, 0);
	if (!base)
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
@@ -64,80 +66,13 @@ split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
	return base;
}

void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}

static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	__flush_tlb_all();

	/* When clflush is available always use it because it is
	   much cheaper than WBINVD. */
	/* clflush is still broken. Disable for now. */
	if (1 || !cpu_has_clflush) {
		wbinvd();
	} else {
		list_for_each_entry(pg, l, lru) {
			void *addr = page_address(pg);

			clflush_cache_range(addr, PAGE_SIZE);
		}
	}
}

static inline void flush_map(struct list_head *l)
{
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
	if (!test_and_set_bit(PG_arch_1, &fpage->flags))
		list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	unsigned long pfn;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
	large_pte = pfn_pte(pfn, ref_prot);
	large_pte = pte_mkhuge(large_pte);

	set_pte((pte_t *)pmd, large_pte);
}

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	struct page *kpte_page;
	pgprot_t ref_prot2;
	pte_t *kpte;
	pgprot_t ref_prot2, oldprot;
	int level;

	kpte = lookup_address(address, &level);
@@ -145,8 +80,12 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		return 0;

	kpte_page = virt_to_page(kpte);
	oldprot = pte_pgprot(*kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));
	ref_prot = canon_pgprot(ref_prot);
	prot = canon_pgprot(prot);

	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if (level == 4) {
			set_pte(kpte, pfn_pte(pfn, prot));
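
The two canon_pgprot() calls added in this hunk canonicalize both attribute values before they are compared. A minimal sketch of what that means, assuming the definition from the same era's include/asm-x86 headers (quoted from memory, so treat it as an assumption):

	/*
	 * canon_pgprot() masks off PTE bits the CPU cannot use (e.g.
	 * _PAGE_NX when NX is unsupported), so that the
	 * pgprot_val(prot) != pgprot_val(ref_prot) check above compares
	 * only bits that can actually land in a page table entry.
	 */
	#define canon_pgprot(p)	__pgprot(pgprot_val(p) & __supported_pte_mask)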
@@ -165,38 +104,29 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else {
		if (level == 4) {
			set_pte(kpte, pfn_pte(pfn, ref_prot));
			BUG_ON(page_private(kpte_page) == 0);
			page_private(kpte_page)--;
		} else
			BUG();
	}

	/* on x86-64 the direct mapping set at boot is not using 4k pages */
	BUG_ON(PageReserved(kpte_page));

	save_page(kpte_page);
	if (page_private(kpte_page) == 0)
		revert_page(address, ref_prot);
	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot:    New page table attribute (PAGE_*)
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * mem_map entry (pfn_valid() is false).
 *
 * Caller must call global_flush_tlb() after this.
 * See change_page_attr() documentation for more details.
 */

int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;
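
To make the kernel-doc added above concrete, a minimal hypothetical caller of change_page_attr_addr() might look like this (example_set_uncached() and its use of PAGE_KERNEL_NOCACHE are illustrative sketches, not part of this commit):

	/*
	 * Make one page of the direct mapping uncacheable, then make the
	 * change visible. Works even where pfn_valid() is false, which is
	 * exactly what this variant exists for.
	 */
	static int example_set_uncached(unsigned long vaddr)
	{
		int err;

		err = change_page_attr_addr(vaddr, 1, PAGE_KERNEL_NOCACHE);
		if (err)
			return err;

		/* Required before relying on the new attributes. */
		global_flush_tlb();
		return 0;
	}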
@@ -236,7 +166,26 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
	return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space).
 * This function only deals with the kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);
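
And the struct page variant documented above, in the same hypothetical style (a sketch under the same assumptions, not part of this commit):

	static int example_set_page_uncached(struct page *page)
	{
		int err;

		err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
		if (err)
			return err;

		global_flush_tlb();	/* make the change active */
		return 0;
	}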
@@ -245,29 +194,22 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
}
EXPORT_SYMBOL(change_page_attr);

void global_flush_tlb(void)
static void flush_kernel_map(void *arg)
{
	struct page *pg, *next;
	struct list_head l;

	/*
	 * Write-protect the semaphore, to exclude two contexts
	 * doing a list_replace_init() call in parallel and to
	 * exclude new additions to the deferred_pages list:
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	down_write(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_write(&init_mm.mmap_sem);

	flush_map(&l);
	__flush_tlb_all();

	list_for_each_entry_safe(pg, next, &l, lru) {
		list_del(&pg->lru);
		clear_bit(PG_arch_1, &pg->flags);
		if (page_private(pg) != 0)
			continue;
		ClearPagePrivate(pg);
		__free_page(pg);
	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);
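
Pieced together from the hunks above, the whole flush path after this commit reduces to the following (no new code here, just the post-commit state assembled for readability):

	static void flush_kernel_map(void *arg)
	{
		/*
		 * Flush all to work around errata in early Athlons regarding
		 * large page flushing.
		 */
		__flush_tlb_all();

		if (boot_cpu_data.x86_model >= 4)
			wbinvd();
	}

	void global_flush_tlb(void)
	{
		BUG_ON(irqs_disabled());

		on_each_cpu(flush_kernel_map, NULL, 1, 1);
	}
	EXPORT_SYMBOL(global_flush_tlb);

The deferred_pages list, its PG_arch_1/page_private bookkeeping, and revert_page() are gone entirely; every global_flush_tlb() now does an unconditional TLB flush on each CPU, plus WBINVD on x86_model >= 4.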