Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d7c8f21a authored by Thomas Gleixner, committed by Ingo Molnar
Browse files

x86: cpa: move flush to cpa



The set_memory_* and set_pages_* family of APIs currently requires the
callers to do a global tlb flush after the function call; forgetting this is
a very nasty deathtrap. This patch moves the global tlb flush into
each of the callers.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent d1028a15
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -572,7 +572,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");
	global_flush_tlb();

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;
+0 −14
Original line number Diff line number Diff line
@@ -752,15 +752,11 @@ void mark_rodata_ro(void)
		printk("Write protecting the kernel text: %luk\n", size >> 10);

#ifdef CONFIG_CPA_DEBUG
		global_flush_tlb();

		printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
		global_flush_tlb();

		printk("Testing CPA: write protecting again\n");
		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
		global_flush_tlb();
#endif
	}
#endif
@@ -770,22 +766,12 @@ void mark_rodata_ro(void)
	printk("Write protecting the kernel read-only data: %luk\n",
	       size >> 10);

	/*
	 * set_pages_*() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();

#ifdef CONFIG_CPA_DEBUG
	printk("Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
	global_flush_tlb();

	printk("Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	global_flush_tlb();
#endif
}
#endif
+0 −10
Original line number Diff line number Diff line
@@ -610,22 +610,12 @@ void mark_rodata_ro(void)
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);

	/*
	 * set_memory_*() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();

#ifdef CONFIG_CPA_DEBUG
	printk("Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
	global_flush_tlb();

	printk("Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
	global_flush_tlb();
#endif
}
#endif
+0 −2
Original line number Diff line number Diff line
@@ -96,8 +96,6 @@ static int ioremap_change_attr(unsigned long paddr, unsigned long size,
		err = set_memory_wb(vaddr, nrpages);
		break;
	}
	if (!err)
		global_flush_tlb();

	return err;
}
+71 −66
Original line number Diff line number Diff line
@@ -22,6 +22,36 @@ within(unsigned long addr, unsigned long start, unsigned long end)
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */
/*
 * clflush_cache_range - flush a memory range from the CPU caches
 * @addr: virtual start address of the range
 * @size: number of bytes to flush
 *
 * Issues one clflush per cache line, stepping by the CPU's reported
 * cache-line flush size (boot_cpu_data.x86_clflush_size).
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	/* One clflush per cache line covering [addr, addr+size). */
	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}

/*
 * flush_kernel_map - per-CPU TLB/cache flush callback
 * @arg: unused; present only to satisfy the on_each_cpu() callback
 *       signature.
 */
static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	/*
	 * NOTE(review): presumably the wbinvd() on model >= 4 is part of
	 * the same errata workaround — confirm against the errata docs.
	 */
	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

/*
 * global_flush_tlb - run flush_kernel_map() on every CPU
 *
 * Must be called with interrupts enabled (enforced by the BUG_ON);
 * NOTE(review): presumably because on_each_cpu() cross-calls can
 * deadlock with interrupts disabled — confirm.
 */
static void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
@@ -328,149 +358,124 @@ static int change_page_attr_clear(unsigned long addr, int numpages,

int set_memory_uc(unsigned long addr, int numpages)
{
	pgprot_t uncached;
	int err;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_set(addr, numpages, uncached);
	err = change_page_attr_set(addr, numpages,
				__pgprot(_PAGE_PCD | _PAGE_PWT));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	pgprot_t uncached;
	int err;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_clear(addr, numpages, uncached);
	err = change_page_attr_clear(addr, numpages,
				__pgprot(_PAGE_PCD | _PAGE_PWT));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	pgprot_t nx;
	int err;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_clear(addr, numpages, nx);
	err = change_page_attr_clear(addr, numpages,
				__pgprot(_PAGE_NX));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	pgprot_t nx;
	int err;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_set(addr, numpages, nx);
	err = change_page_attr_set(addr, numpages,
				__pgprot(_PAGE_NX));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	pgprot_t rw;
	int err;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_clear(addr, numpages, rw);
	err = change_page_attr_clear(addr, numpages,
				__pgprot(_PAGE_RW));
	global_flush_tlb();
	return err;
}

int set_memory_rw(unsigned long addr, int numpages)
{
	pgprot_t rw;
	int err;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_set(addr, numpages, rw);
	err = change_page_attr_set(addr, numpages,
				__pgprot(_PAGE_RW));
	global_flush_tlb();
	return err;
}

int set_memory_np(unsigned long addr, int numpages)
{
	pgprot_t present;
	int err;

	pgprot_val(present) = _PAGE_PRESENT;
	return change_page_attr_clear(addr, numpages, present);
	err = change_page_attr_clear(addr, numpages,
				__pgprot(_PAGE_PRESENT));
	global_flush_tlb();
	return err;
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_set(addr, numpages, uncached);
	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_clear(addr, numpages, uncached);
	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_clear(addr, numpages, nx);
	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_set(addr, numpages, nx);
	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_clear(addr, numpages, rw);
	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_set(addr, numpages, rw);
}

/*
 * clflush_cache_range - flush a memory range from the CPU caches
 * @addr: virtual start address of the range
 * @size: number of bytes to flush
 *
 * Steps through the range by the CPU's reported cache-line flush size
 * (boot_cpu_data.x86_clflush_size), issuing one clflush per line.
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
	return set_memory_rw(addr, numpages);
}

/*
 * global_flush_tlb - run flush_kernel_map() on every CPU
 *
 * Must be called with interrupts enabled (enforced by the BUG_ON).
 * Exported so that users of the set_memory_*/set_pages_* APIs can
 * issue the required flush.
 */
void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC

Loading