
Commit 7856dfeb authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86_64: Fixed guard page handling again in iounmap



Caused oopses again.  Also fix potential mismatch in checking if
change_page_attr was needed.

To do it without races I needed to change mm/vmalloc.c to export a
__remove_vm_area that does not take vmlist lock.

Noticed by Terence Ripperda and based on a patch of his.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c4d1fcf3
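
The race referred to above comes from dropping vmlist_lock between lookup and use: remove_vm_area() takes the lock itself, so the old iounmap() had to walk vmlist by hand in order to unlink an area and still read its flags and size afterwards. The patch applies the usual kernel split into a locked wrapper and a double-underscore variant that expects the caller to already hold the lock. The following is a minimal userspace sketch of that pattern, not the kernel code itself: it assumes a pthread rwlock in place of vmlist_lock and simplified stand-in types; only the names mirror the patch.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the kernel's vmlist and vm_struct. */
    struct vm_struct {
    	void *addr;
    	unsigned long size;
    	unsigned long flags;
    	struct vm_struct *next;
    };

    static struct vm_struct *vmlist;
    static pthread_rwlock_t vmlist_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Caller must hold vmlist_lock (the variant the patch exports). */
    static struct vm_struct *__remove_vm_area(void *addr)
    {
    	struct vm_struct **p, *tmp;

    	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
    		if (tmp->addr == addr) {
    			*p = tmp->next;	/* unlink, but do not free */
    			return tmp;
    		}
    	}
    	return NULL;
    }

    /* Locked wrapper, as in the patch. */
    struct vm_struct *remove_vm_area(void *addr)
    {
    	struct vm_struct *v;

    	pthread_rwlock_wrlock(&vmlist_lock);
    	v = __remove_vm_area(addr);
    	pthread_rwlock_unlock(&vmlist_lock);
    	return v;
    }

    /*
     * iounmap-style caller: the unlink and the follow-up that depends on
     * the removed entry's fields happen under one lock hold, so no other
     * thread can observe a half-torn-down mapping in between.
     */
    void iounmap_like(void *addr)
    {
    	struct vm_struct *p;

    	pthread_rwlock_wrlock(&vmlist_lock);
    	p = __remove_vm_area(addr);
    	if (!p)
    		fprintf(stderr, "iounmap: bad address %p\n", addr);
    	else if (p->flags >> 20)
    		printf("restoring attributes for %p, size %lu\n",
    		       p->addr, p->size);
    	pthread_rwlock_unlock(&vmlist_lock);
    	free(p);	/* free(NULL) is a no-op, like kfree(NULL) */
    }

    int main(void)
    {
    	struct vm_struct *a = calloc(1, sizeof(*a));

    	a->addr = (void *)0x1000;
    	a->size = 8192;
    	a->flags = 1UL << 20;	/* pretend a caching attribute was set */
    	vmlist = a;

    	iounmap_like((void *)0x1000);	/* prints the restore message */
    	iounmap_like((void *)0x2000);	/* prints "bad address" */
    	return 0;
    }

With this split, iounmap() can perform the lookup, the unlink, and the attribute check in one critical section, which is exactly what the first hunk set below does.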
arch/x86_64/mm/ioremap.c  +8 −21
@@ -133,7 +133,7 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 					unsigned long flags)
 {
 	int err = 0;
-	if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
 		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long vaddr = (unsigned long) __va(phys_addr);
 
@@ -214,7 +214,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
-	if (ioremap_change_attr(phys_addr, size, flags) < 0) {
+	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
 		area->flags &= 0xffffff;
 		vunmap(addr);
 		return NULL;
@@ -251,7 +251,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p, **pprev;
+	struct vm_struct *p;
 
 	if (addr <= high_memory)
 		return;
@@ -260,24 +260,11 @@ void iounmap(volatile void __iomem *addr)
 		return;
 
 	write_lock(&vmlist_lock);
-	for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev)
-		if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
-			break;
-	if (!p) {
-		printk("__iounmap: bad address %p\n", addr);
-		goto out_unlock;
-	}
-	*pprev = p->next;
-	unmap_vm_area(p);
-	if ((p->flags >> 20) &&
-		p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
-		/* p->size includes the guard page, but cpa doesn't like that */
-		change_page_attr_addr((unsigned long)__va(p->phys_addr),
-				 p->size >> PAGE_SHIFT,
-				 PAGE_KERNEL);
-		global_flush_tlb();
-	}
-out_unlock:
+	p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
+	if (!p)
+		printk("iounmap: bad address %p\n", addr);
+	else if (p->flags >> 20)
+		ioremap_change_attr(p->phys_addr, p->size, 0);
 	write_unlock(&vmlist_lock);
+	kfree(p);
 }
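
Two details of these hunks are worth spelling out. First, the flags test moves from inside ioremap_change_attr() into __ioremap(), so iounmap() can restore attributes through the same helper that set them, guarded only by its own p->flags >> 20 check; set and restore now share one code path, fixing the mismatch the commit message mentions. Second, the old tear-down shifted p->size, which still included the guard page, despite its own comment noting that change_page_attr dislikes that; in the new path __remove_vm_area() drops the guard page from the returned size (the "Remove the guard page" step in the mm/vmalloc.c hunk below) before iounmap() passes it on. A small worked example of the page-count arithmetic, assuming 4 KB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
    	unsigned long mapping = 3 * PAGE_SIZE;       /* what was ioremapped */
    	unsigned long vm_size = mapping + PAGE_SIZE; /* vm_struct adds a guard page */

    	/* Old iounmap: shifted the guard-inclusive size -> one page too many. */
    	printf("old: %lu pages\n", vm_size >> PAGE_SHIFT);                /* 4 */

    	/* New path: __remove_vm_area() strips the guard page first, then
    	 * ioremap_change_attr() rounds the remainder up. */
    	unsigned long size = vm_size - PAGE_SIZE;
    	printf("new: %lu pages\n", (size + PAGE_SIZE - 1) >> PAGE_SHIFT); /* 3 */
    	return 0;
    }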
include/linux/vmalloc.h  +1 −0
@@ -41,6 +41,7 @@ extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 					unsigned long start, unsigned long end);
 extern struct vm_struct *remove_vm_area(void *addr);
+extern struct vm_struct *__remove_vm_area(void *addr);
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
 extern void unmap_vm_area(struct vm_struct *area);
mm/vmalloc.c  +20 −13
@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
-/**
- *	remove_vm_area  -  find and remove a contingous kernel virtual area
- *
- *	@addr:		base address
- *
- *	Search for the kernel VM area starting at @addr, and remove it.
- *	This function returns the found VM area, but using it is NOT safe
- *	on SMP machines.
- */
-struct vm_struct *remove_vm_area(void *addr)
+/* Caller must hold vmlist_lock */
+struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
 
-	write_lock(&vmlist_lock);
 	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
 		 if (tmp->addr == addr)
 			 goto found;
 	}
-	write_unlock(&vmlist_lock);
 	return NULL;
 
 found:
 	unmap_vm_area(tmp);
 	*p = tmp->next;
-	write_unlock(&vmlist_lock);
 
 	/*
 	 * Remove the guard page.
@@ -281,6 +270,24 @@ struct vm_struct *remove_vm_area(void *addr)
 	return tmp;
 }
 
+/**
+ *	remove_vm_area  -  find and remove a contingous kernel virtual area
+ *
+ *	@addr:		base address
+ *
+ *	Search for the kernel VM area starting at @addr, and remove it.
+ *	This function returns the found VM area, but using it is NOT safe
+ *	on SMP machines, except for its size or flags.
+ */
+struct vm_struct *remove_vm_area(void *addr)
+{
+	struct vm_struct *v;
+	write_lock(&vmlist_lock);
+	v = __remove_vm_area(addr);
+	write_unlock(&vmlist_lock);
+	return v;
+}
+
 void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
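
One last piece of context, recalled rather than shown in this diff: __ioremap() stashes the requested caching flags in the upper bits of vm_struct.flags, which is why the error path clears them with area->flags &= 0xffffff and why iounmap() tests p->flags >> 20 before restoring attributes. A small sketch of that encoding, with illustrative flag values rather than the kernel's actual headers:

    #include <stdio.h>

    #define VM_IOREMAP 0x00000001UL	/* illustrative value, not the kernel's */

    int main(void)
    {
    	unsigned long pcd_pwt = 0x18;	/* hypothetical pgprot cache bits */
    	unsigned long flags = VM_IOREMAP | (pcd_pwt << 20);

    	/* iounmap's test: were any caching attributes stored above bit 19? */
    	if (flags >> 20)
    		printf("attrs to restore: %#lx\n", flags >> 20);
    	return 0;
    }

If no caching attribute was ever set, the upper bits stay zero and iounmap() skips the change_page_attr work entirely.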