
Commit bf5421c3 authored by Andi Kleen, committed by Linus Torvalds

[PATCH] i386/x86-64: Don't call change_page_attr with a spinlock held



It is illegal because change_page_attr can sleep.

Use a two-step lookup scheme instead.  First look up the vm_struct, then
change the direct mapping, then finally unmap it.  That's ok because nobody
can change the particular virtual address range as long as the vm_struct is
still in the global list.
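
In outline, the fix looks the entry up under the lock, drops the lock before the operation that can sleep, and only unlinks and frees the entry afterwards. Below is a minimal userspace sketch of that two-step pattern, using a POSIX rwlock in place of vmlist_lock; struct entry, entry_list, slow_reset() and release() are hypothetical stand-ins for vm_struct, vmlist, change_page_attr() and iounmap(), not kernel APIs.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct entry {
	void *addr;
	struct entry *next;
};

static struct entry *entry_list;
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for change_page_attr(): may block, so it must never be
 * called with list_lock held. */
static void slow_reset(struct entry *e)
{
	(void)e;
	usleep(1000);
}

/* Unlink and return the entry for addr; the write lock is held only
 * for the brief, non-blocking list manipulation. */
static struct entry *remove_entry(void *addr)
{
	struct entry **pp, *e = NULL;

	pthread_rwlock_wrlock(&list_lock);
	for (pp = &entry_list; (e = *pp) != NULL; pp = &e->next) {
		if (e->addr == addr) {
			*pp = e->next;
			break;
		}
	}
	pthread_rwlock_unlock(&list_lock);
	return e;
}

void release(void *addr)
{
	struct entry *p, *o;

	/* Step 1: look the entry up under the lock, but do not unlink it. */
	pthread_rwlock_rdlock(&list_lock);
	for (p = entry_list; p; p = p->next)
		if (p->addr == addr)
			break;
	pthread_rwlock_unlock(&list_lock);

	if (!p) {
		fprintf(stderr, "release: bad address %p\n", addr);
		return;
	}

	/* Step 2: do the work that may sleep with no lock held.  Leaving
	 * the entry on the list keeps the address from being handed out
	 * again by a concurrent allocator in the meantime. */
	slow_reset(p);

	/* Step 3: finally unlink and free it. */
	o = remove_entry(addr);
	if (o != p)		/* analogous to the patch's BUG_ON() */
		abort();
	free(p);
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	e->addr = (void *)0x1000;
	e->next = entry_list;
	entry_list = e;
	release(e->addr);
	return 0;
}

The key property is the same as in the patch: because release() leaves the entry on the list while slow_reset() runs unlocked, the address cannot be reused before the final remove_entry().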

Also added some LinuxDoc documentation to iounmap.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 5e9ef02e
arch/i386/mm/ioremap.c  +29 −8
@@ -223,9 +223,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
+/**
+ * iounmap - Free a IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p;
+	struct vm_struct *p, *o;
 
 	if ((void __force *)addr <= high_memory)
 		return;
@@ -239,22 +245,37 @@ void iounmap(volatile void __iomem *addr)
 			addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	write_lock(&vmlist_lock);
-	p = __remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
-	if (!p) {
-		printk(KERN_WARNING "iounmap: bad address %p\n", addr);
+	addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);
+
+	/* Use the vm area unlocked, assuming the caller
+	   ensures there isn't another iounmap for the same address
+	   in parallel. Reuse of the virtual address is prevented by
+	   leaving it in the global lists until we're done with it.
+	   cpa takes care of the direct mappings. */
+	read_lock(&vmlist_lock);
+	for (p = vmlist; p; p = p->next) {
+		if (p->addr == addr)
+			break;
+	}
+	read_unlock(&vmlist_lock);
+
+	if (!p) {
+		printk("iounmap: bad address %p\n", addr);
 		dump_stack();
-		goto out_unlock;
+		return;
 	}
 
+	/* Reset the direct mapping. Can block */
 	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
 				 PAGE_KERNEL);
 		global_flush_tlb();
 	} 
-out_unlock:
-	write_unlock(&vmlist_lock);
+
+	/* Finally remove it */
+	o = remove_vm_area((void *)addr);
+	BUG_ON(p != o || o == NULL);
 	kfree(p); 
 }
 EXPORT_SYMBOL(iounmap);
arch/x86_64/mm/ioremap.c  +31 −6
@@ -247,9 +247,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 	return __ioremap(phys_addr, size, _PAGE_PCD);
 }
 
+/**
+ * iounmap - Free a IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p;
+	struct vm_struct *p, *o;
 
 	if (addr <= high_memory) 
 		return; 
@@ -257,12 +263,31 @@ void iounmap(volatile void __iomem *addr)
 		addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	write_lock(&vmlist_lock);
-	p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
-	if (!p)
+	addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);
+	/* Use the vm area unlocked, assuming the caller
+	   ensures there isn't another iounmap for the same address
+	   in parallel. Reuse of the virtual address is prevented by
+	   leaving it in the global lists until we're done with it.
+	   cpa takes care of the direct mappings. */
+	read_lock(&vmlist_lock);
+	for (p = vmlist; p; p = p->next) {
+		if (p->addr == addr)
+			break;
+	}
+	read_unlock(&vmlist_lock);
+
+	if (!p) {
 		printk("iounmap: bad address %p\n", addr);
-	else if (p->flags >> 20)
+		dump_stack();
+		return;
+	}
+
+	/* Reset the direct mapping. Can block */
+	if (p->flags >> 20)
 		ioremap_change_attr(p->phys_addr, p->size, 0);
-	write_unlock(&vmlist_lock);
+
+	/* Finally remove it */
+	o = remove_vm_area((void *)addr);
+	BUG_ON(p != o || o == NULL);
 	kfree(p); 
 }