Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 20cee16c authored by David Gibson's avatar David Gibson Committed by Linus Torvalds
Browse files

[PATCH] ppc64: Abolish ioremap_mm



Currently ppc64 has two mm_structs for the kernel, init_mm and also
ioremap_mm.  The latter really isn't necessary: this patch abolishes it,
instead restricting vmallocs to the lower 1TB of the init_mm's range and
placing io mappings in the upper 1TB.  This simplifies the code in a number
of places and eliminates an unnecessary set of pagetables.  It also tweaks
the unmap/free path a little, allowing us to remove the unmap_im_area() set
of page table walkers, replacing them with unmap_vm_area().

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6879dc13
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -505,7 +505,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
	pte_t *ptep;
	unsigned long pa;

	ptep = find_linux_pte(ioremap_mm.pgd, token);
	ptep = find_linux_pte(init_mm.pgd, token);
	if (!ptep)
		return token;
	pa = pte_pfn(*ptep) << PAGE_SHIFT;
+0 −4
Original line number Diff line number Diff line
@@ -2121,10 +2121,6 @@ empty_zero_page:
swapper_pg_dir:
	.space	4096

	.globl	ioremap_dir
ioremap_dir:
	.space	4096

#ifdef CONFIG_SMP
/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
	.globl	stab_array
+0 −8
Original line number Diff line number Diff line
@@ -58,14 +58,6 @@ struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
#endif

struct mm_struct ioremap_mm = {
	.pgd		= ioremap_dir,
	.mm_users	= ATOMIC_INIT(2),
	.mm_count	= ATOMIC_INIT(1),
	.cpu_vm_mask	= CPU_MASK_ALL,
	.page_table_lock = SPIN_LOCK_UNLOCKED,
};

/*
 * Make sure the floating-point register state in the
 * the thread_struct is up to date for task tsk.
+0 −4
Original line number Diff line number Diff line
@@ -310,10 +310,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)

		vsid = get_vsid(mm->context.id, ea);
		break;
	case IO_REGION_ID:
		mm = &ioremap_mm;
		vsid = get_kernel_vsid(ea);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
+12 −8
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
#include <asm/pgtable.h>
#include <asm/semaphore.h>
#include <asm/imalloc.h>
#include <asm/cacheflush.h>

static DECLARE_MUTEX(imlist_sem);
struct vm_struct * imlist = NULL;
@@ -285,29 +286,32 @@ struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
	return area;
}

unsigned long im_free(void * addr)
void im_free(void * addr)
{
	struct vm_struct **p, *tmp;
	unsigned long ret_size = 0;
  
	if (!addr)
		return ret_size;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		return;
	if ((unsigned long) addr & ~PAGE_MASK) {
		printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__,			addr);
		return ret_size;
		return;
	}
	down(&imlist_sem);
	for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			ret_size = tmp->size;
			*p = tmp->next;

			/* XXX: do we need the lock? */
			spin_lock(&init_mm.page_table_lock);
			unmap_vm_area(tmp);
			spin_unlock(&init_mm.page_table_lock);

			kfree(tmp);
			up(&imlist_sem);
			return ret_size;
			return;
		}
	}
	up(&imlist_sem);
	printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
			addr);
	return ret_size;
}
Loading