
Commit e0565a1c authored by Kyle McMartin, committed by Kyle McMartin

[PARISC] Fix and cleanup ioremap.c to work with 4level-fixup.h



Fixup ioremap a bit. It seems to work on 32-bit kernels, but fails
miserably on the first ioremapped access on 64-bit kernels. Also, having
STI enabled causes it to fail. Probably because we're passing an ioremapped
region to a real-mode STI call...

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
parent 45dbe914
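
For context: the patch converts remap_area_pages() to the generic four-level walk that 4level-fixup.h expects (pgd -> pud -> pmd -> pte, with the pud level folded back onto the pgd on parisc). Below is a minimal sketch of that walk using the same calls the patch uses; map_one_kernel_page() is a hypothetical helper, not part of the patch:

/* Sketch only: map one kernel page at 'address' to 'phys_addr' by
 * descending the generic four-level page tables.  With 4level-fixup.h,
 * pud_alloc() collapses to handing back the pgd slot. */
static int map_one_kernel_page(unsigned long address, unsigned long phys_addr,
			       pgprot_t pgprot)
{
	pgd_t *dir = pgd_offset_k(address);	/* pgd slot for a kernel vaddr */
	pud_t *pud = pud_alloc(&init_mm, dir, address);
	pmd_t *pmd;
	pte_t *pte;

	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(&init_mm, pud, address);
	if (!pmd)
		return -ENOMEM;
	pte = pte_alloc_kernel(pmd, address);
	if (!pte)
		return -ENOMEM;

	BUG_ON(!pte_none(*pte));	/* nothing may be mapped here yet */
	set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
	return 0;
}

This is the same shape remap_area_pages() takes in the patch, reduced to a single page.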
+63 −37
 /*
  * arch/parisc/mm/ioremap.c
  *
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
  * (C) Copyright 1995 1996 Linus Torvalds
  * (C) Copyright 2001 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
  */
 
 #include <linux/vmalloc.h>
@@ -14,81 +11,107 @@
 #include <linux/module.h>
 #include <asm/io.h>
 #include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
+static inline void 
+remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
 	       unsigned long phys_addr, unsigned long flags)
 {
-	unsigned long end;
+	unsigned long end, pfn;
+	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
+				   _PAGE_ACCESSED | flags);
 
 	address &= ~PMD_MASK;
+
 	end = address + size;
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
-	if (address >= end)
-		BUG();
+
+	BUG_ON(address >= end);
+
+	pfn = phys_addr >> PAGE_SHIFT;
 	do {
-		if (!pte_none(*pte)) {
-			printk(KERN_ERR "remap_area_pte: page already exists\n");
-			BUG();
-		}
-		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW | 
-					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
+		BUG_ON(!pte_none(*pte));
+
+		set_pte(pte, pfn_pte(pfn, pgprot));
+
 		address += PAGE_SIZE;
-		phys_addr += PAGE_SIZE;
+		pfn++;
 		pte++;
 	} while (address && (address < end));
 }

-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
+static inline int 
+remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
 	       unsigned long phys_addr, unsigned long flags)
 {
 	unsigned long end;
 
 	address &= ~PGDIR_MASK;
+
 	end = address + size;
 	if (end > PGDIR_SIZE)
 		end = PGDIR_SIZE;
+
+	BUG_ON(address >= end);
+
 	phys_addr -= address;
-	if (address >= end)
-		BUG();
 	do {
 		pte_t *pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
+
+		remap_area_pte(pte, address, end - address, 
+			       address + phys_addr, flags);
+
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address && (address < end));
+
 	return 0;
 }

-#if (USE_HPPA_IOREMAP)
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
+#if USE_HPPA_IOREMAP
+static int 
+remap_area_pages(unsigned long address, unsigned long phys_addr,
 		 unsigned long size, unsigned long flags)
 {
-	int error;
 	pgd_t *dir;
+	int error = 0;
 	unsigned long end = address + size;
+
+	BUG_ON(address >= end);
+
 	phys_addr -= address;
-	dir = pgd_offset(&init_mm, address);
+	dir = pgd_offset_k(address);
+
 	flush_cache_all();
-	if (address >= end)
-		BUG();
+
 	do {
+		pud_t *pud;
 		pmd_t *pmd;
-		pmd = pmd_alloc(&init_mm, dir, address);
+
 		error = -ENOMEM;
+		pud = pud_alloc(&init_mm, dir, address);
+		if (!pud)
+			break;
+
+		pmd = pmd_alloc(&init_mm, pud, address);
 		if (!pmd)
 			break;
+
 		if (remap_area_pmd(pmd, address, end - address,
 				   phys_addr + address, flags))
 			break;
+
 		error = 0;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
+
 	flush_tlb_all();
+
 	return error;
 }
 #endif /* USE_HPPA_IOREMAP */
@@ -123,8 +146,7 @@ EXPORT_SYMBOL(__raw_bad_addr);

 /*
  * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * address space.
  *
  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
  * have to convert them into an offset in a page-aligned mapping, but the
@@ -167,10 +189,12 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 		t_addr = __va(phys_addr);
 		t_end = t_addr + (size - 1);
 
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+		for (page = virt_to_page(t_addr); 
+		     page <= virt_to_page(t_end); page++) {
 			if(!PageReserved(page))
 				return NULL;
+		}
 	}
 
 	/*
 	 * Mappings have to be page-aligned
@@ -185,11 +209,13 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
+
 	addr = area->addr;
 	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
 		vfree(addr);
 		return NULL;
 	}
+
 	return (void __iomem *) (offset + (char *)addr);
 #endif
 }
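
To illustrate the commit message's STI caveat: __ioremap() hands back a kernel-virtual __iomem cookie that is fine for MMIO accessors but useless to firmware running in real mode, which needs the physical address. A hypothetical usage fragment follows; the base address, length, and register offset are made up:

/* Hypothetical driver fragment.  The cookie from __ioremap() may be used
 * with MMIO accessors such as __raw_writel(), but must never be passed
 * to a real-mode firmware (e.g. STI) call -- give firmware the physical
 * address instead. */
static int example_map_device(unsigned long phys_base, unsigned long len)
{
	void __iomem *regs;

	regs = __ioremap(phys_base, len, _PAGE_NO_CACHE);
	if (!regs)
		return -ENOMEM;

	__raw_writel(0x1, regs + 0x10);	/* poke a device register */

	iounmap(regs);
	return 0;
}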