arch/x86/mm/ioremap_32.c  +18 −0

@@ -19,6 +19,18 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_X86_64
+
+unsigned long __phys_addr(unsigned long x)
+{
+	if (x >= __START_KERNEL_map)
+		return x - __START_KERNEL_map + phys_base;
+	return x - PAGE_OFFSET;
+}
+EXPORT_SYMBOL(__phys_addr);
+
+#endif
+
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.

@@ -49,6 +61,7 @@ static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 	 * memmap entry.
 	 */
 	err = change_page_attr_addr(vaddr, npages, prot);
 	if (!err)
 		global_flush_tlb();
+

@@ -83,6 +96,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
 		return (__force void __iomem *)phys_to_virt(phys_addr);
 
+#ifdef CONFIG_X86_32
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */

@@ -98,6 +112,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		if (!PageReserved(page))
 			return NULL;
 	}
+#endif
 
 	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

@@ -211,6 +226,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+#ifdef CONFIG_X86_32
 
 int __initdata early_ioremap_debug;

@@ -443,3 +459,5 @@ void __this_fixmap_does_not_exist(void)
 {
 	WARN_ON(1);
 }
+
+#endif /* CONFIG_X86_32 */
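A note on the #ifdef CONFIG_X86_64 block above: ioremap_32.c is only built for 32-bit kernels, so the added __phys_addr() is never compiled there. It exists purely so that ioremap_32.c and ioremap_64.c end up textually identical, the usual preparation for merging two files into one. On 64-bit, the function covers the two virtual ranges that can map a physical address: the kernel-image mapping starting at __START_KERNEL_map (offset by the boot-time relocation in phys_base) and the direct mapping starting at PAGE_OFFSET. A minimal user-space sketch of that range split, using illustrative stand-in constants rather than the real kernel values:

	#include <stdio.h>

	/* Stand-ins for the kernel's constants; values are illustrative only. */
	#define DEMO_START_KERNEL_MAP	0xffffffff80000000UL	/* kernel image mapping */
	#define DEMO_PAGE_OFFSET	0xffff810000000000UL	/* direct mapping */

	static unsigned long demo_phys_base;	/* boot-time kernel relocation offset */

	/* Same two-range selection as the patch's __phys_addr() */
	static unsigned long demo_phys_addr(unsigned long x)
	{
		if (x >= DEMO_START_KERNEL_MAP)
			return x - DEMO_START_KERNEL_MAP + demo_phys_base;
		return x - DEMO_PAGE_OFFSET;
	}

	int main(void)
	{
		/* The same physical offset seen through both mappings */
		printf("%lx\n", demo_phys_addr(DEMO_START_KERNEL_MAP + 0x100000));
		printf("%lx\n", demo_phys_addr(DEMO_PAGE_OFFSET + 0x100000));
		return 0;
	}

With demo_phys_base left at zero, both calls print 100000: two different virtual addresses resolving to one physical address.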
arch/x86/mm/ioremap_64.c  +295 −22

@@ -6,6 +6,7 @@
  * (C) Copyright 1995 1996 Linus Torvalds
  */
 
+#include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/module.h>

@@ -18,6 +19,8 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_X86_64
+
 unsigned long __phys_addr(unsigned long x)
 {
 	if (x >= __START_KERNEL_map)

@@ -26,6 +29,8 @@ unsigned long __phys_addr(unsigned long x)
 }
 EXPORT_SYMBOL(__phys_addr);
 
+#endif
+
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.

@@ -33,28 +38,33 @@ EXPORT_SYMBOL(__phys_addr);
 static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 			       pgprot_t prot)
 {
-	int err = 0;
-	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
-		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		unsigned long vaddr = (unsigned long) __va(phys_addr);
-		int level;
+	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
+	int err, level;
+
+	/* No change for pages after the last mapping */
+	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
+		return 0;
+
+	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	vaddr = (unsigned long) __va(phys_addr);
 
 	/*
 	 * If there is no identity map for this address,
 	 * change_page_attr_addr is unnecessary
 	 */
 	if (!lookup_address(vaddr, &level))
-		return err;
+		return 0;
 
 	/*
-	 * Must use a address here and not struct page because
-	 * the phys addr can be a in hole between nodes and
-	 * not have an memmap entry.
+	 * Must use an address here and not struct page because the
+	 * phys addr can be in a hole between nodes and not have a
+	 * memmap entry.
 	 */
 	err = change_page_attr_addr(vaddr, npages, prot);
 	if (!err)
 		global_flush_tlb();
-	}
+
 	return err;
 }

@@ -86,7 +96,26 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
 		return (__force void __iomem *)phys_to_virt(phys_addr);
 
+#ifdef CONFIG_X86_32
+	/*
+	 * Don't allow anybody to remap normal RAM that we're using..
+	 */
+	if (phys_addr <= virt_to_phys(high_memory - 1)) {
+		char *t_addr, *t_end;
+		struct page *page;
+
+		t_addr = __va(phys_addr);
+		t_end = t_addr + (size - 1);
+
+		for (page = virt_to_page(t_addr);
+		     page <= virt_to_page(t_end); page++)
+			if (!PageReserved(page))
+				return NULL;
+	}
+#endif
+
 	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
+
 	/*
 	 * Mappings have to be page-aligned
 	 */

@@ -107,10 +136,12 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
+
 	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
 		vunmap(addr);
 		return NULL;
 	}
+
 	return (void __iomem *) (offset + (char __iomem *)addr);
 }
 EXPORT_SYMBOL(__ioremap);

@@ -154,12 +185,19 @@ void iounmap(volatile void __iomem *addr)
 	if ((void __force *)addr <= high_memory)
 		return;
 
+	/*
+	 * __ioremap special-cases the PCI/ISA range by not instantiating a
+	 * vm_area and by simply returning an address into the kernel mapping
+	 * of ISA space. So handle that here.
+	 */
+	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
+	    addr < phys_to_virt(ISA_END_ADDRESS))
+		return;
+
 	addr = (volatile void __iomem *)
 		(PAGE_MASK & (unsigned long __force)addr);
 
 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
 	   in parallel. Reuse of the virtual address is prevented by
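Two details in the hunks above are worth a note. First, the reworked ioremap_change_attr() and the early_ioremap() added below both rely on the same last_addr = phys_addr + size - 1 idiom: computing the inclusive end of the range keeps the bounds test exact even for ranges touching the top of the address space, and unsigned wraparound (last_addr < phys_addr) doubles as the zero-size/overflow check. Second, iounmap() now mirrors the PCI/ISA shortcut taken by __ioremap(), returning early instead of searching for a vm_area that was never created. A small stand-alone illustration of the range idiom (the demo_ names are ours, not the kernel's):

	#include <stdio.h>

	/* Returns 1 if [phys_addr, phys_addr + size) is non-empty and does not wrap. */
	static int demo_range_ok(unsigned long phys_addr, unsigned long size)
	{
		unsigned long last_addr = phys_addr + size - 1;	/* inclusive end */

		/* size == 0 wraps last_addr below phys_addr; so does overflow */
		return size && last_addr >= phys_addr;
	}

	int main(void)
	{
		printf("%d\n", demo_range_ok(0x1000, 0x1000));		/* 1: normal range */
		printf("%d\n", demo_range_ok(0x1000, 0));		/* 0: zero size    */
		printf("%d\n", demo_range_ok(~0UL - 10, 0x100));	/* 0: wraps around */
		return 0;
	}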
@@ -188,3 +226,238 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
+
+#ifdef CONFIG_X86_32
+
+int __initdata early_ioremap_debug;
+
+static int __init early_ioremap_debug_setup(char *str)
+{
+	early_ioremap_debug = 1;
+
+	return 0;
+}
+early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
+static __initdata int after_paging_init;
+static __initdata unsigned long bm_pte[1024]
+				__attribute__((aligned(PAGE_SIZE)));
+
+static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
+{
+	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
+}
+
+static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
+{
+	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
+}
+
+void __init early_ioremap_init(void)
+{
+	unsigned long *pgd;
+
+	if (early_ioremap_debug)
+		printk(KERN_DEBUG "early_ioremap_init()\n");
+
+	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+	*pgd = __pa(bm_pte) | _PAGE_TABLE;
+	memset(bm_pte, 0, sizeof(bm_pte));
+	/*
+	 * The boot-ioremap range spans multiple pgds, for which
+	 * we are not prepared:
+	 */
+	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
+		WARN_ON(1);
+		printk(KERN_WARNING "pgd %p != %p\n",
+		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+		       fix_to_virt(FIX_BTMAP_BEGIN));
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
+		       fix_to_virt(FIX_BTMAP_END));
+		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
+		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+	}
+}
+
+void __init early_ioremap_clear(void)
+{
+	unsigned long *pgd;
+
+	if (early_ioremap_debug)
+		printk(KERN_DEBUG "early_ioremap_clear()\n");
+
+	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+	*pgd = 0;
+	__flush_tlb_all();
+}
+
+void __init early_ioremap_reset(void)
+{
+	enum fixed_addresses idx;
+	unsigned long *pte, phys, addr;
+
+	after_paging_init = 1;
+	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
+		addr = fix_to_virt(idx);
+		pte = early_ioremap_pte(addr);
+		if (*pte & _PAGE_PRESENT) {
+			phys = *pte & PAGE_MASK;
+			set_fixmap(idx, phys);
+		}
+	}
+}
+
+static void __init __early_set_fixmap(enum fixed_addresses idx,
+				      unsigned long phys, pgprot_t flags)
+{
+	unsigned long *pte, addr = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+	pte = early_ioremap_pte(addr);
+	if (pgprot_val(flags))
+		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
+	else
+		*pte = 0;
+	__flush_tlb_one(addr);
+}
+
+static inline void __init early_set_fixmap(enum fixed_addresses idx,
+					   unsigned long phys)
+{
+	if (after_paging_init)
+		set_fixmap(idx, phys);
+	else
+		__early_set_fixmap(idx, phys, PAGE_KERNEL);
+}
+
+static inline void __init early_clear_fixmap(enum fixed_addresses idx)
+{
+	if (after_paging_init)
+		clear_fixmap(idx);
+	else
+		__early_set_fixmap(idx, 0, __pgprot(0));
+}
+
+int __initdata early_ioremap_nested;
+
+static int __init check_early_ioremap_leak(void)
+{
+	if (!early_ioremap_nested)
+		return 0;
+
+	printk(KERN_WARNING
+	       "Debug warning: early ioremap leak of %d areas detected.\n",
+	       early_ioremap_nested);
+	printk(KERN_WARNING
+	       "please boot with early_ioremap_debug and report the dmesg.\n");
+	WARN_ON(1);
+
+	return 1;
+}
+late_initcall(check_early_ioremap_leak);
+
+void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+{
+	unsigned long offset, last_addr;
+	unsigned int nrpages, nesting;
+	enum fixed_addresses idx0, idx;
+
+	WARN_ON(system_state != SYSTEM_BOOTING);
+
+	nesting = early_ioremap_nested;
+	if (early_ioremap_debug) {
+		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
+		       phys_addr, size, nesting);
+		dump_stack();
+	}
+
+	/* Don't allow wraparound or zero size */
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (nesting >= FIX_BTMAPS_NESTING) {
+		WARN_ON(1);
+		return NULL;
+	}
+	early_ioremap_nested++;
+	/*
+	 * Mappings have to be page-aligned
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr) - phys_addr;
+
+	/*
+	 * Mappings have to fit in the FIX_BTMAP area.
+	 */
+	nrpages = size >> PAGE_SHIFT;
+	if (nrpages > NR_FIX_BTMAPS) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	/*
+	 * Ok, go for it..
+	 */
+	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+	idx = idx0;
+	while (nrpages > 0) {
+		early_set_fixmap(idx, phys_addr);
+		phys_addr += PAGE_SIZE;
+		--idx;
+		--nrpages;
+	}
+	if (early_ioremap_debug)
+		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
+
+	return (void *) (offset + fix_to_virt(idx0));
+}
+
+void __init early_iounmap(void *addr, unsigned long size)
+{
+	unsigned long virt_addr;
+	unsigned long offset;
+	unsigned int nrpages;
+	enum fixed_addresses idx;
+	unsigned int nesting;
+
+	nesting = --early_ioremap_nested;
+	WARN_ON(nesting < 0);
+
+	if (early_ioremap_debug) {
+		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
+		       size, nesting);
+		dump_stack();
+	}
+
+	virt_addr = (unsigned long)addr;
+	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
+		WARN_ON(1);
+		return;
+	}
+	offset = virt_addr & ~PAGE_MASK;
+	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+
+	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+	while (nrpages > 0) {
+		early_clear_fixmap(idx);
+		--idx;
+		--nrpages;
+	}
+}
+
+void __this_fixmap_does_not_exist(void)
+{
+	WARN_ON(1);
+}
+
+#endif /* CONFIG_X86_32 */
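For context on the CONFIG_X86_32 block above: early_ioremap() serves the boot window before paging_init(), when the regular ioremap() path (vmalloc area, page allocator) is not usable yet. Mappings are carved out of a fixed fixmap window, NR_FIX_BTMAPS pages per nesting level with at most FIX_BTMAPS_NESTING levels in flight, and must be torn down in LIFO order; that is why check_early_ioremap_leak() only has to inspect the nesting counter. A sketch of the expected calling pattern, with a hypothetical caller (demo_scan_bios_area and the physical range are made up for illustration; the pattern mirrors real early-boot users such as the DMI table scan):

	/* Hypothetical early-boot caller; not part of the patch. */
	static int __init demo_scan_bios_area(void)
	{
		void *p;

		/* Map 64 KiB of the legacy BIOS area via the boot-time fixmap. */
		p = early_ioremap(0xf0000, 0x10000);
		if (!p)
			return -1;

		/* ... examine the mapped bytes here ... */

		/* Unmap with the same size, strictly LIFO with respect to any
		   nested early_ioremap() calls made in between. */
		early_iounmap(p, 0x10000);

		return 0;
	}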