arch/arm/kernel/process.c  +4 −1

@@ -32,6 +32,7 @@
 #include <asm/leds.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/mach/time.h>
 
 extern const char *processor_modes[];
 extern void setup_mm_for_reboot(char mode);
@@ -85,8 +86,10 @@ EXPORT_SYMBOL(pm_power_off);
 
 void default_idle(void)
 {
 	local_irq_disable();
-	if (!need_resched() && !hlt_counter)
+	if (!need_resched() && !hlt_counter) {
+		timer_dyn_reprogram();
 		arch_idle();
+	}
 	local_irq_enable();
 }
arch/arm/kernel/time.c  +7 −3

@@ -424,15 +424,19 @@ static int timer_dyn_tick_disable(void)
 	return ret;
 }
 
+/*
+ * Reprogram the system timer for at least the calculated time interval.
+ * This function should be called from the idle thread with IRQs disabled,
+ * immediately before sleeping.
+ */
 void timer_dyn_reprogram(void)
 {
 	struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
-	unsigned long flags;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock(&xtime_lock);
 	if (dyn_tick->state & DYN_TICK_ENABLED)
 		dyn_tick->reprogram(next_timer_interrupt() - jiffies);
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock(&xtime_lock);
 }
 
 static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
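Taken together, the process.c and time.c hunks establish a calling contract: timer_dyn_reprogram() is only invoked from default_idle(), after the final need_resched() check and with IRQs already disabled, so the xtime_lock writer no longer needs the _irqsave variant. Below is a minimal userspace sketch of that contract; the kernel primitives (local_irq_disable(), need_resched(), hlt_counter, arch_idle() and the seqlock) are modelled as plain stubs, so only the ordering is meaningful.

/*
 * Sketch of the idle-path contract: the need_resched() check, the timer
 * reprogram and the sleep must all happen inside one IRQ-off region,
 * otherwise a wakeup arriving between the check and the sleep is lost.
 */
#include <stdio.h>

static int irqs_enabled = 1;
static int resched_pending;		/* stand-in for need_resched() */
static int hlt_counter;			/* non-zero disables sleeping */

static void local_irq_disable(void) { irqs_enabled = 0; }
static void local_irq_enable(void)  { irqs_enabled = 1; }

static void timer_dyn_reprogram(void)
{
	/* Safe to take xtime_lock without irqsave: IRQs are already off. */
	printf("reprogram timer for next event (irqs_enabled=%d)\n",
	       irqs_enabled);
}

static void arch_idle(void)
{
	printf("cpu sleeping\n");
}

static void default_idle(void)
{
	local_irq_disable();
	if (!resched_pending && !hlt_counter) {
		timer_dyn_reprogram();	/* last thing before sleeping */
		arch_idle();
	}
	local_irq_enable();
}

int main(void)
{
	default_idle();
	return 0;
}

On ARM, arch_idle() executes a wait-for-interrupt, which wakes the CPU when an interrupt becomes pending even while IRQs are masked; that is what makes checking need_resched() with IRQs disabled race-free.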
arch/arm/mach-aaec2000/Makefile.boot  (new file, mode 100644)  +1 −0

+zreladdr-y	:= 0xf0008000
arch/arm/mm/init.c  +65 −6

@@ -522,6 +522,69 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
 		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
 }
 
+static inline void
+free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+{
+	struct page *start_pg, *end_pg;
+	unsigned long pg, pgend;
+
+	/*
+	 * Convert start_pfn/end_pfn to a struct page pointer.
+	 */
+	start_pg = pfn_to_page(start_pfn);
+	end_pg = pfn_to_page(end_pfn);
+
+	/*
+	 * Convert to physical addresses, and
+	 * round start upwards and end downwards.
+	 */
+	pg = PAGE_ALIGN(__pa(start_pg));
+	pgend = __pa(end_pg) & PAGE_MASK;
+
+	/*
+	 * If there are free pages between these,
+	 * free the section of the memmap array.
+	 */
+	if (pg < pgend)
+		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big.  Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+{
+	unsigned long bank_start, prev_bank_end = 0;
+	unsigned int i;
+
+	/*
+	 * [FIXME] This relies on each bank being in address order.  This
+	 * may not be the case, especially if the user has provided the
+	 * information on the command line.
+	 */
+	for (i = 0; i < mi->nr_banks; i++) {
+		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
+			continue;
+
+		bank_start = mi->bank[i].start >> PAGE_SHIFT;
+		if (bank_start < prev_bank_end) {
+			printk(KERN_ERR "MEM: unordered memory banks.  "
+				"Not freeing memmap.\n");
+			break;
+		}
+
+		/*
+		 * If we had a previous bank, and there is a space
+		 * between the current bank and the previous, free it.
+		 */
+		if (prev_bank_end && prev_bank_end != bank_start)
+			free_memmap(node, prev_bank_end, bank_start);
+
+		prev_bank_end = (mi->bank[i].start +
+				 mi->bank[i].size) >> PAGE_SHIFT;
+	}
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
@@ -540,16 +603,12 @@ void __init mem_init(void)
 	max_mapnr   = virt_to_page(high_memory) - mem_map;
 #endif
 
-	/*
-	 * We may have non-contiguous memory.
-	 */
-	if (meminfo.nr_banks != 1)
-		create_memmap_holes(&meminfo);
-
 	/* this will put all unused low memory onto the freelists */
 	for_each_online_node(node) {
 		pg_data_t *pgdat = NODE_DATA(node);
 
+		free_unused_memmap_node(node, &meminfo);
+
 		if (pgdat->node_spanned_pages != 0)
 			totalram_pages += free_all_bootmem_node(pgdat);
 	}
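The rounding in free_memmap() is the subtle part: the struct page entries covering a hole can only be handed back to the bootmem allocator in whole pages, so the start address is rounded up (PAGE_ALIGN) and the end address down (& PAGE_MASK). The following standalone sketch shows just that arithmetic; the 32-byte struct page size and the hole boundaries are made-up values for illustration.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Hypothetical size of one struct page entry. */
#define SIZEOF_PAGE	32UL

int main(void)
{
	/* Made-up hole in the memory map: no RAM for pfns 0x4001..0x7fff. */
	unsigned long start_pfn = 0x4001, end_pfn = 0x7fff;

	/* Byte offsets of the struct page entries covering the hole. */
	unsigned long start = start_pfn * SIZEOF_PAGE;	/* 0x80020 */
	unsigned long end   = end_pfn * SIZEOF_PAGE;	/* 0xfffe0 */

	/* Round start up and end down: only whole pages can be freed. */
	unsigned long pg    = PAGE_ALIGN(start);	/* 0x81000 */
	unsigned long pgend = end & PAGE_MASK;		/* 0xff000 */

	if (pg < pgend)
		printf("free %#lx bytes of memmap (%#lx..%#lx)\n",
		       pgend - pg, pg, pgend);
	return 0;
}

Because the range is rounded inwards, a hole that spans less than one full page of memmap frees nothing, which is exactly the safe behaviour.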
arch/arm/mm/mm-armv.c  +7 −80

@@ -169,7 +169,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
 	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
 
+	/*
+	 * Copy over the kernel and IO PGD entries
+	 */
 	init_pgd = pgd_offset_k(0);
+	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
 	if (!vectors_high()) {
 		/*
@@ -198,14 +205,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		spin_unlock(&mm->page_table_lock);
 	}
 
-	/*
-	 * Copy over the kernel and IO PGD entries
-	 */
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
-
-	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
 	return new_pgd;
 
 no_pte:
@@ -698,75 +697,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	for (i = 0; i < nr; i++)
 		create_mapping(io_desc + i);
 }
-
-static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
-{
-	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
-
-	/*
-	 * Convert start_pfn/end_pfn to a struct page pointer.
-	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
-
-	/*
-	 * Convert to physical addresses, and
-	 * round start upwards and end downwards.
-	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
-
-	/*
-	 * If there are free pages between these,
-	 * free the section of the memmap array.
-	 */
-	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
-}
-
-static inline void free_unused_memmap_node(int node, struct meminfo *mi)
-{
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
-
-	/*
-	 * [FIXME] This relies on each bank being in address order.  This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
-	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-			continue;
-
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks.  "
-				"Not freeing memmap.\n");
-			break;
-		}
-
-		/*
-		 * If we had a previous bank, and there is a space
-		 * between the current bank and the previous, free it.
-		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
-
-		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-					   mi->bank[i].size) >> PAGE_SHIFT;
-	}
-}
-
-/*
- * The mem_map array can get very big.  Free
- * the unused area of the memory map.
- */
-void __init create_memmap_holes(struct meminfo *mi)
-{
-	int node;
-
-	for_each_online_node(node)
-		free_unused_memmap_node(node, mi);
-}
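One behavioural detail worth flagging: the copy of free_unused_memmap_node() removed here is not quite identical to the one added in init.c. The removed version computed prev_bank_end with PAGE_ALIGN(...) >> PAGE_SHIFT, rounding up, while the new version uses a plain (...) >> PAGE_SHIFT, rounding down. The two only differ for a bank whose end address is not page-aligned, as this small arithmetic sketch shows (the bank address is hypothetical):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Hypothetical bank whose end falls halfway through a page. */
	unsigned long bank_end = 0x08000800;

	unsigned long old_pfn = PAGE_ALIGN(bank_end) >> PAGE_SHIFT; /* up */
	unsigned long new_pfn = bank_end >> PAGE_SHIFT;		    /* down */

	printf("old prev_bank_end = %#lx, new prev_bank_end = %#lx\n",
	       old_pfn, new_pfn);	/* 0x8001 vs 0x8000 */
	return 0;
}

For page-aligned banks, which is the common case, the two expressions produce the same pfn and the relocation from mm-armv.c to init.c is purely structural.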