arch/sparc/include/asm/mmu_context_32.h  +3 −5

@@ -9,14 +9,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- * Initialize a new mmu context.  This is invoked when a new
+/* Initialize a new mmu context.  This is invoked when a new
  * address space instance (unique or shared) is instantiated.
  */
-#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 
-/*
- * Destroy a dead context.  This occurs when mmput drops the
+/* Destroy a dead context.  This occurs when mmput drops the
  * mm_users count to zero, the mmaps have been released, and
  * all the page tables have been flushed.  Our job is to destroy
  * any remaining processor-specific state.
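This hunk turns init_new_context() from a header macro into a declared function; the new definition is not part of this excerpt (it presumably lives with the rest of the context handling that is being pulled out of the headers). A minimal sketch, assuming it only needs to preserve the old macro's semantics, could be:

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Same effect as the removed macro: mark the mm as having no
	 * hardware context yet.  The real out-of-line version added by
	 * this patch (not shown here) is free to do any context
	 * bookkeeping it takes over from the headers.
	 */
	mm->context = NO_CONTEXT;
	return 0;
}

Moving the definition out of line lets the implementation use types and state (such as the context free list below) that no longer need to be exposed in pgtable_32.h.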
arch/sparc/include/asm/pgtable_32.h  +0 −32

@@ -79,8 +79,6 @@ extern unsigned long ptr_in_current_pgd;
 #define __S110	PAGE_SHARED
 #define __S111	PAGE_SHARED
 
-extern int num_contexts;
-
 /* First physical page can be anywhere, the following is needed so that
  * va-->pa and vice versa conversions work properly without performance
  * hit for all __pa()/__va() operations.

@@ -399,36 +397,6 @@ static inline pte_t pgoff_to_pte(unsigned long pgoff)
  */
 #define PTE_FILE_MAX_BITS 24
 
-/*
- */
-struct ctx_list {
-	struct ctx_list *next;
-	struct ctx_list *prev;
-	unsigned int ctx_number;
-	struct mm_struct *ctx_mm;
-};
-
-extern struct ctx_list *ctx_list_pool;	/* Dynamically allocated */
-extern struct ctx_list ctx_free;	/* Head of free list */
-extern struct ctx_list ctx_used;	/* Head of used contexts list */
-
-#define NO_CONTEXT -1
-
-static inline void remove_from_ctx_list(struct ctx_list *entry)
-{
-	entry->next->prev = entry->prev;
-	entry->prev->next = entry->next;
-}
-
-static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
-{
-	entry->next = head;
-	(entry->prev = head->prev)->next = entry;
-	head->prev = entry;
-}
-
-#define add_to_free_ctxlist(entry)	add_to_ctx_list(&ctx_free, entry)
-#define add_to_used_ctxlist(entry)	add_to_ctx_list(&ctx_used, entry)
-
 static inline unsigned long __get_phys (unsigned long addr)
 {
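The second hunk above removes the struct ctx_list pool and its circular, doubly-linked list helpers from the public header. Purely as an illustration of how those removed primitives were meant to be combined (the helper name below is hypothetical and not part of the patch), handing a free hardware context to an mm amounts to unlinking the first entry from ctx_free, relinking it onto ctx_used, and recording the binding:

static struct ctx_list *grab_context(struct mm_struct *mm)
{
	struct ctx_list *ctxp = ctx_free.next;	/* first entry on the free list */

	if (ctxp == &ctx_free)
		return NULL;			/* back at the sentinel: free list is empty */

	remove_from_ctx_list(ctxp);		/* unlink from ctx_free */
	add_to_used_ctxlist(ctxp);		/* relink onto ctx_used */
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
	return ctxp;
}

With the helpers gone from pgtable_32.h, any such logic has to live alongside the rest of the context management code rather than in header-only form.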
arch/sparc/kernel/setup_32.c  +0 −1

@@ -371,7 +371,6 @@ void __init setup_arch(char **cmdline_p)
 		(*(linux_dbvec->teach_debugger))();
 	}
 
-	init_mm.context = (unsigned long) NO_CONTEXT;
 	init_task.thread.kregs = &fake_swapper_regs;
 
 	/* Run-time patch instructions to match the cpu model */
arch/sparc/mm/fault_32.c  +0 −6

@@ -32,12 +32,6 @@
 int show_unhandled_signals = 1;
 
-/* At boot time we determine these two values necessary for setting
- * up the segment maps and page table entries (pte's).
- */
-
-int num_contexts;
-
 /* Return how much physical memory we have.
  */
 unsigned long probe_memory(void)
 {
arch/sparc/mm/init_32.c  +0 −18

@@ -82,24 +82,6 @@ void show_mem(unsigned int filter)
 #endif
 }
 
-void __init sparc_context_init(int numctx)
-{
-	int ctx;
-
-	ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list),
-	    SMP_CACHE_BYTES, 0UL);
-	for(ctx = 0; ctx < numctx; ctx++) {
-		struct ctx_list *clist;
-
-		clist = (ctx_list_pool + ctx);
-		clist->ctx_number = ctx;
-		clist->ctx_mm = NULL;
-	}
-	ctx_free.next = ctx_free.prev = &ctx_free;
-	ctx_used.next = ctx_used.prev = &ctx_used;
-	for(ctx = 0; ctx < numctx; ctx++)
-		add_to_free_ctxlist(ctx_list_pool + ctx);
-}
 
 extern unsigned long cmdline_memory_size;
 unsigned long last_valid_pfn;
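The removed sparc_context_init() bootmem-allocates the ctx_list pool, numbers every entry, resets the ctx_free and ctx_used sentinels to empty circular lists, and then pushes each entry onto the free list. As a sketch of the resulting invariant only (the counting helper below is hypothetical and not part of the patch), every one of the numctx entries ends up reachable from the ctx_free sentinel:

static int count_free_contexts(void)
{
	struct ctx_list *p;
	int n = 0;

	/* Walk the circular free list until we are back at the sentinel;
	 * right after sparc_context_init(numctx) this returns numctx.
	 */
	for (p = ctx_free.next; p != &ctx_free; p = p->next)
		n++;
	return n;
}

Since the pool, its sentinels, and this initializer are all dropped here, that boot-time setup presumably moves to wherever the rest of the context handling now lives, which this excerpt does not show.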