arch/x86/kernel/alternative.c (+3 −3)

@@ -498,12 +498,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
  */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
-	unsigned long flags;
 	char *vaddr;
 	int nr_pages = 2;
 	struct page *pages[2];
 	int i;
 
+	might_sleep();
 	if (!core_kernel_text((unsigned long)addr)) {
 		pages[0] = vmalloc_to_page(addr);
 		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -517,9 +517,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 		nr_pages = 1;
 	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 	BUG_ON(!vaddr);
-	local_irq_save(flags);
+	local_irq_disable();
 	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-	local_irq_restore(flags);
+	local_irq_enable();
 	vunmap(vaddr);
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
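The hunks above make text_poke() honest about its context requirements: the vmap() and vunmap() calls inside it can sleep, so the function may only run in process context with IRQs enabled. Replacing local_irq_save()/local_irq_restore() with plain local_irq_disable()/local_irq_enable() encodes that assumption instead of silently tolerating an atomic caller, and the new might_sleep() makes debug kernels complain as soon as the assumption is violated. A minimal sketch of what such an annotation checks, under DEBUG_ATOMIC_SLEEP-style semantics (assert_may_sleep is a hypothetical name for illustration; the kernel's real check is __might_sleep() and is considerably stricter):

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>

/*
 * Hypothetical, simplified stand-in for might_sleep(): warn once if a
 * function that may sleep is entered with IRQs off or in atomic
 * context. Not the kernel's implementation.
 */
static inline void assert_may_sleep(void)
{
	WARN_ONCE(irqs_disabled() || in_atomic(),
		  "sleeping function called from atomic context\n");
}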
mm/vmalloc.c (+3 −0)

@@ -1257,6 +1257,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
+	might_sleep();
 	__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
@@ -1276,6 +1277,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
 	struct vm_struct *area;
 
+	might_sleep();
+
 	if (count > num_physpages)
 		return NULL;
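With these annotations, vmap() and vunmap() yell on debug kernels the moment they are entered from a context that may not sleep, rather than failing obscurely later. A hedged usage sketch of the contract they now enforce (map_two_pages and unmap_again are illustrative helpers, not part of the patch):

#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Illustrative helper: map two struct pages contiguously into vmalloc
 * space. Safe only in process context; under a spinlock or with IRQs
 * disabled, the new might_sleep() in vmap() triggers a warning. */
static void *map_two_pages(struct page *p0, struct page *p1)
{
	struct page *pages[2] = { p0, p1 };

	return vmap(pages, 2, VM_MAP, PAGE_KERNEL);	/* may sleep */
}

static void unmap_again(void *vaddr)
{
	vunmap(vaddr);					/* may also sleep */
}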