kernel/trace/ring_buffer.c (+15, -26)

@@ -473,7 +473,7 @@ struct ring_buffer_per_cpu {
 	int				nr_pages_to_update;
 	struct list_head		new_pages; /* new pages to add */
 	struct work_struct		update_pages_work;
-	struct completion		update_completion;
+	struct completion		update_done;
 };
 
 struct ring_buffer {

@@ -1058,7 +1058,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
-	init_completion(&cpu_buffer->update_completion);
+	init_completion(&cpu_buffer->update_done);
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));

@@ -1461,7 +1461,7 @@ static void update_pages_handler(struct work_struct *work)
 	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
 			struct ring_buffer_per_cpu, update_pages_work);
 	rb_update_pages(cpu_buffer);
-	complete(&cpu_buffer->update_completion);
+	complete(&cpu_buffer->update_done);
 }
 
 /**

@@ -1534,39 +1534,29 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 	get_online_cpus();
 	/*
 	 * Fire off all the required work handlers
-	 * Look out for offline CPUs
-	 */
-	for_each_buffer_cpu(buffer, cpu) {
-		cpu_buffer = buffer->buffers[cpu];
-		if (!cpu_buffer->nr_pages_to_update ||
-		    !cpu_online(cpu))
-			continue;
-
-		schedule_work_on(cpu, &cpu_buffer->update_pages_work);
-	}
-	/*
-	 * This loop is for the CPUs that are not online.
-	 * We can't schedule anything on them, but it's not necessary
+	 * We can't schedule on offline CPUs, but it's not necessary
 	 * since we can change their buffer sizes without any race.
 	 */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		if (!cpu_buffer->nr_pages_to_update ||
-		    cpu_online(cpu))
+		if (!cpu_buffer->nr_pages_to_update)
 			continue;
 
-		rb_update_pages(cpu_buffer);
+		if (cpu_online(cpu))
+			schedule_work_on(cpu, &cpu_buffer->update_pages_work);
+		else
+			rb_update_pages(cpu_buffer);
 	}
 
 	/* wait for all the updates to complete */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		if (!cpu_buffer->nr_pages_to_update ||
-		    !cpu_online(cpu))
+		if (!cpu_buffer->nr_pages_to_update)
 			continue;
 
-		wait_for_completion(&cpu_buffer->update_completion);
-		/* reset this value */
+		if (cpu_online(cpu))
+			wait_for_completion(&cpu_buffer->update_done);
 		cpu_buffer->nr_pages_to_update = 0;
 	}

@@ -1593,13 +1583,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		if (cpu_online(cpu_id)) {
 			schedule_work_on(cpu_id,
 					 &cpu_buffer->update_pages_work);
-			wait_for_completion(&cpu_buffer->update_completion);
+			wait_for_completion(&cpu_buffer->update_done);
 		} else
 			rb_update_pages(cpu_buffer);
 
-		put_online_cpus();
-		/* reset this value */
 		cpu_buffer->nr_pages_to_update = 0;
+		put_online_cpus();
 	}
 
  out:

kernel/trace/trace.c (+2, -3)

@@ -763,8 +763,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
  * Register a new plugin tracer.
  */
 int register_tracer(struct tracer *type)
-__releases(kernel_lock)
-__acquires(kernel_lock)
 {
 	struct tracer *t;
 	int ret = 0;

@@ -5114,7 +5112,8 @@ __init static int tracer_alloc_buffers(void)
 		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
-	set_buffer_entries(&global_trace, ring_buf_size);
+	set_buffer_entries(&global_trace,
+			   ring_buffer_size(global_trace.buffer, 0));
 #ifdef CONFIG_TRACER_MAX_TRACE
 	set_buffer_entries(&max_tr, 1);
 #endif
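The ring_buffer.c hunks above fold the old per-CPU resize code into a single pass: each CPU whose buffer needs resizing either gets the update scheduled as work bound to that CPU (if it is online) or has its pages updated directly (an offline CPU's buffer cannot race with writers), and one follow-up pass waits on update_done and clears nr_pages_to_update. Below is a minimal, self-contained sketch of that pattern, not the kernel's actual code: resize_ctx, do_update(), update_handler(), resize_ctx_init() and resize_all() are hypothetical stand-ins for ring_buffer_per_cpu, rb_update_pages() and the resize path, and it assumes the get_online_cpus()/put_online_cpus() hotplug API of the era this diff targets.

/*
 * Sketch only. The structure and loop shape mirror the merged loops in
 * ring_buffer_resize(); all names here are illustrative stand-ins.
 */
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct resize_ctx {
	int			nr_pages_to_update;	/* signed delta requested for this CPU */
	struct work_struct	work;
	struct completion	done;
};

static DEFINE_PER_CPU(struct resize_ctx, resize_ctx);

static void do_update(struct resize_ctx *ctx)
{
	/* ...apply ctx->nr_pages_to_update to this CPU's buffer... */
}

static void update_handler(struct work_struct *work)
{
	struct resize_ctx *ctx = container_of(work, struct resize_ctx, work);

	do_update(ctx);
	complete(&ctx->done);
}

static void resize_ctx_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct resize_ctx *ctx = &per_cpu(resize_ctx, cpu);

		INIT_WORK(&ctx->work, update_handler);
		init_completion(&ctx->done);
	}
}

static void resize_all(void)
{
	struct resize_ctx *ctx;
	int cpu;

	get_online_cpus();

	/* One pass: schedule on online CPUs, update offline ones in place. */
	for_each_possible_cpu(cpu) {
		ctx = &per_cpu(resize_ctx, cpu);
		if (!ctx->nr_pages_to_update)
			continue;

		if (cpu_online(cpu))
			schedule_work_on(cpu, &ctx->work);
		else
			do_update(ctx);
	}

	/* Second pass: wait only where work was actually scheduled. */
	for_each_possible_cpu(cpu) {
		ctx = &per_cpu(resize_ctx, cpu);
		if (!ctx->nr_pages_to_update)
			continue;

		if (cpu_online(cpu))
			wait_for_completion(&ctx->done);
		ctx->nr_pages_to_update = 0;
	}

	put_online_cpus();
}

Binding the update to each online CPU via schedule_work_on() keeps the page-list manipulation local to the CPU that owns the buffer, while the offline case can be handled inline because no writer can be running there; merging the two original loops removes the duplicated cpu_online() bookkeeping without changing that behaviour.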