
Commit 9e01c1b7 authored by Rusty Russell

cpumask: convert kernel trace functions



Impact: Reduce future memory usage, use new cpumask API.

(Eventually, cpumask_var_t will be allocated based on nr_cpu_ids, not NR_CPUS).

Convert kernel trace functions to use struct cpumask API:
1) Use cpumask_copy/cpumask_test_cpu/for_each_cpu.
2) Use cpumask_var_t and alloc_cpumask_var/free_cpumask_var everywhere.
3) Use on_each_cpu instead of playing with current->cpus_allowed.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
parent 333af153
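
For readers new to the cpumask API, the conversions in points (1) and (2) of the commit message reduce to one lifecycle: allocate, copy/test/iterate, free. Below is a minimal sketch of that pattern (a hypothetical module written for illustration; demo_mask and the pr_info messages are invented and not part of this commit):

/*
 * Sketch of the cpumask_var_t lifecycle this commit converts to.
 */
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static cpumask_var_t demo_mask;

static int __init demo_init(void)
{
	int cpu;

	/* With CONFIG_CPUMASK_OFFSTACK=y this kmallocs nr_cpu_ids bits;
	 * otherwise cpumask_var_t is a plain struct cpumask and the call
	 * is a no-op that always succeeds. */
	if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
		return -ENOMEM;

	/* cpumask_copy() replaces direct struct assignment ... */
	cpumask_copy(demo_mask, cpu_possible_mask);

	/* ... cpumask_test_cpu() replaces cpu_isset() ... */
	if (cpumask_test_cpu(0, demo_mask))
		pr_info("cpu 0 is possible\n");

	/* ... and for_each_cpu() replaces for_each_cpu_mask(). */
	for_each_cpu(cpu, demo_mask)
		pr_info("cpu %d is set in demo_mask\n", cpu);

	return 0;
}

static void __exit demo_exit(void)
{
	/* Every successful alloc_cpumask_var() needs a matching free. */
	free_cpumask_var(demo_mask);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The indirection through cpumask_var_t is what the "Impact" line refers to: once every user goes through alloc_cpumask_var(), masks can be sized by the runtime nr_cpu_ids instead of the compile-time NR_CPUS.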
kernel/trace/ring_buffer.c +25 −17
@@ -195,7 +195,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
-	for_each_cpu_mask(cpu, buffer->cpumask)
+	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
@@ -267,7 +267,7 @@ struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
-	cpumask_t			cpumask;
+	cpumask_var_t			cpumask;
	atomic_t			record_disabled;

	struct mutex			mutex;
@@ -458,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
	if (!buffer)
		return NULL;

+	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+		goto fail_free_buffer;
+
	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

@@ -465,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
	if (buffer->pages == 1)
		buffer->pages++;

-	buffer->cpumask = cpu_possible_map;
+	cpumask_copy(buffer->cpumask, cpu_possible_mask);
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
-		goto fail_free_buffer;
+		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
@@ -492,6 +495,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
	}
	kfree(buffer->buffers);

+ fail_free_cpumask:
+	free_cpumask_var(buffer->cpumask);
+
 fail_free_buffer:
	kfree(buffer);
	return NULL;
@@ -510,6 +516,8 @@ ring_buffer_free(struct ring_buffer *buffer)
	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

+	free_cpumask_var(buffer->cpumask);
+
	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
@@ -1283,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,

	cpu = raw_smp_processor_id();

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
@@ -1396,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer,

	cpu = raw_smp_processor_id();

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
@@ -1478,7 +1486,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
@@ -1498,7 +1506,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
@@ -1515,7 +1523,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
@@ -1532,7 +1540,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
@@ -1850,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
	struct buffer_page *reader;
	int nr_loops = 0;

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	cpu_buffer = buffer->buffers[cpu];
@@ -2025,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
	struct ring_buffer_event *event;
	unsigned long flags;

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2062,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
	struct ring_buffer_iter *iter;
	unsigned long flags;

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2172,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2228,7 +2236,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
@@ -2252,8 +2260,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;

-	if (!cpu_isset(cpu, buffer_a->cpumask) ||
-	    !cpu_isset(cpu, buffer_b->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		return -EINVAL;

	/* At least make sure the two buffers are somewhat the same */
kernel/trace/trace.c +36 −24
@@ -89,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
	preempt_enable();
}

-static cpumask_t __read_mostly		tracing_buffer_mask;
+static cpumask_var_t __read_mostly	tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
-	for_each_cpu_mask(cpu, tracing_buffer_mask)
+	for_each_cpu(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -2646,13 +2646,7 @@ static struct file_operations show_traces_fops = {
/*
 * Only trace on a CPU if the bitmask is set:
 */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;

/*
 * The tracer itself will not take this lock, but still we want
@@ -2674,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,

	mutex_lock(&tracing_cpumask_update_lock);

-	len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
@@ -2693,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	int err, cpu;
+	cpumask_var_t tracing_cpumask_new;
+
+	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;

	mutex_lock(&tracing_cpumask_update_lock);
-	err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

@@ -2706,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
-		if (cpu_isset(cpu, tracing_cpumask) &&
-				!cpu_isset(cpu, tracing_cpumask_new)) {
+		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&global_trace.data[cpu]->disabled);
		}
-		if (!cpu_isset(cpu, tracing_cpumask) &&
-				cpu_isset(cpu, tracing_cpumask_new)) {
+		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

-	tracing_cpumask = tracing_cpumask_new;
+	cpumask_copy(tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask);

	return err;
}
@@ -3752,7 +3752,6 @@ void ftrace_dump(void)
	static DEFINE_SPINLOCK(ftrace_dump_lock);
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
-	static cpumask_t mask;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;
@@ -3786,8 +3785,6 @@ void ftrace_dump(void)
	 * and then release the locks again.
	 */

-	cpus_clear(mask);
-
	while (!trace_empty(&iter)) {

		if (!cnt)
@@ -3823,19 +3820,28 @@ __init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	int i;
+	int ret = -ENOMEM;

-	/* TODO: make the number of buffers hot pluggable with CPUS */
-	tracing_buffer_mask = cpu_possible_map;
+	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+		goto out_free_buffer_mask;
+
+	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+	cpumask_copy(tracing_cpumask, cpu_all_mask);

+	/* TODO: make the number of buffers hot pluggable with CPUS */
	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
						   TRACE_BUFFER_FLAGS);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
-		return 0;
+		goto out_free_cpumask;
	}
	global_trace.entries = ring_buffer_size(global_trace.buffer);


#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
					     TRACE_BUFFER_FLAGS);
@@ -3843,7 +3849,7 @@ __init static int tracer_alloc_buffers(void)
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
-		return 0;
+		goto out_free_cpumask;
	}
	max_tr.entries = ring_buffer_size(max_tr.buffer);
	WARN_ON(max_tr.entries != global_trace.entries);
@@ -3873,8 +3879,14 @@ __init static int tracer_alloc_buffers(void)
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);
+	ret = 0;

-	return 0;
+out_free_cpumask:
+	free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+	free_cpumask_var(tracing_buffer_mask);
+out:
+	return ret;
}
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
kernel/trace/trace_sysprof.c +3 −10
@@ -196,9 +196,9 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
	return HRTIMER_RESTART;
}

-static void start_stack_timer(int cpu)
+static void start_stack_timer(void *unused)
{
-	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+	struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
@@ -208,14 +208,7 @@ static void start_stack_timer(int cpu)

static void start_stack_timers(void)
{
-	cpumask_t saved_mask = current->cpus_allowed;
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-		start_stack_timer(cpu);
-	}
-	set_cpus_allowed_ptr(current, &saved_mask);
+	on_each_cpu(start_stack_timer, NULL, 1);
}

static void stop_stack_timer(int cpu)
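
The last hunk above is the on_each_cpu() conversion from point (3) of the commit message. For context, a minimal sketch of that pattern (a hypothetical example; poke_cpu() and its message are invented, not from the commit):

#include <linux/kernel.h>
#include <linux/smp.h>

/* on_each_cpu() callbacks take a void * argument and run in IPI
 * (interrupt) context on every online CPU, so they must not sleep. */
static void poke_cpu(void *unused)
{
	pr_info("running on cpu %d\n", smp_processor_id());
}

static void poke_all_cpus(void)
{
	/* wait == 1: return only after every CPU has run poke_cpu().
	 * This replaces the old dance of rebinding current->cpus_allowed
	 * to each CPU in turn, which serialized the work and briefly
	 * migrated the calling task around the machine. */
	on_each_cpu(poke_cpu, NULL, 1);
}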