
Commit 18421015 authored by Steven Rostedt, committed by Steven Rostedt

ring-buffer: Use sync sched protection on ring buffer resizing



There was a comment in the ring buffer code that says the calling
layers should prevent tracing or reading of the ring buffer while
resizing. I have discovered that the tracers do not honor this
arrangement.

This patch moves the disabling and synchronizing of the ring buffer to
a higher layer during resizing. This guarantees that no writes
are occurring while the resize takes place.
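
In pattern form, the resize path now looks roughly like this (a
simplified sketch of the diff below, not the verbatim kernel code):

	atomic_inc(&buffer->record_disabled);	/* block new writers */
	synchronize_sched();			/* wait out in-flight writers */

	/* ... add or remove buffer pages safely ... */

	atomic_dec(&buffer->record_disabled);	/* re-enable writing */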

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent d954fbf0
+9 −16
@@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1214,9 +1211,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 
 	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
-
 }
 
 static void
@@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1245,8 +1236,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 
 	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
 }
 
 /**
@@ -1254,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- *  RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
@@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
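
For context: synchronize_sched() is sufficient here because ring-buffer
writers run with preemption disabled and bail out early once they observe
record_disabled (see ring_buffer_lock_reserve()). A rough sketch of the
ordering this patch relies on, simplified from the actual code:

	/* resizer (this patch) */             /* writer (pre-existing) */
	atomic_inc(&record_disabled);          preempt_disable_notrace();
	synchronize_sched();                   if (atomic_read(&record_disabled))
	/* all older writers finished */               goto out; /* skip write */
	/* ... resize pages safely ... */      /* ... write the event ... */
	atomic_dec(&record_disabled);          preempt_enable_notrace();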