Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d0f21836 authored by Lee Jones, committed by Treehugger Robot
Browse files

ANDROID: Revert "tracing/ring-buffer: Have polling block on watermark"



This reverts commit e65ac2bd.

Bug: 263508491
Change-Id: I0acf7869b0d4a3977787c9280a4d8f39041b354b
Signed-off-by: Lee Jones <joneslee@google.com>
parent 5a8d108c
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -99,7 +99,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k


int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table, int full);
			  struct file *filp, poll_table *poll_table);




#define RING_BUFFER_ALL_CPUS -1
#define RING_BUFFER_ALL_CPUS -1
+19 −35
Original line number Original line Diff line number Diff line
@@ -566,21 +566,6 @@ size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
	return cnt - read;
	return cnt - read;
}
}


/*
 * full_hit - test whether the @full watermark has been reached
 * @buffer: the ring buffer
 * @cpu: the per-cpu buffer to check
 * @full: percentage of pages that must be dirty
 *
 * Trivially true when no watermark is set (@full == 0) or the cpu
 * buffer has no pages; otherwise true once more than @full percent
 * of the pages are dirty.
 */
static __always_inline bool full_hit(struct ring_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t pages = cpu_buffer->nr_pages;

	if (!pages || !full)
		return true;

	/* Compare in integer percent: dirty/pages > full/100 */
	return (ring_buffer_nr_dirty_pages(buffer, cpu) * 100) > (full * pages);
}

/*
/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 *
@@ -676,20 +661,22 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
		    !ring_buffer_empty_cpu(buffer, cpu)) {
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			unsigned long flags;
			bool pagebusy;
			bool pagebusy;
			bool done;
			size_t nr_pages;
			size_t dirty;


			if (!full)
			if (!full)
				break;
				break;


			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			done = !pagebusy && full_hit(buffer, cpu, full);
			nr_pages = cpu_buffer->nr_pages;

			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
			if (!cpu_buffer->shortest_full ||
			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full > full)
			    cpu_buffer->shortest_full > full)
				cpu_buffer->shortest_full = full;
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (done)
			if (!pagebusy &&
			    (!nr_pages || (dirty * 100) > full * nr_pages))
				break;
				break;
		}
		}


@@ -710,7 +697,6 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 * @cpu: the cpu buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * as data is added to any of the @buffer's cpu buffers. Otherwise
@@ -720,14 +706,14 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 * zero otherwise.
 * zero otherwise.
 */
 */
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table, int full)
			  struct file *filp, poll_table *poll_table)
{
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;
	struct rb_irq_work *work;


	if (cpu == RING_BUFFER_ALL_CPUS) {
	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
		work = &buffer->irq_work;
	} else {
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;
			return -EINVAL;


@@ -735,14 +721,8 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
		work = &cpu_buffer->irq_work;
		work = &cpu_buffer->irq_work;
	}
	}


	if (full) {
		poll_wait(filp, &work->full_waiters, poll_table);
		work->full_waiters_pending = true;
	} else {
	poll_wait(filp, &work->waiters, poll_table);
	poll_wait(filp, &work->waiters, poll_table);
	work->waiters_pending = true;
	work->waiters_pending = true;
	}

	/*
	/*
	 * There's a tight race between setting the waiters_pending and
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty.  Once the waiters_pending bit
	 * checking if the ring buffer is empty.  Once the waiters_pending bit
@@ -758,9 +738,6 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
	 */
	 */
	smp_mb();
	smp_mb();


	if (full)
		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
		return EPOLLIN | EPOLLRDNORM;
@@ -2663,6 +2640,10 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
static __always_inline void
static __always_inline void
rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
{
	size_t nr_pages;
	size_t dirty;
	size_t full;

	if (buffer->irq_work.waiters_pending) {
	if (buffer->irq_work.waiters_pending) {
		buffer->irq_work.waiters_pending = false;
		buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies it's own memory barriers */
		/* irq_work_queue() supplies it's own memory barriers */
@@ -2686,7 +2667,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)


	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);


	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
	full = cpu_buffer->shortest_full;
	nr_pages = cpu_buffer->nr_pages;
	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
		return;
		return;


	cpu_buffer->irq_work.wakeup_full = true;
	cpu_buffer->irq_work.wakeup_full = true;
+1 −1
Original line number Original line Diff line number Diff line
@@ -5993,7 +5993,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
		return EPOLLIN | EPOLLRDNORM;
		return EPOLLIN | EPOLLRDNORM;
	else
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table, iter->tr->buffer_percent);
					     filp, poll_table);
}
}


static __poll_t
static __poll_t