Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fc348a94 authored by Isaac J. Manjarres
Browse files

soc: qcom: ramdump: Rework multiple reader synchronization



The synchronization for multiple readers makes it so that if
a reader opens a ramdump_* node, and tries to close it without
reading the contents, and while there is a ramdump in progress,
then that thread will have to wait until the thread that
initiated the ramdump times out. This is because the
implementation assumes that the only condition by which
consumers can finish their work with the ramdump, and wake
the ramdump thread is by reading the ramdump. This is not
correct, as the ramdump node can also be closed at any time,
including during the time that a ramdump is being read.

Rework the implementation to consider both reading the ramdump,
and closing the ramdump nodes as conditions that can result in
waking the ramdump thread, and fix the synchronization so that
if threads want to open/close the nodes, and there is a ramdump
in progress, they need not wait for that ramdump to finish.

Change-Id: I6135342492081092de58362387807da29c5568d7
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
parent 10268b7b
Loading
Loading
Loading
Loading
+54 −22
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
@@ -45,7 +45,7 @@ struct ramdump_device {
	char name[256];

	unsigned int consumers;
	atomic_t readers_done;
	atomic_t readers_left;
	int ramdump_status;

	struct completion ramdump_complete;
@@ -83,6 +83,15 @@ static int ramdump_open(struct inode *inode, struct file *filep)
	return 0;
}

static void reset_ramdump_entry(struct consumer_entry *entry)
{
	struct ramdump_device *rd_dev = entry->rd_dev;

	entry->data_ready = false;
	if (atomic_dec_return(&rd_dev->readers_left) == 0)
		complete(&rd_dev->ramdump_complete);
}

static int ramdump_release(struct inode *inode, struct file *filep)
{

@@ -91,6 +100,13 @@ static int ramdump_release(struct inode *inode, struct file *filep)
	struct consumer_entry *entry = filep->private_data;

	mutex_lock(&rd_dev->consumer_lock);
	/*
	 * Avoid double decrementing in cases where we finish reading the dump
	 * and then close the file, but there are other readers that have not
	 * yet finished.
	 */
	if (entry->data_ready)
		reset_ramdump_entry(entry);
	rd_dev->consumers--;
	list_del(&entry->list);
	mutex_unlock(&rd_dev->consumer_lock);
@@ -250,11 +266,7 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,

	kfree(finalbuf);
	*pos = 0;
	entry->data_ready = false;
	if (atomic_inc_return(&rd_dev->readers_done) == rd_dev->consumers) {
		atomic_set(&rd_dev->readers_done, 0);
		complete(&rd_dev->ramdump_complete);
	}
	reset_ramdump_entry(entry);
	return ret;
}

@@ -355,7 +367,7 @@ void *create_ramdump_device(const char *dev_name, struct device *parent)
	}

	mutex_init(&rd_dev->consumer_lock);
	atomic_set(&rd_dev->readers_done, 0);
	atomic_set(&rd_dev->readers_left, 0);
	cdev_init(&rd_dev->cdev, &ramdump_file_ops);

	ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
@@ -403,13 +415,24 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
	Elf32_Ehdr *ehdr;
	unsigned long offset;

	/*
	 * Acquire the consumer lock here, and hold the lock until we are done
	 * preparing the data structures required for the ramdump session, and
	 * have woken all readers. This essentially freezes the current readers
	 * when the lock is taken here, such that the readers at that time are
	 * the only ones that will participate in the ramdump session. After
	 * the current list of readers has been awoken, new readers that add
	 * themselves to the reader list will not participate in the current
	 * ramdump session. This allows for the lock to be free while the
	 * ramdump is occurring, which prevents stalling readers who want to
	 * close the ramdump node or new readers that want to open it.
	 */
	mutex_lock(&rd_dev->consumer_lock);
	if (!rd_dev->consumers) {
		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
		mutex_unlock(&rd_dev->consumer_lock);
		return -EPIPE;
	}
	mutex_unlock(&rd_dev->consumer_lock);

	if (rd_dev->complete_ramdump) {
		for (i = 0; i < nsegments-1; i++)
@@ -425,8 +448,10 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
				       sizeof(*phdr) * nsegments;
		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
		rd_dev->elfcore_buf = (char *)ehdr;
		if (!rd_dev->elfcore_buf)
		if (!rd_dev->elfcore_buf) {
			mutex_unlock(&rd_dev->consumer_lock);
			return -ENOMEM;
		}

		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
@@ -452,15 +477,16 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
		}
	}

	mutex_lock(&rd_dev->consumer_lock);
	list_for_each_entry(entry, &rd_dev->consumer_list, list)
		entry->data_ready = true;
	rd_dev->ramdump_status = -1;

	reinit_completion(&rd_dev->ramdump_complete);
	atomic_set(&rd_dev->readers_left, rd_dev->consumers);

	/* Tell userspace that the data is ready */
	wake_up(&rd_dev->dump_wait_q);
	mutex_unlock(&rd_dev->consumer_lock);

	/* Wait (with a timeout) to let the ramdump complete */
	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
@@ -473,13 +499,9 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
	} else
		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;

	list_for_each_entry(entry, &rd_dev->consumer_list, list)
		entry->data_ready = false;
	rd_dev->elfcore_size = 0;
	atomic_set(&rd_dev->readers_done, 0);
	kfree(rd_dev->elfcore_buf);
	rd_dev->elfcore_buf = NULL;
	mutex_unlock(&rd_dev->consumer_lock);
	return ret;

}
@@ -512,13 +534,24 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
	struct elf_shdr *shdr;
	unsigned long offset, strtbl_off;

	/*
	 * Acquire the consumer lock here, and hold the lock until we are done
	 * preparing the data structures required for the ramdump session, and
	 * have woken all readers. This essentially freezes the current readers
	 * when the lock is taken here, such that the readers at that time are
	 * the only ones that will participate in the ramdump session. After
	 * the current list of readers has been awoken, new readers that add
	 * themselves to the reader list will not participate in the current
	 * ramdump session. This allows for the lock to be free while the
	 * ramdump is occurring, which prevents stalling readers who want to
	 * close the ramdump node or new readers that want to open it.
	 */
	mutex_lock(&rd_dev->consumer_lock);
	if (!rd_dev->consumers) {
		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
		mutex_unlock(&rd_dev->consumer_lock);
		return -EPIPE;
	}
	mutex_unlock(&rd_dev->consumer_lock);

	rd_dev->segments = segments;
	rd_dev->nsegments = nsegments;
@@ -527,8 +560,10 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
			(sizeof(*shdr) * (nsegments + 2)) + MAX_STRTBL_SIZE;
	ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
	rd_dev->elfcore_buf = (char *)ehdr;
	if (!rd_dev->elfcore_buf)
	if (!rd_dev->elfcore_buf) {
		mutex_unlock(&rd_dev->consumer_lock);
		return -ENOMEM;
	}

	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
@@ -569,15 +604,16 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
	}
	ehdr->e_shnum = nsegments + 2;

	mutex_lock(&rd_dev->consumer_lock);
	list_for_each_entry(entry, &rd_dev->consumer_list, list)
		entry->data_ready = true;
	rd_dev->ramdump_status = -1;

	reinit_completion(&rd_dev->ramdump_complete);
	atomic_set(&rd_dev->readers_left, rd_dev->consumers);

	/* Tell userspace that the data is ready */
	wake_up(&rd_dev->dump_wait_q);
	mutex_unlock(&rd_dev->consumer_lock);

	/* Wait (with a timeout) to let the ramdump complete */
	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
@@ -591,13 +627,9 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
	}

	list_for_each_entry(entry, &rd_dev->consumer_list, list)
		entry->data_ready = false;
	rd_dev->elfcore_size = 0;
	atomic_set(&rd_dev->readers_done, 0);
	kfree(rd_dev->elfcore_buf);
	rd_dev->elfcore_buf = NULL;
	mutex_unlock(&rd_dev->consumer_lock);
	return ret;
}