Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 72c70a91 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "soc: qcom: ramdump: Rework multiple reader synchronization"

parents e794019e fc348a94
Loading
Loading
Loading
Loading
+106 −23
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
@@ -17,6 +17,7 @@
#include <linux/elf.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/atomic.h>
#include <soc/qcom/ramdump.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
@@ -34,14 +35,22 @@ static bool ramdump_devnode_inited;
#define MAX_STRTBL_SIZE 512
#define MAX_NAME_LENGTH 16

struct consumer_entry {
	bool data_ready;
	struct ramdump_device *rd_dev;
	struct list_head list;
};

struct ramdump_device {
	char name[256];

	unsigned int data_ready;
	unsigned int consumer_present;
	unsigned int consumers;
	atomic_t readers_left;
	int ramdump_status;

	struct completion ramdump_complete;
	struct mutex consumer_lock;
	struct list_head consumer_list;
	struct cdev cdev;
	struct device *dev;

@@ -58,20 +67,51 @@ static int ramdump_open(struct inode *inode, struct file *filep)
{
	struct ramdump_device *rd_dev = container_of(inode->i_cdev,
					struct ramdump_device, cdev);
	rd_dev->consumer_present = 1;
	struct consumer_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->list);
	entry->rd_dev = rd_dev;
	mutex_lock(&rd_dev->consumer_lock);
	rd_dev->consumers++;
	rd_dev->ramdump_status = 0;
	filep->private_data = rd_dev;
	list_add_tail(&entry->list, &rd_dev->consumer_list);
	mutex_unlock(&rd_dev->consumer_lock);
	filep->private_data = entry;
	return 0;
}

/*
 * Mark one consumer's dump data as consumed and, if this was the last
 * outstanding reader of the current ramdump session, complete
 * ramdump_complete to release the dumper waiting in
 * _do_ramdump()/_do_minidump().
 */
static void reset_ramdump_entry(struct consumer_entry *entry)
{
	struct ramdump_device *rd_dev = entry->rd_dev;

	/* This consumer is done with the current dump. */
	entry->data_ready = false;
	/*
	 * readers_left is initialized to rd_dev->consumers when a dump
	 * session starts; the reader that drops it to zero signals
	 * session completion.
	 */
	if (atomic_dec_return(&rd_dev->readers_left) == 0)
		complete(&rd_dev->ramdump_complete);
}

static int ramdump_release(struct inode *inode, struct file *filep)
{

	struct ramdump_device *rd_dev = container_of(inode->i_cdev,
					struct ramdump_device, cdev);
	rd_dev->consumer_present = 0;
	rd_dev->data_ready = 0;
	complete(&rd_dev->ramdump_complete);
	struct consumer_entry *entry = filep->private_data;

	mutex_lock(&rd_dev->consumer_lock);
	/*
	 * Avoid double decrementing in cases where we finish reading the dump
	 * and then close the file, but there are other readers that have not
	 * yet finished.
	 */
	if (entry->data_ready)
		reset_ramdump_entry(entry);
	rd_dev->consumers--;
	list_del(&entry->list);
	mutex_unlock(&rd_dev->consumer_lock);
	entry->rd_dev = NULL;
	kfree(entry);
	return 0;
}

@@ -112,7 +152,8 @@ static unsigned long offset_translate(loff_t user_offset,
static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
			loff_t *pos)
{
	struct ramdump_device *rd_dev = filep->private_data;
	struct consumer_entry *entry = filep->private_data;
	struct ramdump_device *rd_dev = entry->rd_dev;
	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
	unsigned long data_left = 0, bytes_before, bytes_after;
	unsigned long addr = 0;
@@ -121,10 +162,10 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
	int ret = 0;
	loff_t orig_pos = *pos;

	if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
	if ((filep->f_flags & O_NONBLOCK) && !entry->data_ready)
		return -EAGAIN;

	ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
	ret = wait_event_interruptible(rd_dev->dump_wait_q, entry->data_ready);
	if (ret)
		return ret;

@@ -224,19 +265,19 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
		dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);

	kfree(finalbuf);
	rd_dev->data_ready = 0;
	*pos = 0;
	complete(&rd_dev->ramdump_complete);
	reset_ramdump_entry(entry);
	return ret;
}

static unsigned int ramdump_poll(struct file *filep,
					struct poll_table_struct *wait)
{
	struct ramdump_device *rd_dev = filep->private_data;
	struct consumer_entry *entry = filep->private_data;
	struct ramdump_device *rd_dev = entry->rd_dev;
	unsigned int mask = 0;

	if (rd_dev->data_ready)
	if (entry->data_ready)
		mask |= (POLLIN | POLLRDNORM);

	poll_wait(filep, &rd_dev->dump_wait_q, wait);
@@ -312,6 +353,7 @@ void *create_ramdump_device(const char *dev_name, struct device *parent)
			"for %s segments only will be dumped.", dev_name);
	}

	INIT_LIST_HEAD(&rd_dev->consumer_list);
	init_waitqueue_head(&rd_dev->dump_wait_q);

	rd_dev->dev = device_create(ramdump_class, parent,
@@ -324,6 +366,8 @@ void *create_ramdump_device(const char *dev_name, struct device *parent)
		goto fail_return_minor;
	}

	mutex_init(&rd_dev->consumer_lock);
	atomic_set(&rd_dev->readers_left, 0);
	cdev_init(&rd_dev->cdev, &ramdump_file_ops);

	ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
@@ -336,6 +380,7 @@ void *create_ramdump_device(const char *dev_name, struct device *parent)
	return (void *)rd_dev;

fail_cdev_add:
	mutex_destroy(&rd_dev->consumer_lock);
	device_unregister(rd_dev->dev);
fail_return_minor:
	ida_simple_remove(&rd_minor_id, minor);
@@ -365,12 +410,27 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
{
	int ret, i;
	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
	struct consumer_entry *entry;
	Elf32_Phdr *phdr;
	Elf32_Ehdr *ehdr;
	unsigned long offset;

	if (!rd_dev->consumer_present) {
	/*
	 * Acquire the consumer lock here, and hold the lock until we are done
	 * preparing the data structures required for the ramdump session, and
	 * have woken all readers. This essentially freezes the current readers
	 * when the lock is taken here, such that the readers at that time are
	 * the only ones that will participate in the ramdump session. After
	 * the current list of readers has been awoken, new readers that add
	 * themselves to the reader list will not participate in the current
	 * ramdump session. This allows for the lock to be free while the
	 * ramdump is occurring, which prevents stalling readers who want to
	 * close the ramdump node or new readers that want to open it.
	 */
	mutex_lock(&rd_dev->consumer_lock);
	if (!rd_dev->consumers) {
		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
		mutex_unlock(&rd_dev->consumer_lock);
		return -EPIPE;
	}

@@ -388,8 +448,10 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
				       sizeof(*phdr) * nsegments;
		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
		rd_dev->elfcore_buf = (char *)ehdr;
		if (!rd_dev->elfcore_buf)
		if (!rd_dev->elfcore_buf) {
			mutex_unlock(&rd_dev->consumer_lock);
			return -ENOMEM;
		}

		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
@@ -415,13 +477,16 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
		}
	}

	rd_dev->data_ready = 1;
	list_for_each_entry(entry, &rd_dev->consumer_list, list)
		entry->data_ready = true;
	rd_dev->ramdump_status = -1;

	reinit_completion(&rd_dev->ramdump_complete);
	atomic_set(&rd_dev->readers_left, rd_dev->consumers);

	/* Tell userspace that the data is ready */
	wake_up(&rd_dev->dump_wait_q);
	mutex_unlock(&rd_dev->consumer_lock);

	/* Wait (with a timeout) to let the ramdump complete */
	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
@@ -434,7 +499,6 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
	} else
		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;

	rd_dev->data_ready = 0;
	rd_dev->elfcore_size = 0;
	kfree(rd_dev->elfcore_buf);
	rd_dev->elfcore_buf = NULL;
@@ -465,12 +529,27 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
{
	int ret, i;
	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
	struct consumer_entry *entry;
	struct elfhdr *ehdr;
	struct elf_shdr *shdr;
	unsigned long offset, strtbl_off;

	if (!rd_dev->consumer_present) {
	/*
	 * Acquire the consumer lock here, and hold the lock until we are done
	 * preparing the data structures required for the ramdump session, and
	 * have woken all readers. This essentially freezes the current readers
	 * when the lock is taken here, such that the readers at that time are
	 * the only ones that will participate in the ramdump session. After
	 * the current list of readers has been awoken, new readers that add
	 * themselves to the reader list will not participate in the current
	 * ramdump session. This allows for the lock to be free while the
	 * ramdump is occurring, which prevents stalling readers who want to
	 * close the ramdump node or new readers that want to open it.
	 */
	mutex_lock(&rd_dev->consumer_lock);
	if (!rd_dev->consumers) {
		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
		mutex_unlock(&rd_dev->consumer_lock);
		return -EPIPE;
	}

@@ -481,8 +560,10 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
			(sizeof(*shdr) * (nsegments + 2)) + MAX_STRTBL_SIZE;
	ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
	rd_dev->elfcore_buf = (char *)ehdr;
	if (!rd_dev->elfcore_buf)
	if (!rd_dev->elfcore_buf) {
		mutex_unlock(&rd_dev->consumer_lock);
		return -ENOMEM;
	}

	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
@@ -523,13 +604,16 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
	}
	ehdr->e_shnum = nsegments + 2;

	rd_dev->data_ready = 1;
	list_for_each_entry(entry, &rd_dev->consumer_list, list)
		entry->data_ready = true;
	rd_dev->ramdump_status = -1;

	reinit_completion(&rd_dev->ramdump_complete);
	atomic_set(&rd_dev->readers_left, rd_dev->consumers);

	/* Tell userspace that the data is ready */
	wake_up(&rd_dev->dump_wait_q);
	mutex_unlock(&rd_dev->consumer_lock);

	/* Wait (with a timeout) to let the ramdump complete */
	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
@@ -543,7 +627,6 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
	}

	rd_dev->data_ready = 0;
	rd_dev->elfcore_size = 0;
	kfree(rd_dev->elfcore_buf);
	rd_dev->elfcore_buf = NULL;