Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 796c864c authored by Vijayanand Jitta's avatar Vijayanand Jitta Committed by Vinayak Menon
Browse files

soc: qcom: Enable slabowner support in minidump



Register a slab owner region in the minidump table and dump slab owner
data into it on kernel panic.

Change-Id: I1644ead8ce75ddfca7181ccf102dedabf42c6ad7
Signed-off-by: default avatarVijayanand Jitta <vjitta@codeaurora.org>
parent 3f17f5a3
Loading
Loading
Loading
Loading
+51 −0
Original line number Diff line number Diff line
@@ -118,6 +118,11 @@ size_t md_pageowner_dump_size = SZ_2M;
char *md_pageowner_dump_addr;
#endif

#ifdef CONFIG_SLUB_DEBUG
/* Size of the minidump region reserved for slab owner data; default 2 MB,
 * tunable at runtime via the slab_owner_dump_size_mb debugfs file.
 */
size_t md_slabowner_dump_size = SZ_2M;
/* Base of the registered "SLABOWNER" minidump buffer; NULL until the
 * region is registered (md_register_memory_dump).
 */
char *md_slabowner_dump_addr;
#endif

/* Modules information */
#ifdef CONFIG_MODULES
#define NUM_MD_MODULES	200
@@ -984,6 +989,11 @@ static int md_panic_handler(struct notifier_block *this,
	if (md_slabinfo_seq_buf)
		md_dump_slabinfo();

#ifdef CONFIG_SLUB_DEBUG
	if (md_slabowner_dump_addr)
		md_dump_slabowner();
#endif

#ifdef CONFIG_PAGE_OWNER
	if (md_pageowner_dump_addr)
		md_dump_pageowner();
@@ -1079,6 +1089,10 @@ static bool md_register_memory_dump(int size, char *name)
#ifdef CONFIG_PAGE_OWNER
	if (!strcmp(name, "PAGEOWNER"))
		WRITE_ONCE(md_pageowner_dump_addr, buffer_start);
#endif
#ifdef CONFIG_SLUB_DEBUG
	if (!strcmp(name, "SLABOWNER"))
		WRITE_ONCE(md_slabowner_dump_addr, buffer_start);
#endif
	return true;
}
@@ -1183,6 +1197,38 @@ static const struct file_operations proc_page_owner_dump_size_ops = {
};
#endif

#ifdef CONFIG_SLUB_DEBUG
/*
 * slab_owner_dump_size_write - debugfs write handler for
 * slab_owner_dump_size_mb.
 *
 * Parses an unsigned size from userspace and asks the minidump core to
 * resize/re-register the "SLABOWNER" region accordingly.
 * Returns @count on success, -EINVAL on a malformed number.
 */
static ssize_t slab_owner_dump_size_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	unsigned long long new_size;

	if (kstrtoull_from_user(ubuf, count, 0, &new_size)) {
		pr_err_ratelimited("Invalid format for size\n");
		return -EINVAL;
	}

	update_dump_size("SLABOWNER", new_size,
			 &md_slabowner_dump_addr, &md_slabowner_dump_size);

	return count;
}

/*
 * slab_owner_dump_size_read - debugfs read handler for
 * slab_owner_dump_size_mb; reports the slab owner region size in MB.
 *
 * Fix: md_slabowner_dump_size is a size_t, so it must be printed with
 * %zu; the original used %llu, a format/argument mismatch (undefined
 * behavior per the printf contract, and a -Wformat warning).
 */
static ssize_t slab_owner_dump_size_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char buf[100];

	snprintf(buf, sizeof(buf), "%zu MB\n", md_slabowner_dump_size / SZ_1M);
	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
}

/* File operations for the slab_owner_dump_size_mb debugfs entry. */
static const struct file_operations proc_slab_owner_dump_size_ops = {
	.open	= simple_open,
	.write	= slab_owner_dump_size_write,
	.read	= slab_owner_dump_size_read,
};
#endif

static void md_register_panic_data(void)
{
	md_register_panic_entries(MD_RUNQUEUE_PAGES, "KRUNQUEUE",
@@ -1200,6 +1246,11 @@ static void md_register_panic_data(void)
		debugfs_create_file("page_owner_dump_size_mb", 0400, NULL, NULL,
			    &proc_page_owner_dump_size_ops);
	}
	if (is_slub_debug_enabled()) {
		md_register_memory_dump(md_slabowner_dump_size, "SLABOWNER");
		debugfs_create_file("slab_owner_dump_size_mb", 0400, NULL, NULL,
			    &proc_slab_owner_dump_size_ops);
	}
}

#ifdef CONFIG_MODULES
+10 −0
Original line number Diff line number Diff line
@@ -82,4 +82,14 @@ extern void md_dump_pageowner(void);
static inline void md_dump_pageowner(void) {}
static inline bool is_page_owner_enabled(void) { return false; }
#endif
#ifdef CONFIG_SLUB_DEBUG
/* Size and base address of the "SLABOWNER" minidump region (defined in
 * the minidump driver).
 */
extern size_t md_slabowner_dump_size;
extern char *md_slabowner_dump_addr;

/* Both implemented in mm/slub.c when SLUB debugging is built in. */
extern bool is_slub_debug_enabled(void);
extern void md_dump_slabowner(void);
#else
/* Stubs so callers need no #ifdefs when SLUB debugging is disabled. */
static inline void md_dump_slabowner(void) {}
static inline bool is_slub_debug_enabled(void) { return false; }
#endif
#endif
+273 −1
Original line number Diff line number Diff line
@@ -34,8 +34,12 @@
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>
#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
#include <soc/qcom/minidump.h>
#include <linux/debugfs.h>
#include <linux/jhash.h>
#endif

#include "internal.h"

@@ -4769,6 +4773,176 @@ static int list_locations(struct kmem_cache *s, char *buf,
		len += sprintf(buf, "No data\n");
	return len;
}

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP

/* Seed for jhash2() when hashing allocation stack traces. */
#define STACK_HASH_SEED 0x9747b28c

/* Bitmask of kmalloc cache indices to include in the slab owner dump;
 * bit i selects kmalloc_caches[KMALLOC_NORMAL][i]. Tunable via debugfs.
 */
static unsigned long slab_owner_filter;
/* Bytes at the end of the dump region reserved for the stack-handle
 * dedup table (see find_stack()). Tunable via debugfs.
 */
static unsigned long slab_owner_handles_size = SZ_16K;
/* Number of handles currently stored in the dedup table. */
static int num_handles;

/* Report whether SLUB debugging was enabled (slub_debug flags non-zero). */
bool is_slub_debug_enabled(void)
{
	return slub_debug != 0;
}

/*
 * find_stack - deduplicate stack-trace handles in the slab owner dump.
 *
 * The last slab_owner_handles_size bytes of the dump region hold a flat
 * table of stack hashes already emitted in full. Returns true if @handle
 * is already in the table (caller may emit a short record), false if it
 * is new; a new handle is appended when the table has room.
 *
 * Fixes: the table was typed int * while handles are u32 (sign-mixing on
 * compare/store); the overflow message was garbled and named a knob
 * ("slab_owner_handle_size") that does not exist — the debugfs file is
 * slab_owner_handles_size_kb. The parameter shadowing the global
 * md_slabowner_dump_addr is renamed for clarity.
 *
 * @cur is currently unused; kept for interface compatibility with callers.
 */
static bool find_stack(u32 handle,
		 char *dump_addr, char *cur)
{
	u32 *handles;
	int i;

	handles = (u32 *) (dump_addr +
			md_slabowner_dump_size - slab_owner_handles_size);

	for (i = 0; i < num_handles; i++)
		if (handle == handles[i])
			return true;

	/* Append only while one more slot fits before the region end. */
	if ((handles + num_handles)
		< (u32 *)(dump_addr + md_slabowner_dump_size)) {
		handles[num_handles] = handle;
		num_handles += 1;
	} else {
		pr_err_ratelimited("Can't store handles, increase slab_owner_handles_size_kb\n");
	}
	return false;
}

/* Calculate hash for a stack */
static u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
			       size * sizeof(unsigned long) / sizeof(u32),
			       STACK_HASH_SEED);
}

/*
 * dump_tracking - emit allocation-owner info for one slab object at @buf.
 *
 * Writes "object handle nr_entries" followed by one return address per
 * line the first time a given stack is seen; later objects with the same
 * stack get a single line with nr_entries == 0 so the stack can be looked
 * up by handle in post-processing. Returns the number of bytes written
 * (0 when the cache stores no user tracking or no allocation is recorded).
 *
 * NOTE(review): the "ret == size" truncation checks look ineffective —
 * scnprintf() returns at most size - 1, so ret can never equal size and
 * the goto err paths appear unreachable; confirm against vsprintf
 * semantics. Also, with !CONFIG_STACKTRACE the err label is never jumped
 * to (harmless, but may warn about an unused label).
 */
static ssize_t dump_tracking(char *buf, size_t size, struct kmem_cache *s,
			void *object)
{
	struct track *t;
	int ret;
	u32 handle, nr_entries;

	/* Owner data exists only when the cache records its users. */
	if (!(s->flags & SLAB_STORE_USER))
		return 0;

	t = get_track(s, object, TRACK_ALLOC);
	if (!t->addr)
		return 0;

#ifdef CONFIG_STACKTRACE
	{
		int i;

		/* Count recorded return addresses (array is NULL-padded). */
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				continue;
			else
				break;
		nr_entries = i;
		handle = hash_stack(t->addrs, nr_entries);

		/*
		 * Dump the full stack when the write cursor has already
		 * crossed into the handle-table area (dedup unusable) or
		 * when this stack has not been emitted before.
		 */
		if ((buf > (md_slabowner_dump_addr +
			md_slabowner_dump_size - slab_owner_handles_size))
			|| !find_stack(handle, md_slabowner_dump_addr, buf)) {

			ret = scnprintf(buf, size, "%p %u %u\n",
				object, handle, nr_entries);
			if (ret == size)
				goto err;

			for (i = 0; i < nr_entries; i++) {
				ret += scnprintf(buf + ret, size - ret,
						"%p\n", (void *)t->addrs[i]);
				if (ret == size)
					goto err;
			}
		} else {
			/* Stack already dumped: reference it by handle only. */
			ret = scnprintf(buf, size, "%p %u %u\n",
					object, handle, 0);
		}
	}
#else
	/* No stacktrace support: fall back to the single caller address. */
	ret = scnprintf(buf, size, "%p %p\n", object, (void *)t->addr);

#endif
err:
	return ret;
}

void md_dump_slabowner(void)
{
	struct kmem_cache *s;
	int node;
	char *buf = md_slabowner_dump_addr;
	struct kmem_cache_node *n;
	void *addr;
	void *p;
	ssize_t ret, size = md_slabowner_dump_size;
	int i;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!test_bit(i, &slab_owner_filter))
			continue;
		s = kmalloc_caches[KMALLOC_NORMAL][i];
		if (!s)
			continue;
		ret = scnprintf(buf, size, "%s\n", s->name);
		if (ret == size)
			return;
		buf += ret;
		size -= ret;
		for_each_kmem_cache_node(s, node, n) {
			unsigned long flags;
			struct page *page;

			if (!atomic_long_read(&n->nr_slabs))
				continue;

			spin_lock_irqsave(&n->list_lock, flags);

			list_for_each_entry(page, &n->partial, lru) {
				addr = page_address(page);
				slab_lock(page);
				for_each_object(p, s, addr, page->objects) {
					ret  = dump_tracking(buf, size, s, p);
					if (ret == size) {
						pr_err("slabowner minidump region exhausted\n");
						return;
					}
					buf += ret;
					size -= ret;
				}
				slab_unlock(page);
			}
			list_for_each_entry(page, &n->full, lru) {
				addr = page_address(page);
				slab_lock(page);
				for_each_object(p, s, addr, page->objects) {
					ret  = dump_tracking(buf, size, s, p);
					if (ret == size) {
						pr_err("slabowner minidump region exhausted\n");
						return;
					}
					buf += ret;
					size -= ret;
				}
				slab_unlock(page);
			}
			spin_unlock_irqrestore(&n->list_lock, flags);
		}
		ret = scnprintf(buf, size, "\n");
		if (ret == size)
			return;
		buf += ret;
		size -= ret;
	}
}
#endif /* CONFIG_QCOM_MINIDUMP_PANIC_DUMP */
#endif	/* CONFIG_SLUB_DEBUG */

#ifdef SLUB_RESILIENCY_TEST
@@ -5933,6 +6107,89 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
	return 0;
}

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
/*
 * slab_owner_filter_write - debugfs write handler for slab_owner_filter.
 *
 * Parses a bitmask from userspace; bit i selects
 * kmalloc_caches[KMALLOC_NORMAL][i] for the slab owner dump. Every set
 * bit must name an existing kmalloc cache, otherwise -EINVAL is returned
 * and the current filter is left unchanged.
 *
 * Fixes over the original: the validation loop advanced an 'int bit' by
 * doubling, which overflows a signed int (undefined behavior) for
 * filters with high bits set, and indexed kmalloc_caches[][i] past
 * KMALLOC_SHIFT_HIGH (out-of-bounds read). Iterate over bit indices and
 * bounds-check them instead; the error message now reports the cache
 * index rather than the raw bit value.
 */
static ssize_t slab_owner_filter_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	unsigned long filter;
	int i;

	if (kstrtoul_from_user(ubuf, count, 0, &filter)) {
		pr_err_ratelimited("Invalid format for filter\n");
		return -EINVAL;
	}

	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!(filter & (1UL << i)))
			continue;
		if (i > KMALLOC_SHIFT_HIGH ||
		    !kmalloc_caches[KMALLOC_NORMAL][i]) {
			pr_err("Invalid filter : %lx kmalloc-%d doesn't exist\n",
					filter, i);
			return -EINVAL;
		}
	}
	slab_owner_filter = filter;
	return count;
}

/*
 * slab_owner_filter_read - debugfs read handler for slab_owner_filter;
 * reports the current cache-selection bitmask in hex.
 */
static ssize_t slab_owner_filter_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char buf[64];
	int len;

	len = scnprintf(buf, sizeof(buf), "0x%lx\n", slab_owner_filter);
	return simple_read_from_buffer(ubuf, count, offset, buf, len);
}

/* File operations for the slab_owner_filter debugfs entry. */
static const struct file_operations proc_slab_owner_filter_ops = {
	.open	= simple_open,
	.write	= slab_owner_filter_write,
	.read	= slab_owner_filter_read,
};

/*
 * slab_owner_handle_write - debugfs write handler for
 * slab_owner_handles_size_kb.
 *
 * Sets the size (in KB) of the stack-handle dedup table carved out of the
 * end of the slab owner dump region. Writing 0 keeps the current size.
 *
 * Fixes over the original: an out-of-range value logged an error but
 * still returned @count (success), silently discarding the input — now
 * returns -EINVAL like the other write handlers; the size_t limit is
 * printed with %zu instead of %lu.
 *
 * NOTE(review): the limit md_slabowner_dump_size / SZ_16K (KB units)
 * caps the table at 1/16 of the dump region — preserved as-is; confirm
 * that ratio is intentional.
 */
static ssize_t slab_owner_handle_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	unsigned long size;

	if (kstrtoul_from_user(ubuf, count, 0, &size)) {
		pr_err_ratelimited("Invalid format for handle size\n");
		return -EINVAL;
	}

	/* 0 means "leave the current table size unchanged". */
	if (!size)
		return count;

	if (size > (md_slabowner_dump_size / SZ_16K)) {
		pr_err_ratelimited("size : %lu KB exceeds max size : %zu KB\n",
			size, (md_slabowner_dump_size / SZ_16K));
		return -EINVAL;
	}

	slab_owner_handles_size = size * SZ_1K;
	return count;
}

/*
 * slab_owner_handle_read - debugfs read handler for
 * slab_owner_handles_size_kb; reports the handle table size in KB.
 */
static ssize_t slab_owner_handle_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char buf[64];
	int len;

	len = scnprintf(buf, sizeof(buf), "%lu KB\n",
			slab_owner_handles_size / SZ_1K);
	return simple_read_from_buffer(ubuf, count, offset, buf, len);
}

/* File operations for the slab_owner_handles_size_kb debugfs entry. */
static const struct file_operations proc_slab_owner_handle_ops = {
	.open	= simple_open,
	.write	= slab_owner_handle_write,
	.read	= slab_owner_handle_read,
};
#endif

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
@@ -5967,6 +6224,21 @@ static int __init slab_sysfs_init(void)
		kfree(al);
	}

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
	if (slub_debug) {
		int i;

		debugfs_create_file("slab_owner_filter", 0400, NULL, NULL,
			    &proc_slab_owner_filter_ops);
		debugfs_create_file("slab_owner_handles_size_kb", 0400,
				NULL, NULL, &proc_slab_owner_handle_ops);

		for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (kmalloc_caches[KMALLOC_NORMAL][i])
				set_bit(i, &slab_owner_filter);
		}
	}
#endif
	mutex_unlock(&slab_mutex);
	resiliency_test();
	return 0;