Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 27bd362f authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "soc: qcom: Enable slabowner support in minidump"

parents c15f8391 796c864c
Loading
Loading
Loading
Loading
+200 −0
Original line number Diff line number Diff line
@@ -44,6 +44,8 @@
#include <linux/percpu.h>

#include <linux/module.h>
#include <linux/cma.h>
#include <linux/dma-contiguous.h>
#endif

#ifdef CONFIG_QCOM_DYN_MINIDUMP_STACK
@@ -111,6 +113,16 @@ struct seq_buf *md_meminfo_seq_buf;

struct seq_buf *md_slabinfo_seq_buf;

#ifdef CONFIG_PAGE_OWNER
size_t md_pageowner_dump_size = SZ_2M;
char *md_pageowner_dump_addr;
#endif

#ifdef CONFIG_SLUB_DEBUG
size_t md_slabowner_dump_size = SZ_2M;
char *md_slabowner_dump_addr;
#endif

/* Modules information */
#ifdef CONFIG_MODULES
#define NUM_MD_MODULES	200
@@ -977,6 +989,15 @@ static int md_panic_handler(struct notifier_block *this,
	if (md_slabinfo_seq_buf)
		md_dump_slabinfo();

#ifdef CONFIG_SLUB_DEBUG
	if (md_slabowner_dump_addr)
		md_dump_slabowner();
#endif

#ifdef CONFIG_PAGE_OWNER
	if (md_pageowner_dump_addr)
		md_dump_pageowner();
#endif
	md_in_oops_handler = false;
	return NOTIFY_DONE;
}
@@ -1039,6 +1060,175 @@ static int md_register_panic_entries(int num_pages, char *name,
	return ret;
}

/*
 * Allocate a CMA-backed buffer of @size bytes and register it with the
 * minidump table under @name.  On success the matching global pointer
 * (md_pageowner_dump_addr or md_slabowner_dump_addr) is published so
 * the panic path can find the buffer.
 *
 * Returns true on success; false if either the CMA allocation or the
 * minidump registration failed (the pages are released in the latter
 * case).
 */
static bool md_register_memory_dump(int size, char *name)
{
	void *buffer_start;
	struct page *page;
	int ret;

	/* Callers pass whole-MB sizes, so size >> PAGE_SHIFT is exact */
	page  = cma_alloc(dev_get_cma_area(NULL), size >> PAGE_SHIFT,
			0, false);

	if (!page) {
		pr_err("Failed to allocate %s minidump, increase cma size\n",
			name);
		return false;
	}

	buffer_start = page_to_virt(page);
	ret = md_register_minidump_entry(name, (uintptr_t)buffer_start,
			virt_to_phys(buffer_start), size);
	if (ret < 0) {
		/* Registration failed: give the CMA pages back */
		cma_release(dev_get_cma_area(NULL), page, size >> PAGE_SHIFT);
		return false;
	}

	/* Complete registration before adding entries */
	smp_mb();

	/* Publish the buffer only after the minidump entry exists */
#ifdef CONFIG_PAGE_OWNER
	if (!strcmp(name, "PAGEOWNER"))
		WRITE_ONCE(md_pageowner_dump_addr, buffer_start);
#endif
#ifdef CONFIG_SLUB_DEBUG
	if (!strcmp(name, "SLABOWNER"))
		WRITE_ONCE(md_slabowner_dump_addr, buffer_start);
#endif
	return true;
}

static bool md_unregister_memory_dump(char *name)
{
	struct page *page;
	struct md_region *mdr;
	struct md_region md_entry;

	mdr = md_get_region(name);
	if (!mdr) {
		pr_err("minidump entry for %s not found\n", name);
		return false;
	}
	strlcpy(md_entry.name, mdr->name, sizeof(md_entry.name));
	md_entry.virt_addr = mdr->virt_addr;
	md_entry.phys_addr = mdr->phys_addr;
	md_entry.size = mdr->size;
	page = virt_to_page(mdr->virt_addr);

	if (msm_minidump_remove_region(&md_entry) < 0)
		return false;

	cma_release(dev_get_cma_area(NULL), page,
			(md_entry.size) >> PAGE_SHIFT);
	return true;
}

/*
 * Resize the minidump region @name to @size MB (0 disables it).
 *
 * @addr and @dump_size point at the matching global bookkeeping
 * (md_pageowner_dump_addr/_size or md_slabowner_dump_addr/_size).
 * An existing region is unregistered first; if registering the new
 * size fails we try to fall back to the previous size, and disable
 * the dump entirely if even the fallback fails.
 */
static void update_dump_size(char *name, size_t size,
		char **addr, size_t *dump_size)
{
	/* First-time setup: nothing registered yet */
	if ((*dump_size) == 0) {
		if (md_register_memory_dump(size * SZ_1M,
						name)) {
			*dump_size = size * SZ_1M;
			pr_info_ratelimited("%s Minidump set to %zd MB size\n",
					name, size);
		}
		return;
	}
	if (md_unregister_memory_dump(name)) {
		/* Old buffer is gone: clear the published pointer */
		*addr = NULL;
		if (size == 0) {
			*dump_size = 0;
			pr_info_ratelimited("%s Minidump : disabled\n", name);
			return;
		}
		if (md_register_memory_dump(size * SZ_1M,
						name)) {
			*dump_size = size * SZ_1M;
			pr_info_ratelimited("%s Minidump : set to %zd MB\n",
					name, size);
		} else if (md_register_memory_dump(*dump_size,
							name)) {
			/* New size failed: keep the previous size */
			pr_info_ratelimited("%s Minidump : Fallback to %zd MB\n",
					name, (*dump_size) / SZ_1M);
		} else {
			pr_err_ratelimited("%s Minidump : disabled, Can't fallback to %zd MB,\n",
						name, (*dump_size) / SZ_1M);
			*dump_size = 0;
		}
	} else {
		pr_err_ratelimited("Failed to unregister %s Minidump\n", name);
	}
}

#ifdef CONFIG_PAGE_OWNER
static DEFINE_MUTEX(page_owner_dump_size_lock);

/* debugfs write: resize (or disable with 0) the pageowner dump, in MB */
static ssize_t page_owner_dump_size_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	unsigned long long new_size_mb;

	if (kstrtoull_from_user(ubuf, count, 0, &new_size_mb)) {
		pr_err_ratelimited("Invalid format for size\n");
		return -EINVAL;
	}

	/* Serialize concurrent resizes of the pageowner region */
	mutex_lock(&page_owner_dump_size_lock);
	update_dump_size("PAGEOWNER", new_size_mb,
			&md_pageowner_dump_addr, &md_pageowner_dump_size);
	mutex_unlock(&page_owner_dump_size_lock);

	return count;
}

/* debugfs read: report the current pageowner dump region size in MB. */
static ssize_t page_owner_dump_size_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char buf[100];

	/*
	 * md_pageowner_dump_size is a size_t: %zu matches the argument on
	 * both 32- and 64-bit builds, whereas the previous %llu was a
	 * format/argument mismatch on 32-bit.
	 */
	snprintf(buf, sizeof(buf), "%zu MB\n",
			md_pageowner_dump_size / SZ_1M);
	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
}

/* File ops for the "page_owner_dump_size_mb" debugfs node */
static const struct file_operations proc_page_owner_dump_size_ops = {
	.open	= simple_open,
	.write	= page_owner_dump_size_write,
	.read	= page_owner_dump_size_read,
};
#endif

#ifdef CONFIG_SLUB_DEBUG
/*
 * debugfs write: resize (or disable with 0) the slabowner dump, in MB.
 *
 * NOTE(review): unlike page_owner_dump_size_write() this path takes no
 * lock around update_dump_size(), so concurrent writers could race on
 * md_slabowner_dump_addr/_size — confirm whether serialization is
 * needed here as well.
 */
static ssize_t slab_owner_dump_size_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	unsigned long long  size;

	if (kstrtoull_from_user(ubuf, count, 0, &size)) {
		pr_err_ratelimited("Invalid format for size\n");
		return -EINVAL;
	}
	update_dump_size("SLABOWNER", size,
			&md_slabowner_dump_addr, &md_slabowner_dump_size);
	return count;
}

/* debugfs read: report the current slabowner dump region size in MB. */
static ssize_t slab_owner_dump_size_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char buf[100];

	/*
	 * md_slabowner_dump_size is a size_t: %zu matches the argument on
	 * both 32- and 64-bit builds, whereas the previous %llu was a
	 * format/argument mismatch on 32-bit.
	 */
	snprintf(buf, sizeof(buf), "%zu MB\n", md_slabowner_dump_size / SZ_1M);
	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
}

/* File ops for the "slab_owner_dump_size_mb" debugfs node */
static const struct file_operations proc_slab_owner_dump_size_ops = {
	.open	= simple_open,
	.write	= slab_owner_dump_size_write,
	.read	= slab_owner_dump_size_read,
};
#endif

static void md_register_panic_data(void)
{
	md_register_panic_entries(MD_RUNQUEUE_PAGES, "KRUNQUEUE",
@@ -1051,6 +1241,16 @@ static void md_register_panic_data(void)
				  &md_meminfo_seq_buf);
	md_register_panic_entries(MD_SLABINFO_PAGES, "SLABINFO",
				  &md_slabinfo_seq_buf);
	if (is_page_owner_enabled()) {
		md_register_memory_dump(md_pageowner_dump_size, "PAGEOWNER");
		debugfs_create_file("page_owner_dump_size_mb", 0400, NULL, NULL,
			    &proc_page_owner_dump_size_ops);
	}
	if (is_slub_debug_enabled()) {
		md_register_memory_dump(md_slabowner_dump_size, "SLABOWNER");
		debugfs_create_file("slab_owner_dump_size_mb", 0400, NULL, NULL,
			    &proc_slab_owner_dump_size_ops);
	}
}

#ifdef CONFIG_MODULES
+20 −0
Original line number Diff line number Diff line
@@ -72,4 +72,24 @@ extern void minidump_add_trace_event(char *buf, size_t size);
#else
static inline void minidump_add_trace_event(char *buf, size_t size) {}
#endif
#ifdef CONFIG_PAGE_OWNER
extern size_t md_pageowner_dump_size;
extern char *md_pageowner_dump_addr;

extern bool is_page_owner_enabled(void);
extern void md_dump_pageowner(void);
#else
static inline void md_dump_pageowner(void) {}
static inline bool is_page_owner_enabled(void) { return false; }
#endif
#ifdef CONFIG_SLUB_DEBUG
extern size_t md_slabowner_dump_size;
extern char *md_slabowner_dump_addr;

extern bool is_slub_debug_enabled(void);
extern void md_dump_slabowner(void);
#else
static inline void md_dump_slabowner(void) {}
static inline bool is_slub_debug_enabled(void) { return false; }
#endif
#endif
+334 −4
Original line number Diff line number Diff line
@@ -12,7 +12,10 @@
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
#include <soc/qcom/minidump.h>
#include <linux/ctype.h>
#endif
#include "internal.h"

/*
@@ -423,6 +426,145 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
	return -ENOMEM;
}

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP

static unsigned long page_owner_filter = 0xF;
static unsigned long page_owner_handles_size =  SZ_16K;
static int nr_handles;
static LIST_HEAD(accounted_call_site_list);
static DEFINE_MUTEX(accounted_call_site_lock);
struct accounted_call_site {
	struct list_head list;
	char name[50];
};

/* Tell the minidump driver whether page_owner capture is active. */
bool is_page_owner_enabled(void)
{
	return page_owner_enabled;
}

/*
 * Return true if @handle was already recorded during this dump pass.
 *
 * Seen handles live in a table carved out of the tail (the last
 * page_owner_handles_size bytes) of the pageowner minidump region.
 * An unseen handle is appended so that later pages sharing the same
 * allocation stack only emit the handle, not the full backtrace.
 */
static bool found_stack(depot_stack_handle_t handle,
		 char *md_pageowner_dump_addr, char *cur)
{
	int *handles, i;

	handles = (int *) (md_pageowner_dump_addr +
			md_pageowner_dump_size - page_owner_handles_size);

	for (i = 0; i < nr_handles; i++)
		if (handle == handles[i])
			return true;

	/* New handle: append it if the table still has room */
	if ((handles + nr_handles)
		< (int *)(md_pageowner_dump_addr +
			md_pageowner_dump_size)) {
		handles[nr_handles] = handle;
		nr_handles += 1;
	} else {
		/* Fixed garbled message ("Can't stores handles increase ...") */
		pr_err_ratelimited("Can't store handles, increase page_owner_handles_size\n");
	}
	return false;
}

/*
 * Return true if @page looks "unaccounted": it is not LRU/slab/swap
 * backed and none of its allocation stack frames match a user-supplied
 * accounted call site.  @buf is scratch space used to render each
 * frame symbolically.
 */
static bool check_unaccounted(char *buf, ssize_t count,
		struct page *page, depot_stack_handle_t handle)
{
	int i, ret = 0;
	unsigned long *entries;
	unsigned int nr_entries;
	struct accounted_call_site *call_site;

	/* LRU/slab/swap-backed pages are accounted elsewhere */
	if ((page->flags &
		((1UL << PG_lru) | (1UL << PG_slab) | (1UL << PG_swapbacked))))
		return false;

	nr_entries = stack_depot_fetch(handle, &entries);
	for (i = 0; i < nr_entries; i++) {
		/* Render one frame ("%pS") into the scratch buffer */
		ret = scnprintf(buf, count, "%pS\n",
				(void *)entries[i]);
		if (ret == count)
			return false;

		mutex_lock(&accounted_call_site_lock);
		list_for_each_entry(call_site,
				&accounted_call_site_list, list) {
			if (strnstr(buf, call_site->name,
					strlen(buf))) {
				/* Frame matches an accounted call site */
				mutex_unlock(&accounted_call_site_lock);
				return false;
			}
		}
		mutex_unlock(&accounted_call_site_lock);
	}
	return true;
}

/*
 * Render one page's owner record into the minidump buffer at @buf,
 * honouring the page_owner_filter bits (0x1 unaccounted, 0x2 slab,
 * 0x4 anon, 0x8 file).  A stack already seen this pass (tracked by
 * handle) is emitted as a bare reference rather than a full backtrace.
 * Returns the number of bytes written; the caller treats a return
 * equal to @count as buffer exhaustion.
 */
static ssize_t
dump_page_owner_md(char *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int i, bit, ret = 0;
	unsigned long *entries;
	unsigned int nr_entries;

	/* All filter bits set: dump everything unconditionally */
	if (page_owner_filter == 0xF)
		goto dump;

	for (bit = 1; page_owner_filter >= bit; bit *= 2) {
		if (page_owner_filter & bit) {
			switch (bit) {
			case 0x1:
				if (check_unaccounted(buf, count, page, handle))
					goto dump;
				break;
			case 0x2:
				if (page->flags & (1UL << PG_slab))
					goto dump;
				break;
			case 0x4:
				if (page->flags & (1UL << PG_swapbacked))
					goto dump;
				break;
			case 0x8:
				/*
				 * File-backed: on the LRU but NOT swap-backed.
				 * Bug fix: the original used bitwise '~' here,
				 * which is non-zero for every flags value, so
				 * the file filter also matched anonymous pages.
				 * Logical '!' is the intended test.
				 */
				if ((page->flags & (1UL << PG_lru)) &&
					!(page->flags & (1UL << PG_swapbacked)))
					goto dump;
				break;
			default:
				break;
			}
		}
		if (bit >= 0x8)
			return ret;
	}

	if (bit > page_owner_filter)
		return ret;
dump:
	nr_entries = stack_depot_fetch(handle, &entries);
	if ((buf > (md_pageowner_dump_addr +
			md_pageowner_dump_size - page_owner_handles_size))
			|| !found_stack(handle, md_pageowner_dump_addr, buf)) {
		/* First sighting of this stack: emit the full backtrace */
		ret = scnprintf(buf, count, "%lu %u %u\n",
				pfn, handle, nr_entries);
		if (ret == count)
			goto err;

		for (i = 0; i < nr_entries; i++) {
			ret += scnprintf(buf + ret, count - ret,
					"%p\n", (void *)entries[i]);
			if (ret == count)
				goto err;
		}
	} else {
		/* Stack already dumped: reference it by handle only */
		ret = scnprintf(buf, count, "%lu %u %u\n",  pfn, handle, 0);
	}
err:
	return ret;
}
#endif

void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
@@ -486,6 +628,12 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
	char *addr;
	ssize_t size;

	addr = md_pageowner_dump_addr;
#endif
	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

@@ -496,6 +644,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	if (file)
		drain_all_pages(NULL);

	/* Find an allocated page */
@@ -560,13 +709,34 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		if (file) {
			return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
		} else {
#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
			size = dump_page_owner_md(addr, count, pfn, page,
				page_owner, handle);
			if (size == count) {
				pr_err("pageowner minidump region exhausted\n");
				return 0;
			}
			count -= size;
			addr += size;
#endif
		}
	}

	return 0;
}

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
/*
 * Panic-path entry point: stream every allocated page's owner record
 * into the pre-registered minidump buffer by driving read_page_owner()
 * with a NULL file (the non-file path writes to md_pageowner_dump_addr).
 */
void md_dump_pageowner(void)
{
	loff_t k = 0;

	read_page_owner(NULL, NULL, md_pageowner_dump_size, &k);
}
#endif

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
@@ -664,6 +834,158 @@ static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
};

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
/* debugfs write: set the pageowner dump filter (bits 0x1..0x8 only). */
static ssize_t page_owner_filter_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	unsigned long new_filter;

	if (kstrtoul_from_user(ubuf, count, 0, &new_filter)) {
		pr_err_ratelimited("Invalid format for filter\n");
		return -EINVAL;
	}

	/* Reject anything outside the four defined filter bits */
	if (new_filter & ~0xFUL) {
		pr_err_ratelimited("Invalid filter : use following filters or any combinations of these\n"
				"0x1 - unaccounted\n"
				"0x2 - slab\n"
				"0x4 - Anon\n"
				"0x8 - File\n");
		return -EINVAL;
	}

	page_owner_filter = new_filter;
	return count;
}

/* debugfs read: report the current pageowner filter bits in hex. */
static ssize_t page_owner_filter_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "0x%lx\n", page_owner_filter);
	return simple_read_from_buffer(ubuf, count, offset, tmp, strlen(tmp));
}

/* File ops for the "page_owner_filter" debugfs node */
static const struct file_operations proc_page_owner_filter_ops = {
	.open	= simple_open,
	.write	= page_owner_filter_write,
	.read	= page_owner_filter_read,
};

/*
 * debugfs write: set the handle-table size (in KB) reserved at the tail
 * of the pageowner dump region.  0 and oversized values are ignored;
 * the write still "succeeds" so the error is only reported in dmesg.
 */
static ssize_t page_owner_handle_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	unsigned long kb;

	if (kstrtoul_from_user(ubuf, count, 0, &kb)) {
		pr_err_ratelimited("Invalid format for handle size\n");
		return -EINVAL;
	}

	if (kb && kb <= (md_pageowner_dump_size / SZ_16K))
		page_owner_handles_size = kb * SZ_1K;
	else if (kb)
		pr_err_ratelimited("size : %lu KB exceeds max size : %lu KB\n",
			kb, (md_pageowner_dump_size / SZ_16K));

	return count;
}

/* debugfs read: report the reserved handle-table size in KB. */
static ssize_t page_owner_handle_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%lu KB\n",
			(page_owner_handles_size / SZ_1K));
	return simple_read_from_buffer(ubuf, count, offset, tmp, strlen(tmp));
}

/*
 * debugfs write: add a call-site name to the "accounted" list consulted
 * by check_unaccounted().  Names must start with a letter or '_'.
 */
static ssize_t page_owner_call_site_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	struct accounted_call_site *call_site;
	char buf[50];

	if (count >= sizeof(buf)) {
		pr_err_ratelimited("Input string size too large\n");
		return -EINVAL;
	}

	memset(buf, 0, sizeof(buf));

	if (copy_from_user(buf, ubuf, count)) {
		pr_err_ratelimited("Couldn't copy from user\n");
		return -EFAULT;
	}

	/* Strip the trailing newline that echo(1) appends */
	if (count && buf[count - 1] == '\n')
		buf[count - 1] = '\0';

	if (!isalpha(buf[0]) && buf[0] != '_') {
		pr_err_ratelimited("Invalid call site name\n");
		return -EINVAL;
	}

	call_site = kzalloc(sizeof(*call_site), GFP_KERNEL);
	if (!call_site)
		return -ENOMEM;

	/*
	 * Bug fix: the original called strlcpy(..., strlen(buf)), which
	 * bounds the copy by the SOURCE length and therefore always drops
	 * the final character (it only appeared to work because the input
	 * usually ended in '\n').  Bound by the destination size instead.
	 */
	strlcpy(call_site->name, buf, sizeof(call_site->name));
	mutex_lock(&accounted_call_site_lock);
	list_add_tail(&call_site->list, &accounted_call_site_list);
	mutex_unlock(&accounted_call_site_lock);

	return count;
}

/*
 * debugfs read: list the registered accounted call sites, one per line.
 * Returns -ENOMEM if the list no longer fits in one page.
 */
static ssize_t page_owner_call_site_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char *kbuf;
	struct accounted_call_site *call_site;
	int i = 1, ret = 0;
	size_t size = PAGE_SIZE;

	kbuf = kmalloc(size, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	/*
	 * Bug fix: bound the header by the kernel buffer size, not the
	 * caller-supplied count (which may exceed the allocation).
	 */
	ret = scnprintf(kbuf, size, "%s\n", "Accounted call sites:");
	mutex_lock(&accounted_call_site_lock);
	list_for_each_entry(call_site, &accounted_call_site_list, list) {
		ret += scnprintf(kbuf + ret, size - ret,
			"%d. %s\n", i, call_site->name);
		i += 1;
		/*
		 * scnprintf() returns at most size-1, so the old
		 * 'ret == size' check could never fire; size-1 means
		 * the buffer is full.
		 */
		if (ret >= size - 1) {
			ret = -ENOMEM;
			mutex_unlock(&accounted_call_site_lock);
			goto err;
		}
	}
	mutex_unlock(&accounted_call_site_lock);
	ret = simple_read_from_buffer(ubuf, count, offset, kbuf, strlen(kbuf));
err:
	kfree(kbuf);
	return ret;
}

/* File ops for the "page_owner_call_sites" debugfs node */
static const struct file_operations proc_page_owner_call_site_ops = {
	.open	= simple_open,
	.write	= page_owner_call_site_write,
	.read	= page_owner_call_site_read,
};
#endif

static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
@@ -674,6 +996,14 @@ static int __init pageowner_init(void)
	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
	debugfs_create_file("page_owner_filter", 0400, NULL, NULL,
			    &proc_page_owner_filter_ops);
	debugfs_create_file("page_owner_handles_size_kb", 0400, NULL, NULL,
			    &proc_page_owner_handle_ops);
	debugfs_create_file("page_owner_call_sites", 0400, NULL, NULL,
			    &proc_page_owner_call_site_ops);
#endif
	return 0;
}
late_initcall(pageowner_init)
+273 −1
Original line number Diff line number Diff line
@@ -34,8 +34,12 @@
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>
#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
#include <soc/qcom/minidump.h>
#include <linux/debugfs.h>
#include <linux/jhash.h>
#endif

#include "internal.h"

@@ -4769,6 +4773,176 @@ static int list_locations(struct kmem_cache *s, char *buf,
		len += sprintf(buf, "No data\n");
	return len;
}

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP

#define STACK_HASH_SEED 0x9747b28c

static unsigned long slab_owner_filter;
static unsigned long slab_owner_handles_size = SZ_16K;
static int num_handles;

/* Tell the minidump driver whether SLUB debugging is active. */
bool is_slub_debug_enabled(void)
{
	return slub_debug ? true : false;
}

/*
 * Return true if stack hash @handle was already recorded this dump pass.
 *
 * Seen handles live in a table carved out of the tail (the last
 * slab_owner_handles_size bytes) of the slabowner minidump region.
 * An unseen handle is appended so objects sharing an allocation stack
 * only emit the handle, not the full backtrace.
 */
static bool find_stack(u32 handle,
		 char *md_slabowner_dump_addr, char *cur)
{
	int *handles, i;

	handles = (int *) (md_slabowner_dump_addr +
			md_slabowner_dump_size - slab_owner_handles_size);

	for (i = 0; i < num_handles; i++)
		if (handle == handles[i])
			return true;

	/* New handle: append it if the table still has room */
	if ((handles + num_handles)
		< (int *)(md_slabowner_dump_addr +
			md_slabowner_dump_size)) {
		handles[num_handles] = handle;
		num_handles += 1;
	} else {
		/* Fixed garbled message ("Can't stores handles increase ...") */
		pr_err_ratelimited("Can't store handles, increase slab_owner_handles_size\n");
	}
	return false;
}

/*
 * Calculate a 32-bit jhash2 over a raw stack trace (array of return
 * addresses); used as the dedup handle for the slabowner dump.
 */
static u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
			       size * sizeof(unsigned long) / sizeof(u32),
			       STACK_HASH_SEED);
}

/*
 * Append one object's allocation-tracking record to the slabowner dump
 * at @buf (at most @size bytes).  Returns 0 when the cache does not
 * store user tracking or the object has no recorded allocator, else
 * the number of bytes written.
 *
 * NOTE(review): scnprintf() returns at most size-1, so the
 * 'ret == size' exhaustion checks below look unreachable — confirm
 * the intended bound (callers compare the return against size too).
 */
static ssize_t dump_tracking(char *buf, size_t size, struct kmem_cache *s,
			void *object)
{
	struct track *t;
	int ret;
	u32 handle, nr_entries;

	if (!(s->flags & SLAB_STORE_USER))
		return 0;

	t = get_track(s, object, TRACK_ALLOC);
	if (!t->addr)
		return 0;

#ifdef CONFIG_STACKTRACE
	{
		int i;

		/* Count recorded frames: the addrs array is zero-terminated */
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				continue;
			else
				break;
		nr_entries = i;
		/* Hash the stack so duplicate stacks dump only a handle */
		handle = hash_stack(t->addrs, nr_entries);

		if ((buf > (md_slabowner_dump_addr +
			md_slabowner_dump_size - slab_owner_handles_size))
			|| !find_stack(handle, md_slabowner_dump_addr, buf)) {

			/* First sighting: emit the full backtrace */
			ret = scnprintf(buf, size, "%p %u %u\n",
				object, handle, nr_entries);
			if (ret == size)
				goto err;

			for (i = 0; i < nr_entries; i++) {
				ret += scnprintf(buf + ret, size - ret,
						"%p\n", (void *)t->addrs[i]);
				if (ret == size)
					goto err;
			}
		} else {
			/* Stack already dumped: reference it by handle */
			ret = scnprintf(buf, size, "%p %u %u\n",
					object, handle, 0);
		}
	}
#else
	/* No stacktrace support: record only the direct caller address */
	ret = scnprintf(buf, size, "%p %p\n", object, (void *)t->addr);

#endif
err:
	return ret;
}

/*
 * Panic-path entry point: walk every kmalloc-normal cache selected by
 * slab_owner_filter and stream per-object allocation tracking into the
 * pre-registered slabowner minidump buffer.
 *
 * Each node's partial and full slab lists are walked under list_lock
 * with the slab locked; objects are rendered via dump_tracking().
 *
 * NOTE(review): the early returns on buffer exhaustion inside the
 * locked region leave list_lock/slab_lock held — presumably tolerated
 * because this runs only at panic time; confirm.
 */
void md_dump_slabowner(void)
{
	struct kmem_cache *s;
	int node;
	char *buf = md_slabowner_dump_addr;
	struct kmem_cache_node *n;
	void *addr;
	void *p;
	ssize_t ret, size = md_slabowner_dump_size;
	int i;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!test_bit(i, &slab_owner_filter))
			continue;
		s = kmalloc_caches[KMALLOC_NORMAL][i];
		if (!s)
			continue;
		/* Cache header line, then its objects */
		ret = scnprintf(buf, size, "%s\n", s->name);
		if (ret == size)
			return;
		buf += ret;
		size -= ret;
		for_each_kmem_cache_node(s, node, n) {
			unsigned long flags;
			struct page *page;

			if (!atomic_long_read(&n->nr_slabs))
				continue;

			spin_lock_irqsave(&n->list_lock, flags);

			list_for_each_entry(page, &n->partial, lru) {
				addr = page_address(page);
				slab_lock(page);
				for_each_object(p, s, addr, page->objects) {
					ret  = dump_tracking(buf, size, s, p);
					if (ret == size) {
						pr_err("slabowner minidump region exhausted\n");
						return;
					}
					buf += ret;
					size -= ret;
				}
				slab_unlock(page);
			}
			list_for_each_entry(page, &n->full, lru) {
				addr = page_address(page);
				slab_lock(page);
				for_each_object(p, s, addr, page->objects) {
					ret  = dump_tracking(buf, size, s, p);
					if (ret == size) {
						pr_err("slabowner minidump region exhausted\n");
						return;
					}
					buf += ret;
					size -= ret;
				}
				slab_unlock(page);
			}
			spin_unlock_irqrestore(&n->list_lock, flags);
		}
		/* Blank line separates caches in the dump */
		ret = scnprintf(buf, size, "\n");
		if (ret == size)
			return;
		buf += ret;
		size -= ret;
	}
}
#endif /* CONFIG_QCOM_MINIDUMP_PANIC_DUMP */
#endif	/* CONFIG_SLUB_DEBUG */

#ifdef SLUB_RESILIENCY_TEST
@@ -5933,6 +6107,89 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
	return 0;
}

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
/*
 * debugfs write: set the slabowner filter bitmask.  Bit i selects
 * kmalloc_caches[KMALLOC_NORMAL][i]; every selected cache must exist.
 */
static ssize_t slab_owner_filter_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	unsigned long filter;
	int bit, i;
	struct kmem_cache *s;

	if (kstrtoul_from_user(ubuf, count, 0, &filter)) {
		pr_err_ratelimited("Invalid format for filter\n");
		return -EINVAL;
	}

	for (i = 0, bit = 1; filter >= bit; bit *= 2, i++) {
		/*
		 * Bug fix: reject bits beyond the last kmalloc index.
		 * The original kept walking for arbitrarily large
		 * filters, indexing kmalloc_caches[][] out of bounds
		 * (and eventually overflowing 'bit').
		 */
		if (i > KMALLOC_SHIFT_HIGH) {
			pr_err("Invalid filter : %lx exceeds kmalloc cache range\n",
					filter);
			return -EINVAL;
		}
		if (filter & bit) {
			s = kmalloc_caches[KMALLOC_NORMAL][i];
			if (!s) {
				pr_err("Invalid filter : %lx kmalloc-%d doesn't exist\n",
						filter, bit);
				return -EINVAL;
			}
		}
	}
	slab_owner_filter = filter;
	return count;
}

/* debugfs read: report the current slabowner filter bits in hex. */
static ssize_t slab_owner_filter_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "0x%lx\n", slab_owner_filter);
	return simple_read_from_buffer(ubuf, count, offset, tmp, strlen(tmp));
}

/* File ops for the "slab_owner_filter" debugfs node */
static const struct file_operations proc_slab_owner_filter_ops = {
	.open	= simple_open,
	.write	= slab_owner_filter_write,
	.read	= slab_owner_filter_read,
};

/*
 * debugfs write: set the handle-table size (in KB) reserved at the tail
 * of the slabowner dump region.  0 and oversized values are ignored;
 * the write still "succeeds" so the error is only reported in dmesg.
 */
static ssize_t slab_owner_handle_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	unsigned long kb;

	if (kstrtoul_from_user(ubuf, count, 0, &kb)) {
		pr_err_ratelimited("Invalid format for handle size\n");
		return -EINVAL;
	}

	if (kb && kb <= (md_slabowner_dump_size / SZ_16K))
		slab_owner_handles_size = kb * SZ_1K;
	else if (kb)
		pr_err_ratelimited("size : %lu KB exceeds max size : %lu KB\n",
			kb, (md_slabowner_dump_size / SZ_16K));

	return count;
}

/* debugfs read: report the reserved handle-table size in KB. */
static ssize_t slab_owner_handle_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%lu KB\n",
			(slab_owner_handles_size / SZ_1K));
	return simple_read_from_buffer(ubuf, count, offset, tmp, strlen(tmp));
}

/* File ops for the "slab_owner_handles_size_kb" debugfs node */
static const struct file_operations proc_slab_owner_handle_ops = {
	.open	= simple_open,
	.write	= slab_owner_handle_write,
	.read	= slab_owner_handle_read,
};
#endif

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
@@ -5967,6 +6224,21 @@ static int __init slab_sysfs_init(void)
		kfree(al);
	}

#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
	if (slub_debug) {
		int i;

		debugfs_create_file("slab_owner_filter", 0400, NULL, NULL,
			    &proc_slab_owner_filter_ops);
		debugfs_create_file("slab_owner_handles_size_kb", 0400,
				NULL, NULL, &proc_slab_owner_handle_ops);

		for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (kmalloc_caches[KMALLOC_NORMAL][i])
				set_bit(i, &slab_owner_filter);
		}
	}
#endif
	mutex_unlock(&slab_mutex);
	resiliency_test();
	return 0;