
Commit 1a68ae6c authored by Lingutla Chandrasekhar, committed by Neeraj Upadhyay

Minidump: Add support for cpu current stack



Currently, a CPU's current stack is dumped only in the panic
path, which is not always helpful. Extend this so that the
stack is dumped for any type of reset reason.

On every task switch, update the minidump table with the CPU's
latest stack, so that the CPU's call stack can be reconstructed
for any type of reset.
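
The flow, in short: boot-time code pre-registers one dummy stack region
per CPU in the minidump table, and the task-switch path then repoints
that CPU's region at whichever stack is now current. Below is a minimal
userspace sketch of just that bookkeeping, not the kernel code in this
patch; all names here (md_region, md_register_cpu_stack,
md_update_cpu_stack) are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 2

struct md_region {
	char name[16];
	uint64_t virt_addr;	/* base of the captured stack */
	uint64_t size;
};

static struct md_region md_table[NR_CPUS];

/* Boot-time step: reserve one region per CPU; contents are dummies. */
static void md_register_cpu_stack(int cpu)
{
	snprintf(md_table[cpu].name, sizeof(md_table[cpu].name),
		 "KSTACK%d", cpu);
	md_table[cpu].virt_addr = 0;	/* dummy until the first switch */
	md_table[cpu].size = 16 * 1024;	/* stand-in for THREAD_SIZE */
}

/* Task-switch step: repoint this CPU's region at the incoming stack. */
static void md_update_cpu_stack(int cpu, uint64_t stack_base)
{
	md_table[cpu].virt_addr = stack_base;
}

int main(void)
{
	md_register_cpu_stack(0);
	md_update_cpu_stack(0, 0xffff000010000000ULL);	/* task A runs */
	md_update_cpu_stack(0, 0xffff000010004000ULL);	/* task B runs */
	printf("%s -> %#llx\n", md_table[0].name,
	       (unsigned long long)md_table[0].virt_addr);
	return 0;
}

In the actual patch the update must also refresh the physical address
(via vmalloc_to_page() under CONFIG_VMAP_STACK), since a vmalloc'd
stack is not physically contiguous.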

Change-Id: Ia57a8e0473d3706efc9a88c91fb1dd60523648ad
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
Signed-off-by: Neeraj Upadhyay <neeraju@codeaurora.org>
parent facd0a72
+9 −0
@@ -517,6 +517,15 @@ config QCOM_MINIDUMP
	  Minidump would dump all registered entries only when DLOAD mode
	  is enabled.

config QCOM_DYN_MINIDUMP_STACK
	bool "QTI Dynamic Minidump Stack Registration Support"
	depends on QCOM_MINIDUMP
	help
	  This enables the minidump dynamic current stack registration
	  feature. It allows the current task's stack to be available in
	  minidump for cases where the CPU is unable to register it from
	  IPI_CPU_STOP. The stack data can be used to unwind stack frames.

config MINIDUMP_MAX_ENTRIES
	int "Minidump Maximum num of entries"
	default 200
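
For reference, on a tree carrying this patch the feature would
presumably be enabled with a config fragment along these lines
(CONFIG_QCOM_DYN_MINIDUMP_STACK depends on CONFIG_QCOM_MINIDUMP, per
the Kconfig entry above):

CONFIG_QCOM_MINIDUMP=y
CONFIG_QCOM_DYN_MINIDUMP_STACK=y
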
+171 −10
@@ -3,6 +3,7 @@
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -10,12 +11,34 @@
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <soc/qcom/minidump.h>
#include <asm/page.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

static bool is_vmap_stack __read_mostly;

#ifdef CONFIG_QCOM_DYN_MINIDUMP_STACK

#ifdef CONFIG_VMAP_STACK
#define STACK_NUM_PAGES (THREAD_SIZE / PAGE_SIZE)
#else
#define STACK_NUM_PAGES 1
#endif	/* !CONFIG_VMAP_STACK */

struct md_stack_cpu_data {
	int stack_mdidx[STACK_NUM_PAGES];
	struct md_region stack_mdr[STACK_NUM_PAGES];
} ____cacheline_aligned_in_smp;

static int md_current_stack_init __read_mostly;

static DEFINE_PER_CPU_SHARED_ALIGNED(struct md_stack_cpu_data, md_stack_data);
#endif

static void __init register_log_buf(void)
{
	char **log_bufp;
@@ -38,24 +61,26 @@ static void __init register_log_buf(void)
		pr_err("Failed to add logbuf in Minidump\n");
}

static void register_stack_entry(struct md_region *ksp_entry, u64 sp, u64 size,
static int register_stack_entry(struct md_region *ksp_entry, u64 sp, u64 size,
				 u32 cpu)
{
	struct page *sp_page;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
	int entry;

	ksp_entry->id = MINIDUMP_DEFAULT_ID;
	ksp_entry->virt_addr = sp;
	ksp_entry->size = size;
	if (stack_vm_area) {
	if (is_vmap_stack) {
		sp_page = vmalloc_to_page((const void *) sp);
		ksp_entry->phys_addr = page_to_phys(sp_page);
	} else {
		ksp_entry->phys_addr = virt_to_phys((uintptr_t *)sp);
	}

	if (msm_minidump_add_region(ksp_entry) < 0)
	entry = msm_minidump_add_region(ksp_entry);
	if (entry < 0)
		pr_err("Failed to add stack of cpu %d in Minidump\n", cpu);
	return entry;
}

static void __init register_kernel_sections(void)
@@ -90,8 +115,8 @@ static void __init register_kernel_sections(void)
	}
}

static inline bool in_stack_range(u64 sp, u64 base_addr, unsigned int
				  stack_size)
static inline bool in_stack_range(
		u64 sp, u64 base_addr, unsigned int stack_size)
{
	u64 min_addr = base_addr;
	u64 max_addr = base_addr + stack_size;
@@ -122,9 +147,14 @@ void dump_stack_minidump(u64 sp)
	struct vm_struct *stack_vm_area;
	unsigned int i, copy_pages;

	if (IS_ENABLED(CONFIG_QCOM_DYN_MINIDUMP_STACK))
		return;

	if (is_idle_task(current))
		return;

	is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);

	if (sp < MODULES_END || sp > -256UL)
		sp = current_stack_pointer;

@@ -137,20 +167,21 @@ void dump_stack_minidump(u64 sp)
	 * address of one page of the stack.
	 */
	stack_vm_area = task_stack_vm_area(current);
	if (stack_vm_area) {
	if (is_vmap_stack) {
		sp &= ~(PAGE_SIZE - 1);
		copy_pages = calculate_copy_pages(sp, stack_vm_area);
		for (i = 0; i < copy_pages; i++) {
			scnprintf(ksp_entry.name, sizeof(ksp_entry.name),
				  "KSTACK%d_%d", cpu, i);
			register_stack_entry(&ksp_entry, sp, PAGE_SIZE, cpu);
			(void)register_stack_entry(&ksp_entry, sp,
						   PAGE_SIZE, cpu);
			sp += PAGE_SIZE;
		}
	} else {
		sp &= ~(THREAD_SIZE - 1);
		scnprintf(ksp_entry.name, sizeof(ksp_entry.name), "KSTACK%d",
			  cpu);
		register_stack_entry(&ksp_entry, sp, THREAD_SIZE, cpu);
		(void)register_stack_entry(&ksp_entry, sp, THREAD_SIZE, cpu);
	}

	scnprintf(ktsk_entry.name, sizeof(ktsk_entry.name), "KTASK%d", cpu);
@@ -162,6 +193,132 @@ void dump_stack_minidump(u64 sp)
		pr_err("Failed to add current task %d in Minidump\n", cpu);
}

#ifdef CONFIG_QCOM_DYN_MINIDUMP_STACK
static void update_stack_entry(struct md_region *ksp_entry, u64 sp,
			       int mdno, u32 cpu)
{
	struct page *sp_page;

	ksp_entry->virt_addr = sp;
	if (likely(is_vmap_stack)) {
		sp_page = vmalloc_to_page((const void *) sp);
		ksp_entry->phys_addr = page_to_phys(sp_page);
	} else {
		ksp_entry->phys_addr = virt_to_phys((uintptr_t *)sp);
	}
	if (msm_minidump_update_region(mdno, ksp_entry) < 0) {
		pr_err("Failed to update cpu[%d] current stack in minidump\n",
		       cpu);
	}
}

static void register_vmapped_stack(struct md_stack_cpu_data *md_stack_cpu_d,
				   struct vm_struct *stack_area, u32 cpu,
				   bool update)
{
	u64 sp;
	u64 tsk_stack_base = (u64)stack_area->addr;
	struct md_region *mdr;
	int *mdno;
	int i;

	sp = tsk_stack_base & ~(PAGE_SIZE - 1);
	for (i = 0; i < STACK_NUM_PAGES; i++) {
		mdr = md_stack_cpu_d->stack_mdr + i;
		mdno = md_stack_cpu_d->stack_mdidx + i;
		if (unlikely(!update)) {
			scnprintf(mdr->name, sizeof(mdr->name),
				  "KSTACK%d_%d", cpu, i);
			*mdno = register_stack_entry(mdr, sp, PAGE_SIZE, cpu);
		} else {
			update_stack_entry(mdr, sp, *mdno, cpu);
		}
		sp += PAGE_SIZE;
	}
}

static void register_normal_stack(struct md_stack_cpu_data *md_stack_cpu_d,
				  u64 sp, u32 cpu, bool update)
{
	struct md_region *mdr;

	mdr = md_stack_cpu_d->stack_mdr;
	sp &= ~(THREAD_SIZE - 1);
	if (unlikely(!update)) {
		scnprintf(mdr->name, sizeof(mdr->name), "KSTACK%d", cpu);
		*md_stack_cpu_d->stack_mdidx = register_stack_entry(
						mdr, sp, THREAD_SIZE, cpu);
	} else {
		update_stack_entry(mdr, sp,
				   *md_stack_cpu_d->stack_mdidx, cpu);
	}
}

void update_md_current_stack(void *data)
{
	u32 cpu = smp_processor_id();
	unsigned int i;
	u64 sp = current_stack_pointer;
	struct md_stack_cpu_data *md_stack_cpu_d =
				&per_cpu(md_stack_data, cpu);
	int *mdno;
	struct vm_struct *stack_vm_area;

	if (is_idle_task(current) || !md_current_stack_init)
		return;

	if (likely(is_vmap_stack)) {
		for (i = 0; i < STACK_NUM_PAGES; i++) {
			mdno = md_stack_cpu_d->stack_mdidx + i;
			if (unlikely(*mdno < 0))
				return;
		}
		stack_vm_area = task_stack_vm_area(current);
		register_vmapped_stack(md_stack_cpu_d, stack_vm_area,
				       cpu, true);
	} else {
		if (unlikely(*md_stack_cpu_d->stack_mdidx < 0))
			return;
		register_normal_stack(md_stack_cpu_d, sp, cpu, true);
	}
}

static void register_current_stack(void)
{
	int cpu;
	u64 sp = current_stack_pointer;
	struct md_stack_cpu_data *md_stack_cpu_d;
	struct vm_struct *stack_vm_area;

	stack_vm_area = task_stack_vm_area(current);
	/*
	 * Since stacks are now allocated with vmalloc, the translation to
	 * physical address is not a simple linear transformation like it is
	 * for kernel logical addresses, because vmalloc creates a virtual
	 * mapping. Thus, virt_to_phys() should not be used in this context;
	 * instead the page table must be walked to acquire the physical
	 * address of all pages of the stack.
	 */
	for_each_possible_cpu(cpu) {
		/*
		 * Register dummy entries for now; once the system is up and
		 * running, each CPU will update its own current stack.
		 */
		md_stack_cpu_d = &per_cpu(md_stack_data, cpu);
		if (is_vmap_stack) {
			register_vmapped_stack(md_stack_cpu_d, stack_vm_area,
				       cpu, false);
		} else {
			register_normal_stack(md_stack_cpu_d, sp, cpu, false);
		}
	}

	md_current_stack_init = 1;
	/* Let online cpus update currents now */
	smp_call_function(update_md_current_stack, NULL, 1);
}
#endif

#ifdef CONFIG_ARM64
static void register_irq_stack(void)
{
@@ -174,7 +331,7 @@ static void register_irq_stack(void)

	for_each_possible_cpu(cpu) {
		irq_stack_base = (u64)per_cpu(irq_stack_ptr, cpu);
		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		if (is_vmap_stack) {
			irq_stack_pages_count = IRQ_STACK_SIZE / PAGE_SIZE;
			sp = irq_stack_base & ~(PAGE_SIZE - 1);
			for (i = 0; i < irq_stack_pages_count; i++) {
@@ -201,7 +358,11 @@ static inline void register_irq_stack(void) {}
static int __init msm_minidump_log_init(void)
{
	register_kernel_sections();
	is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);
	register_irq_stack();
#ifdef CONFIG_QCOM_DYN_MINIDUMP_STACK
	register_current_stack();
#endif
	register_log_buf();
	return 0;
}
+7 −0
@@ -59,4 +59,11 @@ static inline int msm_minidump_remove_region(const struct md_region *entry)
static inline bool msm_minidump_enabled(void) { return false; }
static inline void dump_stack_minidump(u64 sp) {}
#endif

#ifdef CONFIG_QCOM_DYN_MINIDUMP_STACK
/* Update current stack of this cpu in Minidump table. */
extern void update_md_current_stack(void *data);
#else
static inline void update_md_current_stack(void *data) {}
#endif
#endif
+3 −0
@@ -17,6 +17,8 @@
#include <asm/switch_to.h>
#include <asm/tlb.h>

#include <soc/qcom/minidump.h>

#include "../workqueue_internal.h"
#include "../smpboot.h"

@@ -4271,6 +4273,7 @@ static void __sched notrace __schedule(bool preempt)

		/* Also unlocks the rq: */
		rq = context_switch(rq, prev, next, &rf);
		update_md_current_stack(NULL);
	} else {
		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);