
Commit c373fc1f authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "soc: qcom: Move watchdog suspend/resume callbacks to syscore ops"

parents 4afbca2a 62ea1d38
+9 −0
@@ -478,6 +478,15 @@ config QCOM_MINIDUMP
	  Minidump would dump all registered entries, only when DLOAD mode
	  is enabled.

config QCOM_DYN_MINIDUMP_STACK
	bool "QTI Dynamic Minidump Stack Registration Support"
	depends on QCOM_MINIDUMP
	help
	  This enables minidump dynamic current stack registration feature.
	  It allows current task stack to be available in minidump, for cases
	  where CPU is unable to register it from IPI_CPU_STOP. The stack data
	  can be used to unwind stack frames.

config MINIDUMP_MAX_ENTRIES
	int "Minidump Maximum num of entries"
	default 200
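
The new QCOM_DYN_MINIDUMP_STACK option builds on the minidump region API that the rest of this change uses. As a point of reference, here is a minimal sketch of how a client registers a buffer with minidump; the buffer, function name, and registration site are hypothetical, while the md_region fields and the convention that msm_minidump_add_region() returns a non-negative region index on success follow the calls visible in the diff below.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <soc/qcom/minidump.h>

static void *example_buf;		/* hypothetical buffer to capture */
static struct md_region example_mdr;
static int example_mdno = -1;		/* region index handed back by minidump */

static int example_minidump_register(void)
{
	example_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!example_buf)
		return -ENOMEM;

	scnprintf(example_mdr.name, sizeof(example_mdr.name), "EXBUF");
	example_mdr.virt_addr = (u64)example_buf;
	/* kmalloc memory is linearly mapped, so virt_to_phys() is valid here. */
	example_mdr.phys_addr = virt_to_phys(example_buf);
	example_mdr.size = PAGE_SIZE;

	/* A negative return means the region could not be added. */
	example_mdno = msm_minidump_add_region(&example_mdr);
	return example_mdno < 0 ? example_mdno : 0;
}

The dynamic stack feature keeps that returned index per CPU (stack_mdidx) so it can later call msm_minidump_update_region() with fresh addresses every time the running task changes.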
+193 −10
@@ -3,6 +3,7 @@
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -10,12 +11,38 @@
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <soc/qcom/minidump.h>
#include <asm/page.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <linux/mm.h>
#include <linux/ratelimit.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_QCOM_DYN_MINIDUMP_STACK

#include <trace/events/sched.h>

#ifdef CONFIG_VMAP_STACK
#define STACK_NUM_PAGES (THREAD_SIZE / PAGE_SIZE)
#else
#define STACK_NUM_PAGES 1
#endif	/* !CONFIG_VMAP_STACK */

struct md_stack_cpu_data {
	int stack_mdidx[STACK_NUM_PAGES];
	struct md_region stack_mdr[STACK_NUM_PAGES];
} ____cacheline_aligned_in_smp;

static int md_current_stack_init __read_mostly;

static DEFINE_PER_CPU_SHARED_ALIGNED(struct md_stack_cpu_data, md_stack_data);
#endif

static bool is_vmap_stack __read_mostly;


static void __init register_log_buf(void)
{
	char **log_bufp;
@@ -37,23 +64,25 @@ static void __init register_log_buf(void)
		pr_err("Failed to add logbuf in Minidump\n");
}

static void register_stack_entry(struct md_region *ksp_entry, u64 sp, u64 size,
static int register_stack_entry(struct md_region *ksp_entry, u64 sp, u64 size,
				 u32 cpu)
{
	struct page *sp_page;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
	int entry;

	ksp_entry->virt_addr = sp;
	ksp_entry->size = size;
	if (stack_vm_area) {
	if (is_vmap_stack) {
		sp_page = vmalloc_to_page((const void *) sp);
		ksp_entry->phys_addr = page_to_phys(sp_page);
	} else {
		ksp_entry->phys_addr = virt_to_phys((uintptr_t *)sp);
	}

	if (msm_minidump_add_region(ksp_entry) < 0)
	entry = msm_minidump_add_region(ksp_entry);
	if (entry < 0)
		pr_err("Failed to add stack of cpu %d in Minidump\n", cpu);
	return entry;
}

static void __init register_kernel_sections(void)
@@ -86,8 +115,8 @@ static void __init register_kernel_sections(void)
	}
}

static inline bool in_stack_range(u64 sp, u64 base_addr, unsigned int
				  stack_size)
static inline bool in_stack_range(
		u64 sp, u64 base_addr, unsigned int stack_size)
{
	u64 min_addr = base_addr;
	u64 max_addr = base_addr + stack_size;
@@ -118,9 +147,14 @@ void dump_stack_minidump(u64 sp)
	struct vm_struct *stack_vm_area;
	unsigned int i, copy_pages;

	if (IS_ENABLED(CONFIG_QCOM_DYN_MINIDUMP_STACK))
		return;

	if (is_idle_task(current))
		return;

	is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);

	if (sp < KIMAGE_VADDR || sp > -256UL)
		sp = current_stack_pointer;

@@ -133,20 +167,21 @@ void dump_stack_minidump(u64 sp)
	 * address of one page of the stack.
	 */
	stack_vm_area = task_stack_vm_area(current);
	if (stack_vm_area) {
	if (is_vmap_stack) {
		sp &= ~(PAGE_SIZE - 1);
		copy_pages = calculate_copy_pages(sp, stack_vm_area);
		for (i = 0; i < copy_pages; i++) {
			scnprintf(ksp_entry.name, sizeof(ksp_entry.name),
				  "KSTACK%d_%d", cpu, i);
			register_stack_entry(&ksp_entry, sp, PAGE_SIZE, cpu);
			(void)register_stack_entry(&ksp_entry, sp,
						   PAGE_SIZE, cpu);
			sp += PAGE_SIZE;
		}
	} else {
		sp &= ~(THREAD_SIZE - 1);
		scnprintf(ksp_entry.name, sizeof(ksp_entry.name), "KSTACK%d",
			  cpu);
		register_stack_entry(&ksp_entry, sp, THREAD_SIZE, cpu);
		(void)register_stack_entry(&ksp_entry, sp, THREAD_SIZE, cpu);
	}

	scnprintf(ktsk_entry.name, sizeof(ktsk_entry.name), "KTASK%d", cpu);
@@ -157,6 +192,150 @@ void dump_stack_minidump(u64 sp)
		pr_err("Failed to add current task %d in Minidump\n", cpu);
}

#ifdef CONFIG_QCOM_DYN_MINIDUMP_STACK
static void update_stack_entry(struct md_region *ksp_entry, u64 sp,
			       int mdno, u32 cpu)
{
	struct page *sp_page;

	ksp_entry->virt_addr = sp;
	if (likely(is_vmap_stack)) {
		sp_page = vmalloc_to_page((const void *) sp);
		ksp_entry->phys_addr = page_to_phys(sp_page);
	} else {
		ksp_entry->phys_addr = virt_to_phys((uintptr_t *)sp);
	}
	if (msm_minidump_update_region(mdno, ksp_entry) < 0) {
		pr_err_ratelimited(
			"Failed to update cpu[%d] current stack in minidump\n",
			cpu);
	}
}

static void register_vmapped_stack(struct md_stack_cpu_data *md_stack_cpu_d,
				   u64 sp, u32 cpu, bool update)
{
	struct md_region *mdr;
	int *mdno;
	int i;

	sp &= ~(PAGE_SIZE - 1);
	for (i = 0; i < STACK_NUM_PAGES; i++) {
		mdr = md_stack_cpu_d->stack_mdr + i;
		mdno = md_stack_cpu_d->stack_mdidx + i;
		if (unlikely(!update)) {
			scnprintf(mdr->name, sizeof(mdr->name),
				  "KSTACK%d_%d", cpu, i);
			*mdno = register_stack_entry(mdr, sp, PAGE_SIZE, cpu);
		} else {
			update_stack_entry(mdr, sp, *mdno, cpu);
		}
		sp += PAGE_SIZE;
	}
}

static void register_normal_stack(struct md_stack_cpu_data *md_stack_cpu_d,
				  u64 sp, u32 cpu, bool update)
{
	struct md_region *mdr;

	mdr = md_stack_cpu_d->stack_mdr;
	sp &= ~(THREAD_SIZE - 1);
	if (unlikely(!update)) {
		scnprintf(mdr->name, sizeof(mdr->name), "KSTACK%d", cpu);
		*md_stack_cpu_d->stack_mdidx = register_stack_entry(
						mdr, sp, THREAD_SIZE, cpu);
	} else {
		update_stack_entry(mdr, sp,
				   *md_stack_cpu_d->stack_mdidx, cpu);
	}
}

static void update_md_stack(u32 cpu, u64 sp)
{
	struct md_stack_cpu_data *md_stack_cpu_d =
				&per_cpu(md_stack_data, cpu);
	int *mdno;
	unsigned int i;

	if (is_idle_task(current) || !md_current_stack_init)
		return;

	if (likely(is_vmap_stack)) {
		for (i = 0; i < STACK_NUM_PAGES; i++) {
			mdno = md_stack_cpu_d->stack_mdidx + i;
			if (unlikely(*mdno < 0))
				return;
		}
		register_vmapped_stack(md_stack_cpu_d, sp, cpu, true);
	} else {
		if (unlikely(*md_stack_cpu_d->stack_mdidx < 0))
			return;
		register_normal_stack(md_stack_cpu_d, sp, cpu, true);
	}
}

void md_current_stack_notifer(void *ignore, bool preempt,
		struct task_struct *prev, struct task_struct *next)
{
	u32 cpu = task_cpu(next);
	u64 sp = (u64)next->stack;

	update_md_stack(cpu, sp);
}

void md_current_stack_ipi_handler(void *data)
{
	u32 cpu = smp_processor_id();
	struct vm_struct *stack_vm_area;
	u64 sp = current_stack_pointer;

	if (is_idle_task(current))
		return;
	if (likely(is_vmap_stack)) {
		stack_vm_area = task_stack_vm_area(current);
		sp = (u64)stack_vm_area->addr;
	}
	update_md_stack(cpu, sp);
}

static void register_current_stack(void)
{
	int cpu;
	u64 sp = current_stack_pointer;
	struct md_stack_cpu_data *md_stack_cpu_d;
	struct vm_struct *stack_vm_area;

	/*
	 * Since stacks are now allocated with vmalloc, the translation to
	 * physical address is not a simple linear transformation like it is
	 * for kernel logical addresses, since vmalloc creates a virtual
	 * mapping. Thus, virt_to_phys() should not be used in this context;
	 * instead the page table must be walked to acquire the physical
	 * address of all pages of the stack.
	 */
	if (likely(is_vmap_stack)) {
		stack_vm_area = task_stack_vm_area(current);
		sp = (u64)stack_vm_area->addr;
	}
	for_each_possible_cpu(cpu) {
		/*
		 * Register dummy entries for now; once the system is up and
		 * running, each CPU updates the entries for its current task.
		 */
		md_stack_cpu_d = &per_cpu(md_stack_data, cpu);
		if (is_vmap_stack)
			register_vmapped_stack(md_stack_cpu_d, sp, cpu, false);
		else
			register_normal_stack(md_stack_cpu_d, sp, cpu, false);
	}

	register_trace_sched_switch(md_current_stack_notifer, NULL);
	md_current_stack_init = 1;
	smp_call_function(md_current_stack_ipi_handler, NULL, 1);
}
#endif

#ifdef CONFIG_ARM64
static void register_irq_stack(void)
{
@@ -169,7 +348,7 @@ static void register_irq_stack(void)

	for_each_possible_cpu(cpu) {
		irq_stack_base = (u64)per_cpu(irq_stack_ptr, cpu);
		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		if (is_vmap_stack) {
			irq_stack_pages_count = IRQ_STACK_SIZE / PAGE_SIZE;
			sp = irq_stack_base & ~(PAGE_SIZE - 1);
			for (i = 0; i < irq_stack_pages_count; i++) {
@@ -196,7 +375,11 @@ static inline void register_irq_stack(void) {}
static int __init msm_minidump_log_init(void)
{
	register_kernel_sections();
	is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);
	register_irq_stack();
#ifdef CONFIG_QCOM_DYN_MINIDUMP_STACK
	register_current_stack();
#endif
	register_log_buf();
	return 0;
}
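
The long comment in register_current_stack() above captures the constraint behind every is_vmap_stack branch in this file: a kmalloc/lowmem address sits in the kernel's linear mapping and can be translated with virt_to_phys(), whereas a CONFIG_VMAP_STACK stack lives in vmalloc space and has to be resolved page by page through the page tables. A minimal sketch of that distinction (the helper name is hypothetical; the two translation paths mirror the ones used by register_stack_entry() and update_stack_entry() above):

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/memory.h>

/* Hypothetical helper: physical address of the page backing 'addr'. */
static phys_addr_t example_page_phys(const void *addr, bool vmapped)
{
	if (vmapped) {
		/* vmalloc mapping: walk the page tables for this page. */
		struct page *pg = vmalloc_to_page(addr);

		return page_to_phys(pg);
	}

	/* Linear mapping: a fixed offset translation is sufficient. */
	return virt_to_phys(addr);
}

With CONFIG_VMAP_STACK the stack spans THREAD_SIZE / PAGE_SIZE pages that need not be physically contiguous, which is why the driver registers STACK_NUM_PAGES separate md_region entries per CPU and repeats this translation for each page.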
+0 −11
@@ -105,16 +105,6 @@ static int qcom_soc_wdt_probe(struct platform_device *pdev)
	return qcom_wdt_register(pdev, wdog_dd, "msm-watchdog");
}

#ifdef CONFIG_PM_SLEEP
static const struct dev_pm_ops qcom_soc_dev_pm_ops = {
	.suspend_noirq = qcom_wdt_suspend,
	.resume_noirq = qcom_wdt_resume,
};
#else
static const struct dev_pm_ops qcom_soc_dev_pm_ops = {
};
#endif

static const struct of_device_id qcom_soc_match_table[] = {
	{ .compatible = "qcom,msm-watchdog" },
	{}
@@ -125,7 +115,6 @@ static struct platform_driver qcom_soc_wdt_driver = {
	.remove = qcom_wdt_remove,
	.driver = {
		.name = "msm_watchdog",
		.pm = &qcom_soc_dev_pm_ops,
		.of_match_table = qcom_soc_match_table,
	},
};
+35 −25
@@ -20,6 +20,7 @@
#include <uapi/linux/sched/types.h>
#include <linux/sched/clock.h>
#include <linux/irq.h>
#include <linux/syscore_ops.h>

#define MASK_SIZE        32

@@ -45,23 +46,22 @@ static void qcom_wdt_dump_cpu_alive_mask(struct msm_watchdog_data *wdog_dd)
 *  will cause the watchdog counter to be frozen.
 *
 */
int qcom_wdt_suspend(struct device *dev)
static int qcom_wdt_suspend(void)
{
	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);

	if (!wdog_dd)
	if (!wdog_data)
		return 0;
	wdog_dd->ops->reset_wdt(wdog_dd);
	if (wdog_dd->wakeup_irq_enable) {
		wdog_dd->last_pet = sched_clock();

	wdog_data->ops->reset_wdt(wdog_data);
	if (wdog_data->wakeup_irq_enable) {
		wdog_data->last_pet = sched_clock();
		return 0;
	}
	wdog_dd->ops->disable_wdt(wdog_dd);
	wdog_dd->enabled = false;
	wdog_dd->last_pet = sched_clock();

	wdog_data->ops->disable_wdt(wdog_data);
	wdog_data->enabled = false;
	wdog_data->last_pet = sched_clock();
	return 0;
}
EXPORT_SYMBOL(qcom_wdt_suspend);

/**
 *  qcom_wdt_resume() - Resumes qcom watchdog after a suspend.
@@ -72,26 +72,32 @@ EXPORT_SYMBOL(qcom_wdt_suspend);
 *  This will cause the watchdog counter to be reset and resumed.
 *
 */
int qcom_wdt_resume(struct device *dev)
static void qcom_wdt_resume(void)
{
	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
	if (!wdog_data)
		return;

	if (!wdog_dd)
		return 0;
	if (wdog_dd->wakeup_irq_enable) {
		wdog_dd->ops->reset_wdt(wdog_dd);
		wdog_dd->last_pet = sched_clock();
		return 0;
	if (wdog_data->wakeup_irq_enable) {
		wdog_data->ops->reset_wdt(wdog_data);
		wdog_data->last_pet = sched_clock();
		return;
	}
	wdog_dd->ops->enable_wdt(1, wdog_dd);
	wdog_dd->ops->reset_wdt(wdog_dd);
	wdog_dd->enabled = true;
	wdog_dd->last_pet = sched_clock();
	return 0;

	wdog_data->ops->enable_wdt(1, wdog_data);
	wdog_data->ops->reset_wdt(wdog_data);
	wdog_data->enabled = true;
	wdog_data->last_pet = sched_clock();
	return;
}
EXPORT_SYMBOL(qcom_wdt_resume);
#endif

static struct syscore_ops qcom_wdt_syscore_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = qcom_wdt_suspend,
	.resume = qcom_wdt_resume,
#endif
};

static int qcom_wdt_panic_handler(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
@@ -374,6 +380,7 @@ int qcom_wdt_remove(struct platform_device *pdev)
{
	struct msm_watchdog_data *wdog_dd = platform_get_drvdata(pdev);

	unregister_syscore_ops(&qcom_wdt_syscore_ops);
	if (!IPI_CORES_IN_LPM)
		cpu_pm_unregister_notifier(&wdog_dd->wdog_cpu_pm_nb);

@@ -575,6 +582,9 @@ int qcom_wdt_register(struct platform_device *pdev,
	md_entry.size = sizeof(*wdog_dd);
	if (msm_minidump_add_region(&md_entry) < 0)
		dev_err(wdog_dd->dev, "Failed to add Wdt data in Minidump\n");

	register_syscore_ops(&qcom_wdt_syscore_ops);

	return 0;
err:
	return ret;
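
The signature changes in this file follow directly from struct syscore_ops (include/linux/syscore_ops.h): syscore callbacks are global rather than per device, suspend() takes no arguments and returns int, and resume() returns void, which is why the driver now reads the file-scope wdog_data pointer instead of dev_get_drvdata(). Syscore suspend runs after all device callbacks, with only one CPU online and interrupts disabled, and resume runs correspondingly early, so the watchdog is frozen as late and re-armed as early as possible around a system sleep. A minimal sketch of the pattern this commit adopts; the example names and state are hypothetical, not the driver's actual code:

#include <linux/syscore_ops.h>

static bool example_wdt_enabled;	/* hypothetical driver state */

/* Called with one CPU online and interrupts disabled, after device suspend. */
static int example_wdt_syscore_suspend(void)
{
	example_wdt_enabled = false;
	return 0;
}

/* Called with one CPU online and interrupts disabled, before device resume. */
static void example_wdt_syscore_resume(void)
{
	example_wdt_enabled = true;
}

static struct syscore_ops example_wdt_syscore_ops = {
	.suspend = example_wdt_syscore_suspend,
	.resume = example_wdt_syscore_resume,
};

/*
 * register_syscore_ops(&example_wdt_syscore_ops) pairs with
 * unregister_syscore_ops(&example_wdt_syscore_ops), mirroring the calls
 * added to qcom_wdt_register() and qcom_wdt_remove() above.
 */

Because the core now registers syscore ops itself, the per-device dev_pm_ops blocks in the msm-watchdog and hh-watchdog platform drivers become dead code, which is why they are removed in the other two files of this change.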
+0 −11
@@ -261,16 +261,6 @@ static int hh_wdt_probe(struct platform_device *pdev)
	return qcom_wdt_register(pdev, wdog_dd, "hh-watchdog");
}

#ifdef CONFIG_PM_SLEEP
static const struct dev_pm_ops hh_wdt_dev_pm_ops = {
	.suspend_noirq = qcom_wdt_suspend,
	.resume_noirq = qcom_wdt_resume,
};
#else
static const struct dev_pm_ops hh_wdt_dev_pm_ops = {
};
#endif

static const struct of_device_id hh_wdt_match_table[] = {
	{ .compatible = "qcom,hh-watchdog" },
	{}
@@ -281,7 +271,6 @@ static struct platform_driver hh_wdt_driver = {
	.remove = qcom_wdt_remove,
	.driver = {
		.name = "hh-watchdog",
		.pm = &hh_wdt_dev_pm_ops,
		.of_match_table = hh_wdt_match_table,
	},
};