Patch (4 files) — reconstructed from collapsed diff-viewer capture:

diff: drivers/base/core.c (+5 −0)
@@ -639,6 +639,11 @@ int lock_device_hotplug_sysfs(void)
 	return restart_syscall();
 }
 
+void lock_device_hotplug_assert(void)
+{
+	lockdep_assert_held(&device_hotplug_lock);
+}
+
 #ifdef CONFIG_BLOCK
 static inline int device_is_not_partition(struct device *dev)
 {

diff: include/linux/device.h (+1 −0)
@@ -1141,6 +1141,7 @@ static inline bool device_supports_offline(struct device *dev)
 extern void lock_device_hotplug(void);
 extern void unlock_device_hotplug(void);
 extern int lock_device_hotplug_sysfs(void);
+extern void lock_device_hotplug_assert(void);
 extern int device_offline(struct device *dev);
 extern int device_online(struct device *dev);
 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);

diff: include/linux/nmi.h (+17 −0)
@@ -13,6 +13,9 @@
 #ifdef CONFIG_LOCKUP_DETECTOR
 void lockup_detector_init(void);
+extern void watchdog_enable(unsigned int cpu);
+extern void watchdog_disable(unsigned int cpu);
+extern bool watchdog_configured(unsigned int cpu);
 void lockup_detector_soft_poweroff(void);
 void lockup_detector_cleanup(void);
 bool is_hardlockup(void);
@@ -37,6 +40,20 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
 static inline void lockup_detector_init(void) { }
 static inline void lockup_detector_soft_poweroff(void) { }
 static inline void lockup_detector_cleanup(void) { }
+static inline void watchdog_enable(unsigned int cpu) { }
+static inline void watchdog_disable(unsigned int cpu) { }
+static inline bool watchdog_configured(unsigned int cpu)
+{
+	/*
+	 * Pretend the watchdog is always configured: core isolation
+	 * waits for the watchdog to be enabled, so it must not stall
+	 * when the lockup detector is compiled out.
+	 */
+	return true;
+}
 #endif /* !CONFIG_LOCKUP_DETECTOR */
 
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR

diff: kernel/watchdog.c (+20 −2)
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/device.h>
 #include <linux/nmi.h>
 #include <linux/init.h>
 #include <linux/module.h>
[collapsed context not captured]
@@ -174,6 +175,7 @@ static u64 __read_mostly sample_period;
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
 static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+static DEFINE_PER_CPU(unsigned int, watchdog_en);
 static DEFINE_PER_CPU(bool, softlockup_touch_sync);
 static DEFINE_PER_CPU(bool, soft_watchdog_warn);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
[collapsed context not captured]
@@ -454,9 +456,17 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
 	sched_setscheduler(current, policy, &param);
 }
 
-static void watchdog_enable(unsigned int cpu)
+/* Must be called with hotplug lock (lock_device_hotplug()) held. */
+void watchdog_enable(unsigned int cpu)
 {
 	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
+	unsigned int *enabled = this_cpu_ptr(&watchdog_en);
+
+	lock_device_hotplug_assert();
+	if (*enabled)
+		return;
+	*enabled = 1;
 
 	/*
 	 * Start the timer first to prevent the NMI watchdog triggering
[remainder of hunk collapsed in capture]
@@ -476,9 +486,17 @@ static void watchdog_enable(unsigned int cpu)
 	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
 }
 
-static void watchdog_disable(unsigned int cpu)
+/* Must be called with hotplug lock (lock_device_hotplug()) held. */
+void watchdog_disable(unsigned int cpu)
 {
 	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
+	unsigned int *enabled = this_cpu_ptr(&watchdog_en);
+
+	lock_device_hotplug_assert();
+	if (!*enabled)
+		return;
+	*enabled = 0;
+
 	watchdog_set_prio(SCHED_NORMAL, 0);
 
 	/*
[remainder of hunk collapsed in capture]
diff: drivers/base/core.c (+5 −0)
@@ -639,6 +639,11 @@ int lock_device_hotplug_sysfs(void)
 	return restart_syscall();
 }
 
+void lock_device_hotplug_assert(void)
+{
+	lockdep_assert_held(&device_hotplug_lock);
+}
+
 #ifdef CONFIG_BLOCK
 static inline int device_is_not_partition(struct device *dev)
 {
diff: include/linux/device.h (+1 −0)
@@ -1141,6 +1141,7 @@ static inline bool device_supports_offline(struct device *dev)
 extern void lock_device_hotplug(void);
 extern void unlock_device_hotplug(void);
 extern int lock_device_hotplug_sysfs(void);
+extern void lock_device_hotplug_assert(void);
 extern int device_offline(struct device *dev);
 extern int device_online(struct device *dev);
 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
diff: include/linux/nmi.h (+17 −0)
@@ -13,6 +13,9 @@
 #ifdef CONFIG_LOCKUP_DETECTOR
 void lockup_detector_init(void);
+extern void watchdog_enable(unsigned int cpu);
+extern void watchdog_disable(unsigned int cpu);
+extern bool watchdog_configured(unsigned int cpu);
 void lockup_detector_soft_poweroff(void);
 void lockup_detector_cleanup(void);
 bool is_hardlockup(void);
@@ -37,6 +40,20 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
 static inline void lockup_detector_init(void) { }
 static inline void lockup_detector_soft_poweroff(void) { }
 static inline void lockup_detector_cleanup(void) { }
+static inline void watchdog_enable(unsigned int cpu) { }
+static inline void watchdog_disable(unsigned int cpu) { }
+static inline bool watchdog_configured(unsigned int cpu)
+{
+	/*
+	 * Pretend the watchdog is always configured: core isolation
+	 * waits for the watchdog to be enabled, so it must not stall
+	 * when the lockup detector is compiled out.
+	 */
+	return true;
+}
 #endif /* !CONFIG_LOCKUP_DETECTOR */
 
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
diff: kernel/watchdog.c (+20 −2)
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/device.h>
 #include <linux/nmi.h>
 #include <linux/init.h>
 #include <linux/module.h>
[collapsed context not captured]
@@ -174,6 +175,7 @@ static u64 __read_mostly sample_period;
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
 static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+static DEFINE_PER_CPU(unsigned int, watchdog_en);
 static DEFINE_PER_CPU(bool, softlockup_touch_sync);
 static DEFINE_PER_CPU(bool, soft_watchdog_warn);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
[collapsed context not captured]
@@ -454,9 +456,17 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
 	sched_setscheduler(current, policy, &param);
 }
 
-static void watchdog_enable(unsigned int cpu)
+/* Must be called with hotplug lock (lock_device_hotplug()) held. */
+void watchdog_enable(unsigned int cpu)
 {
 	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
+	unsigned int *enabled = this_cpu_ptr(&watchdog_en);
+
+	lock_device_hotplug_assert();
+	if (*enabled)
+		return;
+	*enabled = 1;
 
 	/*
 	 * Start the timer first to prevent the NMI watchdog triggering
[remainder of hunk collapsed in capture]
@@ -476,9 +486,17 @@ static void watchdog_enable(unsigned int cpu)
 	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
 }
 
-static void watchdog_disable(unsigned int cpu)
+/* Must be called with hotplug lock (lock_device_hotplug()) held. */
+void watchdog_disable(unsigned int cpu)
 {
 	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
+	unsigned int *enabled = this_cpu_ptr(&watchdog_en);
+
+	lock_device_hotplug_assert();
+	if (!*enabled)
+		return;
+	*enabled = 0;
+
 	watchdog_set_prio(SCHED_NORMAL, 0);
 
 	/*
[remainder of hunk collapsed in capture]