Merge tag 'v3.11-rc5' into perf/core
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 75a2ab3d0b0208dfa51e40339ffd00206622732e..51c4f34d258ea397266e0dd1a96a16436415a38f 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
 #include <linux/module.h>
 #include <linux/sysctl.h>
 #include <linux/smpboot.h>
+#include <linux/sched/rt.h>
 
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled = 1;
+int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
-static int __read_mostly watchdog_disabled;
+static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -62,7 +63,7 @@ static int __init hardlockup_panic_setup(char *str)
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
-               watchdog_enabled = 0;
+               watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -81,7 +82,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-       watchdog_enabled = 0;
+       watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -89,7 +90,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-       watchdog_enabled = 0;
+       watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -112,9 +113,9 @@ static int get_softlockup_thresh(void)
  * resolution, and we don't need to waste time with a big divide when
  * 2^30ns == 1.074s.
  */
-static unsigned long get_timestamp(int this_cpu)
+static unsigned long get_timestamp(void)
 {
-       return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
+       return local_clock() >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
 static void set_sample_period(void)
@@ -132,9 +133,7 @@ static void set_sample_period(void)
 /* Commands for resetting the watchdog */
 static void __touch_watchdog(void)
 {
-       int this_cpu = smp_processor_id();
-
-       __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
+       __this_cpu_write(watchdog_touch_ts, get_timestamp());
 }
 
 void touch_softlockup_watchdog(void)
@@ -159,7 +158,7 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-       if (watchdog_enabled) {
+       if (watchdog_user_enabled) {
                unsigned cpu;
 
                for_each_present_cpu(cpu) {
@@ -195,7 +194,7 @@ static int is_hardlockup(void)
 
 static int is_softlockup(unsigned long touch_ts)
 {
-       unsigned long now = get_timestamp(smp_processor_id());
+       unsigned long now = get_timestamp();
 
        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
@@ -348,11 +347,6 @@ static void watchdog_enable(unsigned int cpu)
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;
 
-       if (!watchdog_enabled) {
-               kthread_park(current);
-               return;
-       }
-
        /* Enable the perf event */
        watchdog_nmi_enable(cpu);
 
@@ -375,6 +369,11 @@ static void watchdog_disable(unsigned int cpu)
        watchdog_nmi_disable(cpu);
 }
 
+static void watchdog_cleanup(unsigned int cpu, bool online)
+{
+       watchdog_disable(cpu);
+}
+
 static int watchdog_should_run(unsigned int cpu)
 {
        return __this_cpu_read(hrtimer_interrupts) !=
@@ -476,28 +475,40 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
-/* prepare/enable/disable routines */
-/* sysctl functions */
-#ifdef CONFIG_SYSCTL
-static void watchdog_enable_all_cpus(void)
+static struct smp_hotplug_thread watchdog_threads = {
+       .store                  = &softlockup_watchdog,
+       .thread_should_run      = watchdog_should_run,
+       .thread_fn              = watchdog,
+       .thread_comm            = "watchdog/%u",
+       .setup                  = watchdog_enable,
+       .cleanup                = watchdog_cleanup,
+       .park                   = watchdog_disable,
+       .unpark                 = watchdog_enable,
+};
+
+static int watchdog_enable_all_cpus(void)
 {
-       unsigned int cpu;
+       int err = 0;
 
-       if (watchdog_disabled) {
-               watchdog_disabled = 0;
-               for_each_online_cpu(cpu)
-                       kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+       if (!watchdog_running) {
+               err = smpboot_register_percpu_thread(&watchdog_threads);
+               if (err)
+                       pr_err("Failed to create watchdog threads, disabled\n");
+               else
+                       watchdog_running = 1;
        }
+
+       return err;
 }
 
+/* prepare/enable/disable routines */
+/* sysctl functions */
+#ifdef CONFIG_SYSCTL
 static void watchdog_disable_all_cpus(void)
 {
-       unsigned int cpu;
-
-       if (!watchdog_disabled) {
-               watchdog_disabled = 1;
-               for_each_online_cpu(cpu)
-                       kthread_park(per_cpu(softlockup_watchdog, cpu));
+       if (watchdog_running) {
+               watchdog_running = 0;
+               smpboot_unregister_percpu_thread(&watchdog_threads);
        }
 }
 
@@ -508,40 +519,40 @@ static void watchdog_disable_all_cpus(void)
 int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int ret;
+       int err, old_thresh, old_enabled;
 
-       if (watchdog_disabled < 0)
-               return -ENODEV;
+       old_thresh = ACCESS_ONCE(watchdog_thresh);
+       old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
-       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-       if (ret || !write)
-               return ret;
+       err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       if (err || !write)
+               return err;
 
        set_sample_period();
-       if (watchdog_enabled && watchdog_thresh)
-               watchdog_enable_all_cpus();
+       /*
+        * Watchdog threads shouldn't be enabled if they are
+        * disabled. The 'watchdog_running' variable check in
+        * watchdog_*_all_cpus() function takes care of this.
+        */
+       if (watchdog_user_enabled && watchdog_thresh)
+               err = watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();
 
-       return ret;
+       /* Restore old values on failure */
+       if (err) {
+               watchdog_thresh = old_thresh;
+               watchdog_user_enabled = old_enabled;
+       }
+
+       return err;
 }
 #endif /* CONFIG_SYSCTL */
 
-static struct smp_hotplug_thread watchdog_threads = {
-       .store                  = &softlockup_watchdog,
-       .thread_should_run      = watchdog_should_run,
-       .thread_fn              = watchdog,
-       .thread_comm            = "watchdog/%u",
-       .setup                  = watchdog_enable,
-       .park                   = watchdog_disable,
-       .unpark                 = watchdog_enable,
-};
-
 void __init lockup_detector_init(void)
 {
        set_sample_period();
-       if (smpboot_register_percpu_thread(&watchdog_threads)) {
-               pr_err("Failed to create watchdog threads, disabled\n");
-               watchdog_disabled = -ENODEV;
-       }
+
+       if (watchdog_user_enabled)
+               watchdog_enable_all_cpus();
 }