Merge branches 'timers-urgent-for-linus', 'irq-urgent-for-linus' and 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 15 Apr 2013 14:03:01 +0000 (07:03 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 15 Apr 2013 14:03:01 +0000 (07:03 -0700)
Pull {timer,irq,core} fixes from Thomas Gleixner:

 - timer: bug fix for a cpu hotplug race.

 - irq: single bugfix for a wrong return value, which prevented the
   calling function from invoking the software fallback (see the sketch
   after this list).

 - core: bugfix which plugs two race conditions which can cause hotplug
   per-cpu threads to end up on the wrong cpu (a user-space sketch of the
   park/unpark handshake follows the branch list below).

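For context on the irq item, here is a minimal, self-contained sketch of the contract the fix restores: the genirq resend path treats a non-zero irq_retrigger() return as "the hardware replayed the interrupt" and only falls back to a software resend when the callback returns 0, so the GIC's earlier -ENXIO return suppressed the fallback. The names below (irq_chip_sketch, check_resend, software_resend, gic_like_retrigger) are illustrative stand-ins, not kernel APIs.

/*
 * Minimal, self-contained sketch (not the kernel's code) of the contract
 * the genirq resend path expects from irq_retrigger(): a non-zero return
 * means "the hardware replayed the interrupt", and only a return of 0
 * makes the core fall back to a software resend.
 */
#include <stdio.h>

struct irq_chip_sketch {
	int (*irq_retrigger)(void *data);	/* chip callback, may be NULL */
};

static void software_resend(void *data)
{
	(void)data;
	printf("software fallback resend\n");
}

static void check_resend(struct irq_chip_sketch *chip, void *data)
{
	int done_in_hw = 0;

	if (chip->irq_retrigger)
		done_in_hw = chip->irq_retrigger(data);

	if (!done_in_hw)	/* 0 means "could not retrigger in hardware" */
		software_resend(data);
}

/* Returning -ENXIO (non-zero) here would wrongly skip the fallback. */
static int gic_like_retrigger(void *data)
{
	(void)data;
	return 0;		/* cannot retrigger in hardware */
}

int main(void)
{
	struct irq_chip_sketch chip = { .irq_retrigger = gic_like_retrigger };

	check_resend(&chip, NULL);
	return 0;
}
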
* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  hrtimer: Don't reinitialize a cpu_base lock on CPU_UP

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip: gic: fix irq_trigger return

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  kthread: Prevent unpark race which puts threads on the wrong cpu

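For the core item, a small user-space analogue of the park/unpark handshake the fix enforces: only touch the thread's CPU binding while it is verifiably parked, then wake it with a wakeup that matches exactly that parked sleep. This is an analogue only, not kernel code: the condvar loop stands in for the TASK_PARKED sleep, the wait for is_parked stands in for wait_task_inactive(), and pthread_setaffinity_np() (a glibc extension, hence _GNU_SOURCE) stands in for __kthread_bind().

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool should_park = true;		/* analogue of KTHREAD_SHOULD_PARK */
static bool is_parked;			/* analogue of KTHREAD_IS_PARKED */

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (should_park) {
		is_parked = true;		/* analogue of TASK_PARKED */
		pthread_cond_broadcast(&cond);	/* analogue of complete(&parked) */
		pthread_cond_wait(&cond, &lock);
	}
	is_parked = false;
	pthread_mutex_unlock(&lock);
	printf("worker unparked on its target CPU\n");
	return NULL;
}

static void unpark(pthread_t tid, int cpu)
{
	cpu_set_t set;

	pthread_mutex_lock(&lock);
	while (!is_parked)			/* analogue of wait_task_inactive() */
		pthread_cond_wait(&cond, &lock);

	/* Safe to rebind: the worker is quiescent in its park loop. */
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	pthread_setaffinity_np(tid, sizeof(set), &set);

	should_park = false;
	pthread_cond_broadcast(&cond);		/* analogue of wake_up_state(TASK_PARKED) */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	unpark(tid, 0);
	pthread_join(tid, NULL);
	return 0;
}
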
drivers/irqchip/irq-gic.c
fs/proc/array.c
include/linux/sched.h
include/trace/events/sched.h
kernel/hrtimer.c
kernel/kthread.c
kernel/smpboot.c

diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index a32e0d5aa45f43eb71c91ab9020696436212ae95..fc6aebf1e4b2630493b8e3ff89a2f76650019ef9 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)
        if (gic_arch_extn.irq_retrigger)
                return gic_arch_extn.irq_retrigger(d);
 
-       return -ENXIO;
+       /* the genirq layer expects 0 if we can't retrigger in hardware */
+       return 0;
 }
 
 #ifdef CONFIG_SMP
diff --git a/fs/proc/array.c b/fs/proc/array.c
index f7ed9ee46eb9d3818d2210c100731e5eda6ee35e..cbd0f1b324b972b96f036139fcd96bf53a03fb01 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -143,6 +143,7 @@ static const char * const task_state_array[] = {
        "x (dead)",             /*  64 */
        "K (wakekill)",         /* 128 */
        "W (waking)",           /* 256 */
+       "P (parked)",           /* 512 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d35d2b6ddbfb69f098b1660fe280b4d2da8a5ffe..e692a022527bdaaace8b388268b0c280946fc5c2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_DEAD              64
 #define TASK_WAKEKILL          128
 #define TASK_WAKING            256
-#define TASK_STATE_MAX         512
+#define TASK_PARKED            512
+#define TASK_STATE_MAX         1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
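
A quick aside on the sched.h hunk above (an editorial illustration, not part of the patch): the compile-time assertion ties the length of TASK_STATE_TO_CHAR_STR to ilog2(TASK_STATE_MAX) + 1, which is why doubling TASK_STATE_MAX to 1024 has to come with the extra "P" character. A minimal standalone check of that arithmetic:

/*
 * TASK_STATE_TO_CHAR_STR needs one character per state bit, i.e.
 * ilog2(TASK_STATE_MAX) + 1 characters.  With TASK_PARKED = 512 the
 * maximum grows to 1024, ilog2(1024) = 10, and the string gains "P"
 * to reach 11 characters.
 */
#define TASK_STATE_MAX		1024
#define TASK_STATE_TO_CHAR_STR	"RSDTtZXxKWP"

/* sizeof the string literal includes the trailing NUL, hence the -1. */
_Static_assert(sizeof(TASK_STATE_TO_CHAR_STR) - 1 == 10 + 1,
	       "one state character per task-state bit");

int main(void) { return 0; }
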
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 5a8671e8a67ff8fa8f715311300901181555d78e..e5586caff67a973c962a3ed6bf43d4cc01664083 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
                  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
                                { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
-                               { 128, "W" }) : "R",
+                               { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
                __entry->prev_state & TASK_STATE_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cc47812d3feb0c86cbcf8f03e6ee81340ef7c57c..14be27feda491da1c3dc9990a5ae80ce649570aa 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -63,6 +63,7 @@
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
 
+       .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
        .clock_base =
        {
                {
@@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;
 
-       raw_spin_lock_init(&cpu_base->lock);
-
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                cpu_base->clock_base[i].cpu_base = cpu_base;
                timerqueue_init_head(&cpu_base->clock_base[i].active);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 691dc2ef9baf241c121144799360a7e1237afcb5..9eb7fed0bbaa9895973a14e551c76de31fffe533 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-       __set_current_state(TASK_INTERRUPTIBLE);
+       __set_current_state(TASK_PARKED);
        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
                        complete(&self->parked);
                schedule();
-               __set_current_state(TASK_INTERRUPTIBLE);
+               __set_current_state(TASK_PARKED);
        }
        clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 {
+       /* Must have done schedule() in kthread() before we set_task_cpu */
+       if (!wait_task_inactive(p, state)) {
+               WARN_ON(1);
+               return;
+       }
        /* It's safe because the task is inactive. */
        do_set_cpus_allowed(p, cpumask_of(cpu));
        p->flags |= PF_THREAD_BOUND;
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-       /* Must have done schedule() in kthread() before we set_task_cpu */
-       if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-               WARN_ON(1);
-               return;
-       }
-       __kthread_bind(p, cpu);
+       __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
        return NULL;
 }
 
+static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
+{
+       clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       /*
+        * We clear the IS_PARKED bit here as we don't wait
+        * until the task has left the park code. So if we'd
+        * park before that happens we'd see the IS_PARKED bit
+        * which might be about to be cleared.
+        */
+       if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+               if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+                       __kthread_bind(k, kthread->cpu, TASK_PARKED);
+               wake_up_state(k, TASK_PARKED);
+       }
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k:         thread created by kthread_create().
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
 {
        struct kthread *kthread = task_get_live_kthread(k);
 
-       if (kthread) {
-               clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-               /*
-                * We clear the IS_PARKED bit here as we don't wait
-                * until the task has left the park code. So if we'd
-                * park before that happens we'd see the IS_PARKED bit
-                * which might be about to be cleared.
-                */
-               if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-                       if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-                               __kthread_bind(k, kthread->cpu);
-                       wake_up_process(k);
-               }
-       }
+       if (kthread)
+               __kthread_unpark(k, kthread);
        put_task_struct(k);
 }
 
@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
        trace_sched_kthread_stop(k);
        if (kthread) {
                set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
-               clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+               __kthread_unpark(k, kthread);
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
        }
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 8eaed9aa9cf0c1995520605af1de8ef3b9e95485..02fc5c9336735a834378dbbb6fff8e57b919b0fa 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
        }
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
-       if (ht->create)
-               ht->create(cpu);
+       if (ht->create) {
+               /*
+                * Make sure that the task has actually scheduled out
+                * into park position, before calling the create
+                * callback. At least the migration thread callback
+                * requires that the task is off the runqueue.
+                */
+               if (!wait_task_inactive(tsk, TASK_PARKED))
+                       WARN_ON(1);
+               else
+                       ht->create(cpu);
+       }
        return 0;
 }