Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 28 May 2011 19:56:46 +0000 (12:56 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 28 May 2011 19:56:46 +0000 (12:56 -0700)
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  cpuset: Fix cpuset_cpus_allowed_fallback(), don't update tsk->rt.nr_cpus_allowed
  sched: Fix ->min_vruntime calculation in dequeue_entity()
  sched: Fix ttwu() for __ARCH_WANT_INTERRUPTS_ON_CTXSW
  sched: More sched_domain iterations fixes
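
The cpuset fix above boils down to one rule: whenever a task's affinity mask changes, its cached CPU count (p->rt.nr_cpus_allowed) and any scheduling-class hook must change with it, which is what the new do_set_cpus_allowed() helper in the kernel/sched.c hunk below enforces. A minimal userspace sketch of that invariant (simplified, hypothetical types; not the kernel API):

#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct cpumask { unsigned long bits; };

struct task {
	struct cpumask cpus_allowed;  /* which CPUs the task may run on       */
	int nr_cpus_allowed;          /* cached weight (popcount) of the mask */
};

/* Model of do_set_cpus_allowed(): the mask and its cached weight are
 * always updated together, so readers of nr_cpus_allowed never see a
 * value that disagrees with cpus_allowed. */
static void model_set_cpus_allowed(struct task *p, const struct cpumask *new_mask)
{
	p->cpus_allowed = *new_mask;
	p->nr_cpus_allowed = __builtin_popcountl(new_mask->bits);
}

int main(void)
{
	struct task t = { { 0x1UL }, 1 };      /* bound to CPU 0 */
	struct cpumask four_cpus = { 0xfUL };  /* CPUs 0-3       */

	/* A bare mask copy (what the old fallback did) would leave
	 * nr_cpus_allowed stale at 1; the helper keeps them in sync. */
	model_set_cpus_allowed(&t, &four_cpus);
	printf("nr_cpus_allowed = %d\n", t.nr_cpus_allowed);  /* prints 4 */
	return 0;
}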

include/linux/cpuset.h
include/linux/sched.h
kernel/cpuset.c
kernel/kthread.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_rt.c
kernel/sched_stats.h

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index f20eb8f16025d74534dd2b62fa22cf494404ef1c..e9eaec522655c4da77f55fefb851564c31cae711 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -146,7 +146,7 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 
 static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-       cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+       do_set_cpus_allowed(p, cpu_possible_mask);
        return cpumask_any(cpu_active_mask);
 }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8f441d1c6550c2a9142dbfbe17c39a7d75187013..bcddd01381050852b856b3144bdc054f0c79204a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1841,9 +1841,16 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif
 
 #ifdef CONFIG_SMP
+extern void do_set_cpus_allowed(struct task_struct *p,
+                              const struct cpumask *new_mask);
+
 extern int set_cpus_allowed_ptr(struct task_struct *p,
                                const struct cpumask *new_mask);
 #else
+static inline void do_set_cpus_allowed(struct task_struct *p,
+                                     const struct cpumask *new_mask)
+{
+}
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
                                       const struct cpumask *new_mask)
 {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1ceeb049c82749e796fe8001f56fb5c7d67fe846..9c9b7545c81078c7ae43253cc1714a73fcb2691e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2190,7 +2190,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
        rcu_read_lock();
        cs = task_cs(tsk);
        if (cs)
-               cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+               do_set_cpus_allowed(tsk, cs->cpus_allowed);
        rcu_read_unlock();
 
        /*
@@ -2217,7 +2217,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
                 * Like above we can temporary set any mask and rely on
                 * set_cpus_allowed_ptr() as synchronization point.
                 */
-               cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+               do_set_cpus_allowed(tsk, cpu_possible_mask);
                cpu = cpumask_any(cpu_active_mask);
        }
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3b34d2732bcecb3719b67976f5b8b7bc66e68b2b..4ba7cccb4994f24d6bc5965cda9b7f568d214f35 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -202,8 +202,8 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
                return;
        }
 
-       p->cpus_allowed = cpumask_of_cpu(cpu);
-       p->rt.nr_cpus_allowed = 1;
+       /* It's safe because the task is inactive. */
+       do_set_cpus_allowed(p, cpumask_of(cpu));
        p->flags |= PF_THREAD_BOUND;
 }
 EXPORT_SYMBOL(kthread_bind);
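
For context, kthread_bind() may rewrite the affinity directly only because the freshly created thread has never run ("It's safe because the task is inactive"). A hedged sketch of the usual caller pattern for the kthread API of this era (hypothetical module, not part of the commit): create the thread stopped, bind it, then wake it.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static int __init bind_demo_init(void)
{
	/* The thread is created stopped, so it is still inactive when
	 * kthread_bind() -> do_set_cpus_allowed() rewrites its mask. */
	worker = kthread_create(worker_fn, NULL, "bind_demo");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	kthread_bind(worker, 0);	/* pin to CPU 0 before it ever runs */
	wake_up_process(worker);
	return 0;
}

static void __exit bind_demo_exit(void)
{
	kthread_stop(worker);
}

module_init(bind_demo_init);
module_exit(bind_demo_exit);
MODULE_LICENSE("GPL");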
diff --git a/kernel/sched.c b/kernel/sched.c
index 5e43e9dc65d1c197aa9b085a4ccf546551570177..cbb3a0eee58eb2c5c6748b949fca5579bbb57432 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2573,7 +2573,26 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
        if (!next)
                smp_send_reschedule(cpu);
 }
-#endif
+
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
+{
+       struct rq *rq;
+       int ret = 0;
+
+       rq = __task_rq_lock(p);
+       if (p->on_cpu) {
+               ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+               ttwu_do_wakeup(rq, p, wake_flags);
+               ret = 1;
+       }
+       __task_rq_unlock(rq);
+
+       return ret;
+
+}
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+#endif /* CONFIG_SMP */
 
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
@@ -2631,17 +2650,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        while (p->on_cpu) {
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
                /*
-                * If called from interrupt context we could have landed in the
-                * middle of schedule(), in this case we should take care not
-                * to spin on ->on_cpu if p is current, since that would
-                * deadlock.
+                * In case the architecture enables interrupts in
+                * context_switch(), we cannot busy wait, since that
+                * would lead to deadlocks when an interrupt hits and
+                * tries to wake up @prev. So bail and do a complete
+                * remote wakeup.
                 */
-               if (p == current) {
-                       ttwu_queue(p, cpu);
+               if (ttwu_activate_remote(p, wake_flags))
                        goto stat;
-               }
-#endif
+#else
                cpu_relax();
+#endif
        }
        /*
         * Pairs with the smp_wmb() in finish_lock_switch().
@@ -5841,7 +5860,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
-       cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+       do_set_cpus_allowed(idle, cpumask_of(cpu));
        /*
         * We're having a chicken and egg problem, even though we are
         * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5929,6 +5948,16 @@ static inline void sched_init_granularity(void)
 }
 
 #ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+       if (p->sched_class && p->sched_class->set_cpus_allowed)
+               p->sched_class->set_cpus_allowed(p, new_mask);
+       else {
+               cpumask_copy(&p->cpus_allowed, new_mask);
+               p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+       }
+}
+
 /*
  * This is how migration works:
  *
@@ -5974,12 +6003,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
                goto out;
        }
 
-       if (p->sched_class->set_cpus_allowed)
-               p->sched_class->set_cpus_allowed(p, new_mask);
-       else {
-               cpumask_copy(&p->cpus_allowed, new_mask);
-               p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
-       }
+       do_set_cpus_allowed(p, new_mask);
 
        /* Can the task run on the task's current CPU? If so, we're done */
        if (cpumask_test_cpu(task_cpu(p), new_mask))
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e32a9b70ee9c716149d57a33f298495d368b292c..433491c2dc8f5c9952655de72958c7019dadd57f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1076,8 +1076,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        se->on_rq = 0;
        update_cfs_load(cfs_rq, 0);
        account_entity_dequeue(cfs_rq, se);
-       update_min_vruntime(cfs_rq);
-       update_cfs_shares(cfs_rq);
 
        /*
         * Normalize the entity after updating the min_vruntime because the
@@ -1086,6 +1084,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         */
        if (!(flags & DEQUEUE_SLEEP))
                se->vruntime -= cfs_rq->min_vruntime;
+
+       update_min_vruntime(cfs_rq);
+       update_cfs_shares(cfs_rq);
 }
 
 /*
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 64b2a37c07d0839ffe771ecaf4fa46cb73647750..88725c939e0b8000253905332db8725507781f48 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1263,6 +1263,7 @@ static int find_lowest_rq(struct task_struct *task)
        if (!cpumask_test_cpu(this_cpu, lowest_mask))
                this_cpu = -1; /* Skip this_cpu opt if not among lowest */
 
+       rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        int best_cpu;
@@ -1272,15 +1273,20 @@ static int find_lowest_rq(struct task_struct *task)
                         * remote processor.
                         */
                        if (this_cpu != -1 &&
-                           cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
+                           cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
+                               rcu_read_unlock();
                                return this_cpu;
+                       }
 
                        best_cpu = cpumask_first_and(lowest_mask,
                                                     sched_domain_span(sd));
-                       if (best_cpu < nr_cpu_ids)
+                       if (best_cpu < nr_cpu_ids) {
+                               rcu_read_unlock();
                                return best_cpu;
+                       }
                }
        }
+       rcu_read_unlock();
 
        /*
         * And finally, if there were no matches within the domains
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 48ddf431db0ee4da60025e364e0b2a64e459508f..331e01bcd0260c9fc9db3269ba1416a83c355039 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -37,7 +37,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 
 #ifdef CONFIG_SMP
                /* domain-specific stats */
-               preempt_disable();
+               rcu_read_lock();
                for_each_domain(cpu, sd) {
                        enum cpu_idle_type itype;
 
@@ -64,7 +64,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
                            sd->ttwu_wake_remote, sd->ttwu_move_affine,
                            sd->ttwu_move_balance);
                }
-               preempt_enable();
+               rcu_read_unlock();
 #endif
        }
        kfree(mask_str);
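
The last two hunks (kernel/sched_rt.c and kernel/sched_stats.h) are the "More sched_domain iterations fixes": the sched-domain data is protected by RCU, so every for_each_domain() walk must sit inside rcu_read_lock()/rcu_read_unlock(), and each early return out of the loop has to drop the read lock first. A small userspace model of that shape (a pthread read lock standing in for the RCU read side; names are hypothetical):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the RCU read-side critical section that protects the
 * sched-domain list: a plain reader lock around the whole walk. */
static pthread_rwlock_t domain_lock = PTHREAD_RWLOCK_INITIALIZER;
static const int domain_cpus[] = { 2, 5, 7 };

static int model_find_lowest(int wanted)
{
	size_t i;

	pthread_rwlock_rdlock(&domain_lock);	/* rcu_read_lock() */
	for (i = 0; i < sizeof(domain_cpus) / sizeof(domain_cpus[0]); i++) {
		if (domain_cpus[i] == wanted) {
			/* Early return: unlock on this path too, exactly
			 * like the added rcu_read_unlock() calls above. */
			pthread_rwlock_unlock(&domain_lock);
			return (int)i;
		}
	}
	pthread_rwlock_unlock(&domain_lock);	/* rcu_read_unlock() */
	return -1;
}

int main(void)
{
	printf("found at index %d\n", model_find_lowest(5));
	return 0;
}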