Merge branch 'for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index afc6d7e7155708812b839ad0776e3a33848ece02..03667c3fdb33fa267295bc98667ad02fc6178ed9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -83,6 +83,7 @@
 
 #include "sched.h"
 #include "../workqueue_sched.h"
+#include "../smpboot.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -2083,6 +2084,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
        /* Here we just switch the register state and the stack. */
+       rcu_switch_from(prev);
        switch_to(prev, next, prev);
 
        barrier();
@@ -6382,6 +6384,8 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                        if (!sg)
                                return -ENOMEM;
 
+                       sg->next = sg;
+
                        *per_cpu_ptr(sdd->sg, j) = sg;
 
                        sgp = kzalloc_node(sizeof(struct sched_group_power),
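
Note on the sg->next = sg line added in the hunk above: scheduling groups are kept on a circular singly linked list, so pointing a freshly allocated group at itself makes it a valid one-element ring even before (or without) any further linking. A minimal sketch of the idea, with struct and field names simplified rather than the kernel's real definitions:

	/* Simplified sketch (not the kernel's definitions): a group kept on a
	 * circular singly linked list. */
	struct group {
		struct group *next;	/* last entry points back to the first */
	};

	/* The usual do/while walk over such a ring terminates even when the
	 * ring is a single group that points to itself. */
	static void visit_all(struct group *first)
	{
		struct group *g = first;

		do {
			/* ... operate on g ... */
			g = g->next;
		} while (g != first);
	}
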
@@ -6405,16 +6409,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
                struct sd_data *sdd = &tl->data;
 
                for_each_cpu(j, cpu_map) {
-                       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-                       if (sd && (sd->flags & SD_OVERLAP))
-                               free_sched_groups(sd->groups, 0);
-                       kfree(*per_cpu_ptr(sdd->sd, j));
-                       kfree(*per_cpu_ptr(sdd->sg, j));
-                       kfree(*per_cpu_ptr(sdd->sgp, j));
+                       struct sched_domain *sd;
+
+                       if (sdd->sd) {
+                               sd = *per_cpu_ptr(sdd->sd, j);
+                               if (sd && (sd->flags & SD_OVERLAP))
+                                       free_sched_groups(sd->groups, 0);
+                               kfree(*per_cpu_ptr(sdd->sd, j));
+                       }
+
+                       if (sdd->sg)
+                               kfree(*per_cpu_ptr(sdd->sg, j));
+                       if (sdd->sgp)
+                               kfree(*per_cpu_ptr(sdd->sgp, j));
                }
                free_percpu(sdd->sd);
+               sdd->sd = NULL;
                free_percpu(sdd->sg);
+               sdd->sg = NULL;
                free_percpu(sdd->sgp);
+               sdd->sgp = NULL;
        }
 }
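
The reworked __sdt_free() above is defensive about partial setup: the per-cpu arrays sdd->sd, sdd->sg and sdd->sgp may not all have been allocated if __sdt_alloc() failed part way, so each array is checked before its per-cpu slots are dereferenced, and each pointer is cleared after free_percpu() so a repeated teardown is a no-op. A small self-contained sketch of the same pattern, with invented names that are not the kernel's API:

	#include <stdlib.h>

	/* Illustration only: a structure whose slot array may never have
	 * been allocated. */
	struct table {
		int **slots;	/* NULL when the earlier allocation failed */
		int nslots;
	};

	/* Free only what was actually allocated and clear the pointer
	 * afterwards, so calling the teardown a second time is harmless. */
	static void table_free(struct table *t)
	{
		int i;

		if (t->slots) {
			for (i = 0; i < t->nslots; i++)
				free(t->slots[i]);	/* free(NULL) is a no-op */
			free(t->slots);
		}
		t->slots = NULL;
	}
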
 
@@ -7049,6 +7063,7 @@ void __init sched_init(void)
        /* May be allocated at isolcpus cmdline parse time */
        if (cpu_isolated_map == NULL)
                zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+       idle_thread_set_boot_cpu();
 #endif
        init_sched_fair_class();