sched: Separate out build of NUMA sched groups from __build_sched_domains
author Andreas Herrmann <andreas.herrmann3@amd.com>
Tue, 18 Aug 2009 11:01:11 +0000 (13:01 +0200)
committer Ingo Molnar <mingo@elte.hu>
Tue, 18 Aug 2009 16:35:44 +0000 (18:35 +0200)
... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818110111.GL29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 52c1953bc41dc3a0251d49f7f6e780366b9d3c15..c1ce884a0163d6c525b0f31334c1f7f1e3431e79 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8246,6 +8246,71 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
                sg = sg->next;
        } while (sg != group_head);
 }
+
+static int build_numa_sched_groups(struct s_data *d,
+                                  const struct cpumask *cpu_map, int num)
+{
+       struct sched_domain *sd;
+       struct sched_group *sg, *prev;
+       int n, j;
+
+       cpumask_clear(d->covered);
+       cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+       if (cpumask_empty(d->nodemask)) {
+               d->sched_group_nodes[num] = NULL;
+               goto out;
+       }
+
+       sched_domain_node_span(num, d->domainspan);
+       cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+       sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                         GFP_KERNEL, num);
+       if (!sg) {
+               printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+                      num);
+               return -ENOMEM;
+       }
+       d->sched_group_nodes[num] = sg;
+
+       for_each_cpu(j, d->nodemask) {
+               sd = &per_cpu(node_domains, j).sd;
+               sd->groups = sg;
+       }
+
+       sg->__cpu_power = 0;
+       cpumask_copy(sched_group_cpus(sg), d->nodemask);
+       sg->next = sg;
+       cpumask_or(d->covered, d->covered, d->nodemask);
+
+       prev = sg;
+       for (j = 0; j < nr_node_ids; j++) {
+               n = (num + j) % nr_node_ids;
+               cpumask_complement(d->notcovered, d->covered);
+               cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+               cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+               if (cpumask_empty(d->tmpmask))
+                       break;
+               cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+               if (cpumask_empty(d->tmpmask))
+                       continue;
+               sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                                 GFP_KERNEL, num);
+               if (!sg) {
+                       printk(KERN_WARNING
+                              "Can not alloc domain group for node %d\n", j);
+                       return -ENOMEM;
+               }
+               sg->__cpu_power = 0;
+               cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+               sg->next = prev->next;
+               cpumask_or(d->covered, d->covered, d->tmpmask);
+               prev->next = sg;
+               prev = sg;
+       }
+out:
+       return 0;
+}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_NUMA
@@ -8652,70 +8717,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
        if (d.sd_allnodes)
                build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
 
-       for (i = 0; i < nr_node_ids; i++) {
-               /* Set up node groups */
-               struct sched_group *sg, *prev;
-               int j;
-
-               cpumask_clear(d.covered);
-               cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
-               if (cpumask_empty(d.nodemask)) {
-                       d.sched_group_nodes[i] = NULL;
-                       continue;
-               }
-
-               sched_domain_node_span(i, d.domainspan);
-               cpumask_and(d.domainspan, d.domainspan, cpu_map);
-
-               sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
-                                 GFP_KERNEL, i);
-               if (!sg) {
-                       printk(KERN_WARNING "Can not alloc domain group for "
-                               "node %d\n", i);
+       for (i = 0; i < nr_node_ids; i++)
+               if (build_numa_sched_groups(&d, cpu_map, i))
                        goto error;
-               }
-               d.sched_group_nodes[i] = sg;
-               for_each_cpu(j, d.nodemask) {
-                       struct sched_domain *sd;
-
-                       sd = &per_cpu(node_domains, j).sd;
-                       sd->groups = sg;
-               }
-               sg->__cpu_power = 0;
-               cpumask_copy(sched_group_cpus(sg), d.nodemask);
-               sg->next = sg;
-               cpumask_or(d.covered, d.covered, d.nodemask);
-               prev = sg;
-
-               for (j = 0; j < nr_node_ids; j++) {
-                       int n = (i + j) % nr_node_ids;
-
-                       cpumask_complement(d.notcovered, d.covered);
-                       cpumask_and(d.tmpmask, d.notcovered, cpu_map);
-                       cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
-                       if (cpumask_empty(d.tmpmask))
-                               break;
-
-                       cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
-                       if (cpumask_empty(d.tmpmask))
-                               continue;
-
-                       sg = kmalloc_node(sizeof(struct sched_group) +
-                                         cpumask_size(),
-                                         GFP_KERNEL, i);
-                       if (!sg) {
-                               printk(KERN_WARNING
-                               "Can not alloc domain group for node %d\n", j);
-                               goto error;
-                       }
-                       sg->__cpu_power = 0;
-                       cpumask_copy(sched_group_cpus(sg), d.tmpmask);
-                       sg->next = prev->next;
-                       cpumask_or(d.covered, d.covered, d.tmpmask);
-                       prev->next = sg;
-                       prev = sg;
-               }
-       }
 #endif
 
        /* Calculate CPU power for physical packages and nodes */
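
The patch follows a simple refactoring pattern: the body of the per-node loop in
__build_sched_domains() becomes a helper that returns 0 on success or -ENOMEM on
allocation failure, and the caller collapses to a short loop with a single error
exit. The following is a minimal, self-contained userspace sketch of that pattern,
for illustration only; the names (build_group_for_node, demo_state, build_all_groups)
are hypothetical and are not part of the kernel API.

/*
 * Sketch of the "extract loop body into a helper returning 0/-ENOMEM" pattern
 * applied by this patch. Plain C, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define NR_NODES 4

struct demo_state {
	int *group_for_node[NR_NODES];
};

/* Former loop body: allocate and initialize the group for one node. */
static int build_group_for_node(struct demo_state *s, int node)
{
	int *grp = malloc(sizeof(*grp));

	if (!grp) {
		fprintf(stderr, "Can not alloc group for node %d\n", node);
		return -ENOMEM;
	}
	*grp = node;
	s->group_for_node[node] = grp;
	return 0;
}

/* Caller keeps only the loop and a single error path, as in the patch. */
static int build_all_groups(struct demo_state *s)
{
	int i;

	for (i = 0; i < NR_NODES; i++)
		if (build_group_for_node(s, i))
			goto error;
	return 0;
error:
	/* Undo the nodes that were already set up before the failure. */
	while (i--)
		free(s->group_for_node[i]);
	return -ENOMEM;
}

int main(void)
{
	struct demo_state s = { { NULL } };

	return build_all_groups(&s) ? EXIT_FAILURE : EXIT_SUCCESS;
}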