sched/numa: Reschedule task on preferred NUMA node once selected
author Mel Gorman <mgorman@suse.de>
	Mon, 7 Oct 2013 10:29:02 +0000 (11:29 +0100)
committer Ingo Molnar <mingo@kernel.org>
	Wed, 9 Oct 2013 10:40:28 +0000 (12:40 +0200)
A preferred node is selected based on the node on which the most NUMA
hinting faults were incurred. There is no guarantee that the task is
running on that node at the time, so this patch reschedules the task to
run on the idlest CPU of the preferred node as soon as the node is
selected. This avoids waiting for the load balancer to make a decision.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-25-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h
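
In outline, the patch does two things: it finds the least-loaded CPU on
the preferred node (find_idlest_cpu_node() in fair.c below), then hands
the task over via the stopper machinery (migrate_task_to() in core.c).
A minimal standalone C sketch of the selection step, with hypothetical
cpu_load[] and cpu_node[] arrays standing in for the kernel's per-CPU
runqueue state:

    #include <limits.h>
    #include <stdio.h>

    #define NR_CPUS 8

    /* Hypothetical stand-ins for the kernel's per-CPU load and topology. */
    static unsigned long cpu_load[NR_CPUS] = { 40, 10, 75, 5, 90, 20, 60, 30 };
    static int cpu_node[NR_CPUS]           = {  0,  0,  0, 0,  1,  1,  1,  1 };

    /* Same shape as find_idlest_cpu_node(): least-loaded CPU on node nid,
     * falling back to the caller's CPU if the node has no CPUs. */
    static int pick_idlest_cpu(int this_cpu, int nid)
    {
            unsigned long load, min_load = ULONG_MAX;
            int i, idlest_cpu = this_cpu;

            for (i = 0; i < NR_CPUS; i++) {
                    if (cpu_node[i] != nid)
                            continue;
                    load = cpu_load[i];
                    if (load < min_load) {
                            min_load = load;
                            idlest_cpu = i;
                    }
            }
            return idlest_cpu;
    }

    int main(void)
    {
            /* Task currently on CPU 2 (node 0); preferred node is 1.
             * Prints "migrate to CPU 5", the least-loaded CPU on node 1. */
            printf("migrate to CPU %d\n", pick_idlest_cpu(2, 1));
            return 0;
    }

In the kernel, the load figure comes from weighted_cpuload() under
rcu_read_lock(), and the migration itself is performed by a
high-priority stopper thread via stop_one_cpu(), which can safely bump
the task off its current CPU.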

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7e6b6f9c5f6b888c93e38f735c969ae6d7ead19..66b878e945548d1415e545489efcb603f6d389e1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4348,6 +4348,25 @@ fail:
        return ret;
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+/* Migrate current task p to target_cpu */
+int migrate_task_to(struct task_struct *p, int target_cpu)
+{
+       struct migration_arg arg = { p, target_cpu };
+       int curr_cpu = task_cpu(p);
+
+       if (curr_cpu == target_cpu)
+               return 0;
+
+       if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
+               return -EINVAL;
+
+       /* TODO: This is not properly updating schedstats */
+
+       return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
+}
+#endif
+
 /*
  * migration_cpu_stop - this will be executed by a highprio stopper thread
  * and performs thread migration by bumping thread off CPU then
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 89431248d33ddc12cd6ef00c540fec58813bc56b..8b15e9e1d1b889b51c37ae10b9e579b12ff78936 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -886,6 +886,31 @@ static unsigned int task_scan_max(struct task_struct *p)
  */
 unsigned int sysctl_numa_balancing_settle_count __read_mostly = 3;
 
+static unsigned long weighted_cpuload(const int cpu);
+
+
+static int
+find_idlest_cpu_node(int this_cpu, int nid)
+{
+       unsigned long load, min_load = ULONG_MAX;
+       int i, idlest_cpu = this_cpu;
+
+       BUG_ON(cpu_to_node(this_cpu) == nid);
+
+       rcu_read_lock();
+       for_each_cpu(i, cpumask_of_node(nid)) {
+               load = weighted_cpuload(i);
+
+               if (load < min_load) {
+                       min_load = load;
+                       idlest_cpu = i;
+               }
+       }
+       rcu_read_unlock();
+
+       return idlest_cpu;
+}
+
 static void task_numa_placement(struct task_struct *p)
 {
        int seq, nid, max_nid = -1;
@@ -916,10 +941,29 @@ static void task_numa_placement(struct task_struct *p)
                }
        }
 
-       /* Update the tasks preferred node if necessary */
+       /*
+        * Record the preferred node as the node with the most faults,
+        * requeue the task to be running on the idlest CPU on the
+        * preferred node and reset the scanning rate to recheck
+        * the working set placement.
+        */
        if (max_faults && max_nid != p->numa_preferred_nid) {
+               int preferred_cpu;
+
+               /*
+                * If the task is not on the preferred node then find the most
+                * idle CPU to migrate to.
+                */
+               preferred_cpu = task_cpu(p);
+               if (cpu_to_node(preferred_cpu) != max_nid) {
+                       preferred_cpu = find_idlest_cpu_node(preferred_cpu,
+                                                            max_nid);
+               }
+
+               /* Update the preferred nid and migrate task if possible */
                p->numa_preferred_nid = max_nid;
                p->numa_migrate_seq = 0;
+               migrate_task_to(p, preferred_cpu);
        }
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 199099c7aa229db80d7fb549cad3b600d6b377dd..66458c902d84ad9706834b6fb51cd3f05f1c1c97 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -557,6 +557,7 @@ static inline u64 rq_clock_task(struct rq *rq)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
+extern int migrate_task_to(struct task_struct *p, int cpu);
 static inline void task_numa_free(struct task_struct *p)
 {
        kfree(p->numa_faults);