From f809ca9a554dda49fb264c79e31c722e0b063ff8 Mon Sep 17 00:00:00 2001
From: Mel Gorman <mgorman@suse.de>
Date: Mon, 7 Oct 2013 11:28:57 +0100
Subject: [PATCH] sched/numa: Track NUMA hinting faults on per-node basis

This patch tracks which nodes NUMA hinting faults were incurred on.
This information is later used to schedule a task on the node storing
the pages most frequently faulted by the task.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-20-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/sched.h |  2 ++
 kernel/sched/core.c   |  3 +++
 kernel/sched/fair.c   | 11 ++++++++++-
 kernel/sched/sched.h  | 12 ++++++++++++
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index fdcb4c85507..a810e95bca2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1342,6 +1342,8 @@ struct task_struct {
 	unsigned int numa_scan_period_max;
 	u64 node_stamp;			/* migration stamp */
 	struct callback_head numa_work;
+
+	unsigned long *numa_faults;
 #endif /* CONFIG_NUMA_BALANCING */
 
 	struct rcu_head rcu;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index aee7e4dcbbf..6808d35fd7e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1634,6 +1634,7 @@ static void __sched_fork(struct task_struct *p)
 	p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
 	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
 	p->numa_work.next = &p->numa_work;
+	p->numa_faults = NULL;
 #endif /* CONFIG_NUMA_BALANCING */
 }
 
@@ -1892,6 +1893,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	if (mm)
 		mmdrop(mm);
 	if (unlikely(prev_state == TASK_DEAD)) {
+		task_numa_free(prev);
+
 		/*
 		 * Remove function-return probe instances associated with this
 		 * task and put them back on the free list.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c6c330245f7..0bb3e0aa110 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -902,7 +902,14 @@ void task_numa_fault(int node, int pages, bool migrated)
 	if (!numabalancing_enabled)
 		return;
 
-	/* FIXME: Allocate task-specific structure for placement policy here */
+	/* Allocate buffer to track faults on a per-node basis */
+	if (unlikely(!p->numa_faults)) {
+		int size = sizeof(*p->numa_faults) * nr_node_ids;
+
+		p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
+		if (!p->numa_faults)
+			return;
+	}
 
 	/*
 	 * If pages are properly placed (did not migrate) then scan slower.
@@ -918,6 +925,8 @@ void task_numa_fault(int node, int pages, bool migrated)
 	}
 
 	task_numa_placement(p);
+
+	p->numa_faults[node] += pages;
 }
 
 static void reset_ptenuma_scan(struct task_struct *p)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e82484db769..199099c7aa2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -6,6 +6,7 @@
 #include <linux/spinlock.h>
 #include <linux/stop_machine.h>
 #include <linux/tick.h>
+#include <linux/slab.h>
 
 #include "cpupri.h"
 #include "cpuacct.h"
@@ -555,6 +556,17 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+static inline void task_numa_free(struct task_struct *p)
+{
+	kfree(p->numa_faults);
+}
+#else /* CONFIG_NUMA_BALANCING */
+static inline void task_numa_free(struct task_struct *p)
+{
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 #ifdef CONFIG_SMP
 
 #define rcu_dereference_check_sched_domain(p) \
-- 
2.43.2
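
For illustration, here is a minimal userspace sketch of the accounting
pattern this patch introduces: a lazily allocated, zero-initialised
per-node counter array that accumulates faulted pages, with best-effort
semantics when the allocation fails. Every name in it (record_fault,
task_counts, numa_node_count, preferred_node) is a hypothetical
stand-in for the kernel's task_numa_fault(), p->numa_faults and
nr_node_ids, not a real API; preferred_node() merely suggests how later
patches in the series use the counters to place the task.

	/*
	 * Hypothetical userspace analogue of the per-node NUMA fault
	 * accounting added by this patch. Not kernel code.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	static int numa_node_count = 4;		/* stand-in for nr_node_ids */
	static unsigned long *task_counts;	/* stand-in for p->numa_faults */

	/* Stand-in for task_numa_fault(): lazily allocate, then accumulate. */
	static void record_fault(int node, int pages)
	{
		if (!task_counts) {
			/* zeroed allocation, like kzalloc(); may fail */
			task_counts = calloc(numa_node_count,
					     sizeof(*task_counts));
			if (!task_counts)
				return;	/* best-effort, as in the patch */
		}
		task_counts[node] += pages;
	}

	/* What a placement policy can derive: the most-faulted node. */
	static int preferred_node(void)
	{
		int node, best = 0;

		if (!task_counts)
			return 0;
		for (node = 1; node < numa_node_count; node++)
			if (task_counts[node] > task_counts[best])
				best = node;
		return best;
	}

	int main(void)
	{
		record_fault(0, 8);
		record_fault(2, 32);
		record_fault(2, 16);
		printf("preferred node: %d\n", preferred_node());	/* 2 */
		free(task_counts);
		return 0;
	}

The kernel version passes __GFP_NOWARN and simply returns when the
allocation fails, so fault accounting is best-effort rather than
mandatory; the sketch mirrors that by silently dropping the sample.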