This patch builds on patch 2 and periodically decays each sched domain's
max newidle balance cost by approximately 1% per second. The rq's
max_idle_balance_cost value is decayed as well.
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1379096813-3032-4-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
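
For context on the "~1% per second" figure: each decay step multiplies the
tracked cost by 253/256 (about 0.9883), so a bit over 1% is shaved off per
second and the value halves in roughly a minute. A minimal standalone C
sketch of that arithmetic (illustration only, not part of the patch; the
starting value is made up):

#include <stdio.h>

int main(void)
{
	unsigned long long cost = 100000;	/* hypothetical newidle cost */
	int sec;

	/* one decay step per second: multiply by 253/256 (~1.2% off) */
	for (sec = 0; sec < 60; sec++)
		cost = cost * 253 / 256;

	/* after ~60 steps the cost has dropped to about half (~49000) */
	printf("decayed cost: %llu\n", cost);
	return 0;
}
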
.balance_interval = 1, \
.nr_balance_failed = 0, \
.max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#define cpu_to_node(cpu) ((void)(cpu), 0)
unsigned int nr_balance_failed; /* initialise to 0 */
u64 last_update;
+
+ /* idle_balance() stats */
+ unsigned long next_decay_max_lb_cost;
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
.balance_interval = 1, \
.smt_gain = 1178, /* 15% */ \
.max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#endif
#endif /* CONFIG_SCHED_SMT */
.last_balance = jiffies, \
.balance_interval = 1, \
.max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#endif
#endif /* CONFIG_SCHED_MC */
.last_balance = jiffies, \
.balance_interval = 1, \
.max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
/* Earliest time when we have to do rebalance again */
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
- int need_serialize;
+ int need_serialize, need_decay = 0;
+ u64 max_cost = 0;
update_blocked_averages(cpu);
rcu_read_lock();
for_each_domain(cpu, sd) {
+ /*
+ * Decay the newidle max times here because this is a regular
+ * visit to all the domains. Decay ~1% per second.
+ */
+ if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
+ sd->max_newidle_lb_cost =
+ (sd->max_newidle_lb_cost * 253) / 256;
+ sd->next_decay_max_lb_cost = jiffies + HZ;
+ need_decay = 1;
+ }
+ max_cost += sd->max_newidle_lb_cost;
+
if (!(sd->flags & SD_LOAD_BALANCE))
continue;
+ /*
+ * Stop the load balance at this level. There is another
+ * CPU in our sched group which is doing load balancing more
+ * actively.
+ */
+ if (!continue_balancing) {
+ if (need_decay)
+ continue;
+ break;
+ }
+
interval = sd->balance_interval;
if (idle != CPU_IDLE)
interval *= sd->busy_factor;
next_balance = sd->last_balance + interval;
update_next_balance = 1;
}
- /*
- * Stop the load balance at this level. There is another
- * CPU in our sched group which is doing load balancing more
- * actively.
- */
- if (!continue_balancing)
- break;
}
+ if (need_decay) {
+ /*
+ * Ensure the rq-wide value also decays but keep it at a
+ * reasonable floor to avoid funnies with rq->avg_idle.
+ */
+ rq->max_idle_balance_cost =
+ max((u64)sysctl_sched_migration_cost, max_cost);
+ }
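
Taken together, the per-domain decay and the rq-wide floor can be
summarized by the following standalone sketch (simplified stand-in types
and names; the real code walks the domains via for_each_domain() and uses
sysctl_sched_migration_cost as the floor):

struct dom {
	unsigned long long max_newidle_lb_cost;	/* worst newidle balance cost seen */
	unsigned long next_decay_max_lb_cost;	/* jiffies of the next allowed decay */
	struct dom *parent;
};

/* Sketch: decay each domain's max cost ~1% per second and refresh the
 * rq-wide maximum, never letting it drop below the given floor. */
static void decay_max_lb_costs(struct dom *sd, unsigned long now,
			       unsigned long hz, unsigned long long floor,
			       unsigned long long *rq_max_cost)
{
	unsigned long long max_cost = 0;
	int need_decay = 0;

	for (; sd; sd = sd->parent) {
		if (now > sd->next_decay_max_lb_cost) {	/* stands in for time_after() */
			sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256;
			sd->next_decay_max_lb_cost = now + hz;
			need_decay = 1;
		}
		max_cost += sd->max_newidle_lb_cost;
	}

	if (need_decay)
		*rq_max_cost = max_cost > floor ? max_cost : floor;
}

The floor mirrors the patch's max((u64)sysctl_sched_migration_cost,
max_cost): it keeps the rq-wide value from decaying below the migration
cost and, as the in-line comment notes, avoids odd interactions with
rq->avg_idle.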