X-Git-Url: http://pileus.org/git/?a=blobdiff_plain;ds=sidebyside;f=kernel%2Fsched_fair.c;h=9971831b560e966281b8ba4c70a7bd8f3dffafbc;hb=d182c10c842007984e12b3b816df2b10d997cc8e;hp=926491f7f8037ea5e9d4cdf8bf35bdc897262f9f;hpb=8465e792e82c567b80358e38732164b770ed4b7f;p=~andy%2Flinux diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 926491f7f80..9971831b560 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -25,14 +25,12 @@ * (default: 20ms, units: nanoseconds) * * NOTE: this latency value is not the same as the concept of - * 'timeslice length' - timeslices in CFS are of variable length. - * (to see the precise effective timeslice length of your workload, - * run vmstat and monitor the context-switches field) + * 'timeslice length' - timeslices in CFS are of variable length + * and have no persistent notion like in traditional, time-slice + * based scheduling concepts. * - * On SMP systems the value of this is multiplied by the log2 of the - * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way - * systems, 4x on 8-way systems, 5x on 16-way systems, etc.) - * Targeted preemption latency for CPU-bound tasks: + * (to see the precise effective timeslice length of your workload, + * run vmstat and monitor the context-switches (cs) field) */ const_debug unsigned int sysctl_sched_latency = 20000000ULL; @@ -46,7 +44,7 @@ const_debug unsigned int sysctl_sched_child_runs_first = 1; * Minimal preemption granularity for CPU-bound tasks: * (default: 2 msec, units: nanoseconds) */ -unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL; +const_debug unsigned int sysctl_sched_nr_latency = 20; /* * sys_sched_yield() compat mode @@ -58,25 +56,25 @@ unsigned int __read_mostly sysctl_sched_compat_yield; /* * SCHED_BATCH wake-up granularity. - * (default: 25 msec, units: nanoseconds) + * (default: 10 msec, units: nanoseconds) * * This option delays the preemption effects of decoupled workloads * and reduces their over-scheduling. Synchronous workloads will still * have immediate wakeup/sleep latencies. */ -const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL; +const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL; /* * SCHED_OTHER wake-up granularity. - * (default: 1 msec, units: nanoseconds) + * (default: 10 msec, units: nanoseconds) * * This option delays the preemption effects of decoupled workloads * and reduces their over-scheduling. Synchronous workloads will still * have immediate wakeup/sleep latencies. 
*/ -const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL; +const_debug unsigned int sysctl_sched_wakeup_granularity = 10000000UL; -extern struct sched_class fair_sched_class; +const_debug unsigned int sysctl_sched_migration_cost = 500000UL; /************************************************************** * CFS operations on generic schedulable entities: @@ -114,28 +112,25 @@ static inline struct task_struct *task_of(struct sched_entity *se) * Scheduling class tree data structure manipulation methods: */ -static inline u64 -max_vruntime(u64 min_vruntime, u64 vruntime) +static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime) { - if ((vruntime > min_vruntime) || - (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50))) + s64 delta = (s64)(vruntime - min_vruntime); + if (delta > 0) min_vruntime = vruntime; return min_vruntime; } -static inline void -set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost) +static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) { - struct sched_entity *se; + s64 delta = (s64)(vruntime - min_vruntime); + if (delta < 0) + min_vruntime = vruntime; - cfs_rq->rb_leftmost = leftmost; - if (leftmost) - se = rb_entry(leftmost, struct sched_entity, run_node); + return min_vruntime; } -static inline s64 -entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) +static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) { return se->vruntime - cfs_rq->min_vruntime; } @@ -143,8 +138,7 @@ entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) /* * Enqueue an entity into the rb-tree: */ -static void -__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) +static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; struct rb_node *parent = NULL; @@ -175,17 +169,16 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) * used): */ if (leftmost) - set_leftmost(cfs_rq, &se->run_node); + cfs_rq->rb_leftmost = &se->run_node; rb_link_node(&se->run_node, parent, link); rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); } -static void -__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) +static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { if (cfs_rq->rb_leftmost == &se->run_node) - set_leftmost(cfs_rq, rb_next(&se->run_node)); + cfs_rq->rb_leftmost = rb_next(&se->run_node); rb_erase(&se->run_node, &cfs_rq->tasks_timeline); } @@ -219,11 +212,19 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) * Scheduling class statistics methods: */ + +/* + * The idea is to set a period in which each task runs once. + * + * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch + * this period because otherwise the slices get too small. + * + * p = (nr <= nl) ? l : l*nr/nl + */ static u64 __sched_period(unsigned long nr_running) { u64 period = sysctl_sched_latency; - unsigned long nr_latency = - sysctl_sched_latency / sysctl_sched_min_granularity; + unsigned long nr_latency = sysctl_sched_nr_latency; if (unlikely(nr_running > nr_latency)) { period *= nr_running; @@ -233,23 +234,45 @@ static u64 __sched_period(unsigned long nr_running) return period; } +/* + * We calculate the wall-time slice from the period by taking a part + * proportional to the weight. 
+ * + * s = p*w/rw + */ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) { - u64 period = __sched_period(cfs_rq->nr_running); + u64 slice = __sched_period(cfs_rq->nr_running); - period *= se->load.weight; - do_div(period, cfs_rq->load.weight); + slice *= se->load.weight; + do_div(slice, cfs_rq->load.weight); - return period; + return slice; } -static u64 __sched_vslice(unsigned long nr_running) +/* + * We calculate the vruntime slice. + * + * vs = s/w = p/rw + */ +static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running) { - u64 period = __sched_period(nr_running); + u64 vslice = __sched_period(nr_running); - do_div(period, nr_running); + do_div(vslice, rq_weight); - return period; + return vslice; +} + +static u64 sched_vslice(struct cfs_rq *cfs_rq) +{ + return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running); +} + +static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) +{ + return __sched_vslice(cfs_rq->load.weight + se->load.weight, + cfs_rq->nr_running + 1); } /* @@ -261,7 +284,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, unsigned long delta_exec) { unsigned long delta_exec_weighted; - u64 next_vruntime, min_vruntime; + u64 vruntime; schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max)); @@ -279,19 +302,13 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, * value tracking the leftmost vruntime in the tree. */ if (first_fair(cfs_rq)) { - next_vruntime = __pick_next_entity(cfs_rq)->vruntime; - - /* min_vruntime() := !max_vruntime() */ - min_vruntime = max_vruntime(curr->vruntime, next_vruntime); - if (min_vruntime == next_vruntime) - min_vruntime = curr->vruntime; - else - min_vruntime = next_vruntime; + vruntime = min_vruntime(curr->vruntime, + __pick_next_entity(cfs_rq)->vruntime); } else - min_vruntime = curr->vruntime; + vruntime = curr->vruntime; cfs_rq->min_vruntime = - max_vruntime(cfs_rq->min_vruntime, min_vruntime); + max_vruntime(cfs_rq->min_vruntime, vruntime); } static void update_curr(struct cfs_rq *cfs_rq) @@ -320,17 +337,6 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) schedstat_set(se->wait_start, rq_of(cfs_rq)->clock); } -static inline unsigned long -calc_weighted(unsigned long delta, struct sched_entity *se) -{ - unsigned long weight = se->load.weight; - - if (unlikely(weight != NICE_0_LOAD)) - return (u64)delta * se->load.weight >> NICE_0_SHIFT; - else - return delta; -} - /* * Task is being enqueued - update stats: */ @@ -355,7 +361,6 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) static inline void update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) { - update_curr(cfs_rq); /* * Mark the end of the wait period if dequeueing a * waiting task: @@ -376,15 +381,6 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) se->exec_start = rq_of(cfs_rq)->clock; } -/* - * We are descheduling a task - update its stats: - */ -static inline void -update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se) -{ - se->exec_start = 0; -} - /************************************************** * Scheduling class queueing methods: */ @@ -467,28 +463,24 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) vruntime = cfs_rq->min_vruntime; - if (sched_feat(USE_TREE_AVG)) { + if (sched_feat(TREE_AVG)) { struct sched_entity *last = __pick_last_entity(cfs_rq); if (last) { vruntime += last->vruntime; vruntime >>= 1; } } else if (sched_feat(APPROX_AVG) 
&& cfs_rq->nr_running) - vruntime += __sched_vslice(cfs_rq->nr_running)/2; + vruntime += sched_vslice(cfs_rq)/2; if (initial && sched_feat(START_DEBIT)) - vruntime += __sched_vslice(cfs_rq->nr_running + 1); + vruntime += sched_vslice_add(cfs_rq, se); if (!initial) { - if (sched_feat(NEW_FAIR_SLEEPERS)) { - s64 latency = cfs_rq->min_vruntime - se->last_min_vruntime; - if (latency < 0 || !cfs_rq->nr_running) - latency = 0; - else - latency = min_t(s64, latency, sysctl_sched_latency); - vruntime -= latency; - } - vruntime = max(vruntime, se->vruntime); + if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) && + task_of(se)->policy != SCHED_BATCH) + vruntime -= sysctl_sched_latency; + + vruntime = max_t(s64, vruntime, se->vruntime); } se->vruntime = vruntime; @@ -499,12 +491,11 @@ static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup) { /* - * Update the fair clock. + * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); if (wakeup) { - /* se->vruntime += cfs_rq->min_vruntime; */ place_entity(cfs_rq, se, 0); enqueue_sleeper(cfs_rq, se); } @@ -519,8 +510,14 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup) static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) { + /* + * Update run-time statistics of the 'current'. + */ + update_curr(cfs_rq); + update_stats_dequeue(cfs_rq, se); if (sleep) { + se->peer_preempt = 0; #ifdef CONFIG_SCHEDSTATS if (entity_is_task(se)) { struct task_struct *tsk = task_of(se); @@ -531,8 +528,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) se->block_start = rq_of(cfs_rq)->clock; } #endif - /* se->vruntime = entity_key(cfs_rq, se); */ - se->last_min_vruntime = cfs_rq->min_vruntime; } if (se != cfs_rq->curr) @@ -550,8 +545,10 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; - if (delta_exec > ideal_runtime) + if (delta_exec > ideal_runtime || + (sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt)) resched_task(rq_of(cfs_rq)->curr); + curr->peer_preempt = 0; } static void @@ -586,9 +583,12 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) { - struct sched_entity *se = __pick_next_entity(cfs_rq); + struct sched_entity *se = NULL; - set_next_entity(cfs_rq, se); + if (first_fair(cfs_rq)) { + se = __pick_next_entity(cfs_rq); + set_next_entity(cfs_rq, se); + } return se; } @@ -602,8 +602,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) if (prev->on_rq) update_curr(cfs_rq); - update_stats_curr_end(cfs_rq, prev); - check_spread(cfs_rq, prev); if (prev->on_rq) { update_stats_wait_start(cfs_rq, prev); @@ -620,7 +618,7 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) */ update_curr(cfs_rq); - if (cfs_rq->nr_running > 1) + if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT)) check_preempt_tick(cfs_rq, curr); } @@ -663,15 +661,21 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu) #define for_each_leaf_cfs_rq(rq, cfs_rq) \ list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) -/* Do the two (enqueued) tasks belong to the same group ? */ -static inline int is_same_group(struct task_struct *curr, struct task_struct *p) +/* Do the two (enqueued) entities belong to the same group ? 
*/ +static inline int +is_same_group(struct sched_entity *se, struct sched_entity *pse) { - if (curr->se.cfs_rq == p->se.cfs_rq) + if (se->cfs_rq == pse->cfs_rq) return 1; return 0; } +static inline struct sched_entity *parent_entity(struct sched_entity *se) +{ + return se->parent; +} + #else /* CONFIG_FAIR_GROUP_SCHED */ #define for_each_sched_entity(se) \ @@ -704,11 +708,17 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu) #define for_each_leaf_cfs_rq(rq, cfs_rq) \ for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) -static inline int is_same_group(struct task_struct *curr, struct task_struct *p) +static inline int +is_same_group(struct sched_entity *se, struct sched_entity *pse) { return 1; } +static inline struct sched_entity *parent_entity(struct sched_entity *se) +{ + return NULL; +} + #endif /* CONFIG_FAIR_GROUP_SCHED */ /* @@ -726,6 +736,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup) break; cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, wakeup); + wakeup = 1; } } @@ -745,6 +756,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) break; + sleep = 1; } } @@ -756,9 +768,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) static void yield_task_fair(struct rq *rq) { struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr); - struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; struct sched_entity *rightmost, *se = &rq->curr->se; - struct rb_node *parent; /* * Are we the only task in the tree? @@ -769,42 +779,28 @@ static void yield_task_fair(struct rq *rq) if (likely(!sysctl_sched_compat_yield)) { __update_rq_clock(rq); /* - * Dequeue and enqueue the task to update its - * position within the tree: + * Update run-time statistics of the 'current'. */ - dequeue_entity(cfs_rq, se, 0); - enqueue_entity(cfs_rq, se, 0); + update_curr(cfs_rq); return; } /* * Find the rightmost entry in the rbtree: */ - do { - parent = *link; - link = &parent->rb_right; - } while (*link); - - rightmost = rb_entry(parent, struct sched_entity, run_node); + rightmost = __pick_last_entity(cfs_rq); /* * Already in the rightmost position? */ - if (unlikely(rightmost == se)) + if (unlikely(rightmost->vruntime < se->vruntime)) return; /* * Minimally necessary key value to be last in the tree: + * Upon rescheduling, sched_class::put_prev_task() will place + * 'current' within the tree based on its new key value. 
*/ se->vruntime = rightmost->vruntime + 1; - - if (cfs_rq->rb_leftmost == &se->run_node) - cfs_rq->rb_leftmost = rb_next(&se->run_node); - /* - * Relink the task to the rightmost position: - */ - rb_erase(&se->run_node, &cfs_rq->tasks_timeline); - rb_link_node(&se->run_node, parent, link); - rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); } /* @@ -814,6 +810,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) { struct task_struct *curr = rq->curr; struct cfs_rq *cfs_rq = task_cfs_rq(curr); + struct sched_entity *se = &curr->se, *pse = &p->se; + s64 delta, gran; if (unlikely(rt_prio(p->prio))) { update_rq_clock(rq); @@ -821,11 +819,30 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) resched_task(curr); return; } - if (is_same_group(curr, p)) { - s64 delta = curr->se.vruntime - p->se.vruntime; + /* + * Batch tasks do not preempt (their preemption is driven by + * the tick): + */ + if (unlikely(p->policy == SCHED_BATCH)) + return; + + if (sched_feat(WAKEUP_PREEMPT)) { + while (!is_same_group(se, pse)) { + se = parent_entity(se); + pse = parent_entity(pse); + } - if (delta > (s64)sysctl_sched_wakeup_granularity) - resched_task(curr); + delta = se->vruntime - pse->vruntime; + gran = sysctl_sched_wakeup_granularity; + if (unlikely(se->load.weight != NICE_0_LOAD)) + gran = calc_delta_fair(gran, &se->load); + + if (delta > gran) { + int now = !sched_feat(PREEMPT_RESTRICT); + + if (now || p->prio < curr->prio || !se->peer_preempt++) + resched_task(curr); + } } } @@ -859,6 +876,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) } } +#ifdef CONFIG_SMP /************************************************** * Fair scheduling class load-balancing methods: */ @@ -870,7 +888,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) * achieve that by always pre-iterating before returning * the current task: */ -static inline struct task_struct * +static struct task_struct * __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr) { struct task_struct *p; @@ -919,12 +937,11 @@ static int cfs_rq_best_prio(struct cfs_rq *cfs_rq) static unsigned long load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, + unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio) { struct cfs_rq *busy_cfs_rq; - unsigned long load_moved, total_nr_moved = 0, nr_moved; long rem_load_move = max_load_move; struct rq_iterator cfs_rq_iterator; @@ -952,25 +969,48 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, #else # define maxload rem_load_move #endif - /* pass busy_cfs_rq argument into + /* + * pass busy_cfs_rq argument into * load_balance_[start|next]_fair iterators */ cfs_rq_iterator.arg = busy_cfs_rq; - nr_moved = balance_tasks(this_rq, this_cpu, busiest, - max_nr_move, maxload, sd, idle, all_pinned, - &load_moved, this_best_prio, &cfs_rq_iterator); + rem_load_move -= balance_tasks(this_rq, this_cpu, busiest, + maxload, sd, idle, all_pinned, + this_best_prio, + &cfs_rq_iterator); - total_nr_moved += nr_moved; - max_nr_move -= nr_moved; - rem_load_move -= load_moved; - - if (max_nr_move <= 0 || rem_load_move <= 0) + if (rem_load_move <= 0) break; } return max_load_move - rem_load_move; } +static int +move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle) +{ + struct cfs_rq *busy_cfs_rq; + 
struct rq_iterator cfs_rq_iterator; + + cfs_rq_iterator.start = load_balance_start_fair; + cfs_rq_iterator.next = load_balance_next_fair; + + for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { + /* + * pass busy_cfs_rq argument into + * load_balance_[start|next]_fair iterators + */ + cfs_rq_iterator.arg = busy_cfs_rq; + if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, + &cfs_rq_iterator)) + return 1; + } + + return 0; +} +#endif + /* * scheduler tick hitting a task of our scheduling class: */ @@ -998,13 +1038,14 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) { struct cfs_rq *cfs_rq = task_cfs_rq(p); struct sched_entity *se = &p->se, *curr = cfs_rq->curr; + int this_cpu = smp_processor_id(); sched_info_queued(p); update_curr(cfs_rq); place_entity(cfs_rq, se, 1); - if (sysctl_sched_child_runs_first && + if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) && curr->vruntime < se->vruntime) { /* * Upon rescheduling, sched_class::put_prev_task() will place @@ -1013,11 +1054,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) swap(curr->vruntime, se->vruntime); } - update_stats_enqueue(cfs_rq, se); - check_spread(cfs_rq, se); - check_spread(cfs_rq, curr); - __enqueue_entity(cfs_rq, se); - account_entity_enqueue(cfs_rq, se); + se->peer_preempt = 0; + enqueue_task_fair(rq, p, 0); resched_task(rq->curr); } @@ -1037,7 +1075,8 @@ static void set_curr_task_fair(struct rq *rq) /* * All the scheduling class methods: */ -struct sched_class fair_sched_class __read_mostly = { +static const struct sched_class fair_sched_class = { + .next = &idle_sched_class, .enqueue_task = enqueue_task_fair, .dequeue_task = dequeue_task_fair, .yield_task = yield_task_fair, @@ -1047,7 +1086,10 @@ struct sched_class fair_sched_class __read_mostly = { .pick_next_task = pick_next_task_fair, .put_prev_task = put_prev_task_fair, +#ifdef CONFIG_SMP .load_balance = load_balance_fair, + .move_one_task = move_one_task_fair, +#endif .set_curr_task = set_curr_task_fair, .task_tick = task_tick_fair,
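
Illustrative, standalone sketch (not part of the patch itself) of the arithmetic the new comments above describe: the period/slice/vslice formulas (p = (nr <= nl) ? l : l*nr/nl, s = p*w/rw, vs = s/w = p/rw) and the signed-delta vruntime comparison that replaces the old magic-constant wrap check. The two default constants are the ones set in this diff; the three task weights in main() are hypothetical examples (1024 is the nice-0 load weight).

/* Build with: cc -std=c99 -o slice_demo slice_demo.c */
#include <stdio.h>
#include <stdint.h>

static const uint64_t sysctl_sched_latency = 20000000ULL;	/* 20 ms, from this patch */
static const unsigned long sysctl_sched_nr_latency = 20;	/* from this patch */

/* p = (nr <= nl) ? l : l*nr/nl */
static uint64_t sched_period(unsigned long nr_running)
{
	uint64_t period = sysctl_sched_latency;

	if (nr_running > sysctl_sched_nr_latency) {
		period *= nr_running;
		period /= sysctl_sched_nr_latency;
	}
	return period;
}

/* s = p*w/rw: wall-time slice, proportional to the entity's weight */
static uint64_t sched_slice(uint64_t period, unsigned long w, unsigned long rw)
{
	return period * w / rw;
}

/* vs = s/w = p/rw: the same slice expressed in vruntime units */
static uint64_t sched_vslice(uint64_t period, unsigned long rw)
{
	return period / rw;
}

/*
 * Wrap-safe comparison: the sign of the 64-bit difference keeps working
 * when vruntime overflows and wraps, which the removed
 * "(min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50))" check only
 * approximated.
 */
static uint64_t max_vruntime(uint64_t min_vruntime, uint64_t vruntime)
{
	int64_t delta = (int64_t)(vruntime - min_vruntime);

	if (delta > 0)
		min_vruntime = vruntime;
	return min_vruntime;
}

int main(void)
{
	/* hypothetical runqueue: three tasks, nice-0 weight is 1024 */
	unsigned long w[] = { 1024, 2048, 1024 };
	unsigned long rw = w[0] + w[1] + w[2];
	uint64_t p = sched_period(3);
	int i;

	/* 3 <= nr_latency, so p stays at 20 ms; slices are 5, 10, 5 ms */
	for (i = 0; i < 3; i++)
		printf("task %d: slice %llu ns\n", i,
		       (unsigned long long)sched_slice(p, w[i], rw));

	printf("vslice (same for every entity): %llu\n",
	       (unsigned long long)sched_vslice(p, rw));

	/* wrap-around: a vruntime that just passed 0 beats one near 2^64 */
	printf("max_vruntime near wrap: %llu\n",
	       (unsigned long long)max_vruntime((uint64_t)-100, 5));
	return 0;
}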