kernel/sched/rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9
10 int sched_rr_timeslice = RR_TIMESLICE;
11
12 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13
14 struct rt_bandwidth def_rt_bandwidth;
15
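/*
 * Replenishment timer for an rt_bandwidth object: forward the hrtimer by
 * whole periods and hand the number of elapsed periods ("overrun") to
 * do_sched_rt_period_timer().  The timer stops rearming once every
 * serviced runqueue reports idle.
 */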
16 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
17 {
18         struct rt_bandwidth *rt_b =
19                 container_of(timer, struct rt_bandwidth, rt_period_timer);
20         ktime_t now;
21         int overrun;
22         int idle = 0;
23
24         for (;;) {
25                 now = hrtimer_cb_get_time(timer);
26                 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
27
28                 if (!overrun)
29                         break;
30
31                 idle = do_sched_rt_period_timer(rt_b, overrun);
32         }
33
34         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35 }
36
37 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
38 {
39         rt_b->rt_period = ns_to_ktime(period);
40         rt_b->rt_runtime = runtime;
41
42         raw_spin_lock_init(&rt_b->rt_runtime_lock);
43
44         hrtimer_init(&rt_b->rt_period_timer,
45                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
46         rt_b->rt_period_timer.function = sched_rt_period_timer;
47 }
48
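/*
 * Arm the replenishment timer, unless RT throttling is disabled
 * (infinite runtime) or the timer is already pending.
 */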
49 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
50 {
51         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52                 return;
53
54         if (hrtimer_active(&rt_b->rt_period_timer))
55                 return;
56
57         raw_spin_lock(&rt_b->rt_runtime_lock);
58         start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
59         raw_spin_unlock(&rt_b->rt_runtime_lock);
60 }
61
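/*
 * Initialize a per-CPU RT runqueue: the empty priority array plus the
 * delimiter bit used by sched_find_first_bit(), the SMP push/pull state,
 * and the throttling bookkeeping.
 */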
62 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
63 {
64         struct rt_prio_array *array;
65         int i;
66
67         array = &rt_rq->active;
68         for (i = 0; i < MAX_RT_PRIO; i++) {
69                 INIT_LIST_HEAD(array->queue + i);
70                 __clear_bit(i, array->bitmap);
71         }
72         /* delimiter for bitsearch: */
73         __set_bit(MAX_RT_PRIO, array->bitmap);
74
75 #if defined CONFIG_SMP
76         rt_rq->highest_prio.curr = MAX_RT_PRIO;
77         rt_rq->highest_prio.next = MAX_RT_PRIO;
78         rt_rq->rt_nr_migratory = 0;
79         rt_rq->overloaded = 0;
80         plist_head_init(&rt_rq->pushable_tasks);
81 #endif
82
83         rt_rq->rt_time = 0;
84         rt_rq->rt_throttled = 0;
85         rt_rq->rt_runtime = 0;
86         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
87 }
88
89 #ifdef CONFIG_RT_GROUP_SCHED
90 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
91 {
92         hrtimer_cancel(&rt_b->rt_period_timer);
93 }
94
95 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
96
97 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
98 {
99 #ifdef CONFIG_SCHED_DEBUG
100         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
101 #endif
102         return container_of(rt_se, struct task_struct, rt);
103 }
104
105 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
106 {
107         return rt_rq->rq;
108 }
109
110 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
111 {
112         return rt_se->rt_rq;
113 }
114
115 void free_rt_sched_group(struct task_group *tg)
116 {
117         int i;
118
119         if (tg->rt_se)
120                 destroy_rt_bandwidth(&tg->rt_bandwidth);
121
122         for_each_possible_cpu(i) {
123                 if (tg->rt_rq)
124                         kfree(tg->rt_rq[i]);
125                 if (tg->rt_se)
126                         kfree(tg->rt_se[i]);
127         }
128
129         kfree(tg->rt_rq);
130         kfree(tg->rt_se);
131 }
132
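/*
 * Wire up one CPU's rt_rq and scheduling entity for a task group.  The
 * entity is parented to either the root rt_rq or the parent group's
 * queue; the root group itself has no entity (rt_se == NULL).
 */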
133 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
134                 struct sched_rt_entity *rt_se, int cpu,
135                 struct sched_rt_entity *parent)
136 {
137         struct rq *rq = cpu_rq(cpu);
138
139         rt_rq->highest_prio.curr = MAX_RT_PRIO;
140         rt_rq->rt_nr_boosted = 0;
141         rt_rq->rq = rq;
142         rt_rq->tg = tg;
143
144         tg->rt_rq[cpu] = rt_rq;
145         tg->rt_se[cpu] = rt_se;
146
147         if (!rt_se)
148                 return;
149
150         if (!parent)
151                 rt_se->rt_rq = &rq->rt;
152         else
153                 rt_se->rt_rq = parent->my_q;
154
155         rt_se->my_q = rt_rq;
156         rt_se->parent = parent;
157         INIT_LIST_HEAD(&rt_se->run_list);
158 }
159
160 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
161 {
162         struct rt_rq *rt_rq;
163         struct sched_rt_entity *rt_se;
164         int i;
165
166         tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
167         if (!tg->rt_rq)
168                 goto err;
169         tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
170         if (!tg->rt_se)
171                 goto err;
172
173         init_rt_bandwidth(&tg->rt_bandwidth,
174                         ktime_to_ns(def_rt_bandwidth.rt_period), 0);
175
176         for_each_possible_cpu(i) {
177                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
178                                      GFP_KERNEL, cpu_to_node(i));
179                 if (!rt_rq)
180                         goto err;
181
182                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
183                                      GFP_KERNEL, cpu_to_node(i));
184                 if (!rt_se)
185                         goto err_free_rq;
186
187                 init_rt_rq(rt_rq, cpu_rq(i));
188                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
189                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
190         }
191
192         return 1;
193
194 err_free_rq:
195         kfree(rt_rq);
196 err:
197         return 0;
198 }
199
200 #else /* CONFIG_RT_GROUP_SCHED */
201
202 #define rt_entity_is_task(rt_se) (1)
203
204 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
205 {
206         return container_of(rt_se, struct task_struct, rt);
207 }
208
209 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
210 {
211         return container_of(rt_rq, struct rq, rt);
212 }
213
214 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
215 {
216         struct task_struct *p = rt_task_of(rt_se);
217         struct rq *rq = task_rq(p);
218
219         return &rq->rt;
220 }
221
222 void free_rt_sched_group(struct task_group *tg) { }
223
224 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
225 {
226         return 1;
227 }
228 #endif /* CONFIG_RT_GROUP_SCHED */
229
230 #ifdef CONFIG_SMP
231
232 static inline int rt_overloaded(struct rq *rq)
233 {
234         return atomic_read(&rq->rd->rto_count);
235 }
236
237 static inline void rt_set_overload(struct rq *rq)
238 {
239         if (!rq->online)
240                 return;
241
242         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
243         /*
244          * Make sure the mask is visible before we set
245          * the overload count. That is checked to determine
246          * if we should look at the mask. It would be a shame
247          * if we looked at the mask, but the mask was not
248          * updated yet.
249          */
250         wmb();
251         atomic_inc(&rq->rd->rto_count);
252 }
253
254 static inline void rt_clear_overload(struct rq *rq)
255 {
256         if (!rq->online)
257                 return;
258
259         /* the order here really doesn't matter */
260         atomic_dec(&rq->rd->rto_count);
261         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
262 }
263
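/*
 * A runqueue is "RT overloaded" when it has more than one runnable RT
 * task and at least one of them may migrate.  Keep the per-rq flag and
 * the root-domain overload mask/count in sync with that condition.
 */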
264 static void update_rt_migration(struct rt_rq *rt_rq)
265 {
266         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
267                 if (!rt_rq->overloaded) {
268                         rt_set_overload(rq_of_rt_rq(rt_rq));
269                         rt_rq->overloaded = 1;
270                 }
271         } else if (rt_rq->overloaded) {
272                 rt_clear_overload(rq_of_rt_rq(rt_rq));
273                 rt_rq->overloaded = 0;
274         }
275 }
276
277 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
278 {
279         struct task_struct *p;
280
281         if (!rt_entity_is_task(rt_se))
282                 return;
283
284         p = rt_task_of(rt_se);
285         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
286
287         rt_rq->rt_nr_total++;
288         if (p->nr_cpus_allowed > 1)
289                 rt_rq->rt_nr_migratory++;
290
291         update_rt_migration(rt_rq);
292 }
293
294 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
295 {
296         struct task_struct *p;
297
298         if (!rt_entity_is_task(rt_se))
299                 return;
300
301         p = rt_task_of(rt_se);
302         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
303
304         rt_rq->rt_nr_total--;
305         if (p->nr_cpus_allowed > 1)
306                 rt_rq->rt_nr_migratory--;
307
308         update_rt_migration(rt_rq);
309 }
310
311 static inline int has_pushable_tasks(struct rq *rq)
312 {
313         return !plist_head_empty(&rq->rt.pushable_tasks);
314 }
315
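/*
 * Maintain the plist of pushable tasks (queued RT tasks that are allowed
 * to run on more than one CPU), ordered by priority, together with the
 * cached highest_prio.next value consulted by the push/pull logic.
 */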
316 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
317 {
318         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
319         plist_node_init(&p->pushable_tasks, p->prio);
320         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
321
322         /* Update the highest prio pushable task */
323         if (p->prio < rq->rt.highest_prio.next)
324                 rq->rt.highest_prio.next = p->prio;
325 }
326
327 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
328 {
329         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
330
331         /* Update the new highest prio pushable task */
332         if (has_pushable_tasks(rq)) {
333                 p = plist_first_entry(&rq->rt.pushable_tasks,
334                                       struct task_struct, pushable_tasks);
335                 rq->rt.highest_prio.next = p->prio;
336         } else
337                 rq->rt.highest_prio.next = MAX_RT_PRIO;
338 }
339
340 #else
341
342 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
343 {
344 }
345
346 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
347 {
348 }
349
350 static inline
351 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
352 {
353 }
354
355 static inline
356 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
357 {
358 }
359
360 #endif /* CONFIG_SMP */
361
362 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
363 {
364         return !list_empty(&rt_se->run_list);
365 }
366
367 #ifdef CONFIG_RT_GROUP_SCHED
368
369 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
370 {
371         if (!rt_rq->tg)
372                 return RUNTIME_INF;
373
374         return rt_rq->rt_runtime;
375 }
376
377 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
378 {
379         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
380 }
381
382 typedef struct task_group *rt_rq_iter_t;
383
384 static inline struct task_group *next_task_group(struct task_group *tg)
385 {
386         do {
387                 tg = list_entry_rcu(tg->list.next,
388                         typeof(struct task_group), list);
389         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
390
391         if (&tg->list == &task_groups)
392                 tg = NULL;
393
394         return tg;
395 }
396
397 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
398         for (iter = container_of(&task_groups, typeof(*iter), list);    \
399                 (iter = next_task_group(iter)) &&                       \
400                 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
401
402 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
403 {
404         list_add_rcu(&rt_rq->leaf_rt_rq_list,
405                         &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
406 }
407
408 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
409 {
410         list_del_rcu(&rt_rq->leaf_rt_rq_list);
411 }
412
413 #define for_each_leaf_rt_rq(rt_rq, rq) \
414         list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
415
416 #define for_each_sched_rt_entity(rt_se) \
417         for (; rt_se; rt_se = rt_se->parent)
418
419 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
420 {
421         return rt_se->my_q;
422 }
423
424 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
425 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
426
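/*
 * (Re-)enqueue the entity representing this group rt_rq on its parent,
 * e.g. when the group is unthrottled, and preempt the current task if
 * the group now holds a higher-priority task.
 */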
427 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
428 {
429         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
430         struct sched_rt_entity *rt_se;
431
432         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
433
434         rt_se = rt_rq->tg->rt_se[cpu];
435
436         if (rt_rq->rt_nr_running) {
437                 if (rt_se && !on_rt_rq(rt_se))
438                         enqueue_rt_entity(rt_se, false);
439                 if (rt_rq->highest_prio.curr < curr->prio)
440                         resched_task(curr);
441         }
442 }
443
444 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
445 {
446         struct sched_rt_entity *rt_se;
447         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
448
449         rt_se = rt_rq->tg->rt_se[cpu];
450
451         if (rt_se && on_rt_rq(rt_se))
452                 dequeue_rt_entity(rt_se);
453 }
454
455 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
456 {
457         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
458 }
459
460 static int rt_se_boosted(struct sched_rt_entity *rt_se)
461 {
462         struct rt_rq *rt_rq = group_rt_rq(rt_se);
463         struct task_struct *p;
464
465         if (rt_rq)
466                 return !!rt_rq->rt_nr_boosted;
467
468         p = rt_task_of(rt_se);
469         return p->prio != p->normal_prio;
470 }
471
472 #ifdef CONFIG_SMP
473 static inline const struct cpumask *sched_rt_period_mask(void)
474 {
475         return this_rq()->rd->span;
476 }
477 #else
478 static inline const struct cpumask *sched_rt_period_mask(void)
479 {
480         return cpu_online_mask;
481 }
482 #endif
483
484 static inline
485 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
486 {
487         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
488 }
489
490 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
491 {
492         return &rt_rq->tg->rt_bandwidth;
493 }
494
495 #else /* !CONFIG_RT_GROUP_SCHED */
496
497 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
498 {
499         return rt_rq->rt_runtime;
500 }
501
502 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
503 {
504         return ktime_to_ns(def_rt_bandwidth.rt_period);
505 }
506
507 typedef struct rt_rq *rt_rq_iter_t;
508
509 #define for_each_rt_rq(rt_rq, iter, rq) \
510         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
511
512 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
513 {
514 }
515
516 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
517 {
518 }
519
520 #define for_each_leaf_rt_rq(rt_rq, rq) \
521         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
522
523 #define for_each_sched_rt_entity(rt_se) \
524         for (; rt_se; rt_se = NULL)
525
526 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
527 {
528         return NULL;
529 }
530
531 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
532 {
533         if (rt_rq->rt_nr_running)
534                 resched_task(rq_of_rt_rq(rt_rq)->curr);
535 }
536
537 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
538 {
539 }
540
541 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
542 {
543         return rt_rq->rt_throttled;
544 }
545
546 static inline const struct cpumask *sched_rt_period_mask(void)
547 {
548         return cpu_online_mask;
549 }
550
551 static inline
552 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
553 {
554         return &cpu_rq(cpu)->rt;
555 }
556
557 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
558 {
559         return &def_rt_bandwidth;
560 }
561
562 #endif /* CONFIG_RT_GROUP_SCHED */
563
564 #ifdef CONFIG_SMP
565 /*
566  * We ran out of runtime; see if we can borrow some from our neighbours.
567  */
568 static int do_balance_runtime(struct rt_rq *rt_rq)
569 {
570         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
571         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
572         int i, weight, more = 0;
573         u64 rt_period;
574
575         weight = cpumask_weight(rd->span);
576
577         raw_spin_lock(&rt_b->rt_runtime_lock);
578         rt_period = ktime_to_ns(rt_b->rt_period);
579         for_each_cpu(i, rd->span) {
580                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
581                 s64 diff;
582
583                 if (iter == rt_rq)
584                         continue;
585
586                 raw_spin_lock(&iter->rt_runtime_lock);
587                 /*
588                  * Either all rqs have inf runtime and there's nothing to steal
589                  * or __disable_runtime() below sets a specific rq to inf to
590                  * indicate it's been disabled and disallow stealing.
591                  */
592                 if (iter->rt_runtime == RUNTIME_INF)
593                         goto next;
594
595                 /*
596                  * From runqueues with spare time, take 1/n part of their
597                  * spare time, but no more than our period.
598                  */
599                 diff = iter->rt_runtime - iter->rt_time;
600                 if (diff > 0) {
601                         diff = div_u64((u64)diff, weight);
602                         if (rt_rq->rt_runtime + diff > rt_period)
603                                 diff = rt_period - rt_rq->rt_runtime;
604                         iter->rt_runtime -= diff;
605                         rt_rq->rt_runtime += diff;
606                         more = 1;
607                         if (rt_rq->rt_runtime == rt_period) {
608                                 raw_spin_unlock(&iter->rt_runtime_lock);
609                                 break;
610                         }
611                 }
612 next:
613                 raw_spin_unlock(&iter->rt_runtime_lock);
614         }
615         raw_spin_unlock(&rt_b->rt_runtime_lock);
616
617         return more;
618 }
619
620 /*
621  * Ensure this RQ takes back all the runtime it lent to its neighbours.
622  */
623 static void __disable_runtime(struct rq *rq)
624 {
625         struct root_domain *rd = rq->rd;
626         rt_rq_iter_t iter;
627         struct rt_rq *rt_rq;
628
629         if (unlikely(!scheduler_running))
630                 return;
631
632         for_each_rt_rq(rt_rq, iter, rq) {
633                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
634                 s64 want;
635                 int i;
636
637                 raw_spin_lock(&rt_b->rt_runtime_lock);
638                 raw_spin_lock(&rt_rq->rt_runtime_lock);
639                 /*
640                  * Either we're all inf and nobody needs to borrow, or we're
641                  * already disabled and thus have nothing to do, or we have
642                  * exactly the right amount of runtime to take out.
643                  */
644                 if (rt_rq->rt_runtime == RUNTIME_INF ||
645                                 rt_rq->rt_runtime == rt_b->rt_runtime)
646                         goto balanced;
647                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
648
649                 /*
650                  * Calculate the difference between what we started out with
651                  * and what we currently have; that's the amount of runtime
652                  * we lent and now have to reclaim.
653                  */
654                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
655
656                 /*
657                  * Greedy reclaim, take back as much as we can.
658                  */
659                 for_each_cpu(i, rd->span) {
660                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
661                         s64 diff;
662
663                         /*
664                          * Can't reclaim from ourselves or disabled runqueues.
665                          */
666                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
667                                 continue;
668
669                         raw_spin_lock(&iter->rt_runtime_lock);
670                         if (want > 0) {
671                                 diff = min_t(s64, iter->rt_runtime, want);
672                                 iter->rt_runtime -= diff;
673                                 want -= diff;
674                         } else {
675                                 iter->rt_runtime -= want;
676                                 want -= want;
677                         }
678                         raw_spin_unlock(&iter->rt_runtime_lock);
679
680                         if (!want)
681                                 break;
682                 }
683
684                 raw_spin_lock(&rt_rq->rt_runtime_lock);
685                 /*
686                  * We cannot be left wanting - that would mean some runtime
687                  * leaked out of the system.
688                  */
689                 BUG_ON(want);
690 balanced:
691                 /*
692                  * Disable all the borrow logic by pretending we have inf
693                  * runtime - in which case borrowing doesn't make sense.
694                  */
695                 rt_rq->rt_runtime = RUNTIME_INF;
696                 rt_rq->rt_throttled = 0;
697                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
698                 raw_spin_unlock(&rt_b->rt_runtime_lock);
699         }
700 }
701
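/*
 * Undo __disable_runtime(): give every rt_rq on this runqueue its
 * configured bandwidth back and clear any throttling, used when the
 * runqueue comes back online in its root domain.
 */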
702 static void __enable_runtime(struct rq *rq)
703 {
704         rt_rq_iter_t iter;
705         struct rt_rq *rt_rq;
706
707         if (unlikely(!scheduler_running))
708                 return;
709
710         /*
711          * Reset each runqueue's bandwidth settings
712          */
713         for_each_rt_rq(rt_rq, iter, rq) {
714                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
715
716                 raw_spin_lock(&rt_b->rt_runtime_lock);
717                 raw_spin_lock(&rt_rq->rt_runtime_lock);
718                 rt_rq->rt_runtime = rt_b->rt_runtime;
719                 rt_rq->rt_time = 0;
720                 rt_rq->rt_throttled = 0;
721                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
722                 raw_spin_unlock(&rt_b->rt_runtime_lock);
723         }
724 }
725
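/*
 * Try to borrow runtime once we have overrun our own allocation.  Called
 * with rt_rq->rt_runtime_lock held; the lock is dropped around
 * do_balance_runtime() so that rt_b->rt_runtime_lock can be taken first,
 * matching the locking order used elsewhere.
 */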
726 static int balance_runtime(struct rt_rq *rt_rq)
727 {
728         int more = 0;
729
730         if (!sched_feat(RT_RUNTIME_SHARE))
731                 return more;
732
733         if (rt_rq->rt_time > rt_rq->rt_runtime) {
734                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
735                 more = do_balance_runtime(rt_rq);
736                 raw_spin_lock(&rt_rq->rt_runtime_lock);
737         }
738
739         return more;
740 }
741 #else /* !CONFIG_SMP */
742 static inline int balance_runtime(struct rt_rq *rt_rq)
743 {
744         return 0;
745 }
746 #endif /* CONFIG_SMP */
747
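/*
 * Per-period replenishment: for every CPU covered by the bandwidth
 * object, knock up to "overrun" periods worth of runtime off the accrued
 * rt_time, unthrottle and re-enqueue the rt_rq if it now fits within its
 * budget, and report whether all runqueues were idle so the timer can
 * stop.
 */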
748 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
749 {
750         int i, idle = 1, throttled = 0;
751         const struct cpumask *span;
752
753         span = sched_rt_period_mask();
754 #ifdef CONFIG_RT_GROUP_SCHED
755         /*
756          * FIXME: isolated CPUs should really leave the root task group,
757          * whether they are isolcpus or were isolated via cpusets, lest
758          * the timer run on a CPU which does not service all runqueues,
759          * potentially leaving other CPUs indefinitely throttled.  If
760          * isolation is really required, the user will turn the throttle
761          * off to kill the perturbations it causes anyway.  Meanwhile,
762          * this maintains functionality for boot and/or troubleshooting.
763          */
764         if (rt_b == &root_task_group.rt_bandwidth)
765                 span = cpu_online_mask;
766 #endif
767         for_each_cpu(i, span) {
768                 int enqueue = 0;
769                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
770                 struct rq *rq = rq_of_rt_rq(rt_rq);
771
772                 raw_spin_lock(&rq->lock);
773                 if (rt_rq->rt_time) {
774                         u64 runtime;
775
776                         raw_spin_lock(&rt_rq->rt_runtime_lock);
777                         if (rt_rq->rt_throttled)
778                                 balance_runtime(rt_rq);
779                         runtime = rt_rq->rt_runtime;
780                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
781                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
782                                 rt_rq->rt_throttled = 0;
783                                 enqueue = 1;
784
785                                 /*
786                                  * Force a clock update if the CPU was idle,
787                                  * lest wakeup -> unthrottle time accumulate.
788                                  */
789                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
790                                         rq->skip_clock_update = -1;
791                         }
792                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
793                                 idle = 0;
794                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
795                 } else if (rt_rq->rt_nr_running) {
796                         idle = 0;
797                         if (!rt_rq_throttled(rt_rq))
798                                 enqueue = 1;
799                 }
800                 if (rt_rq->rt_throttled)
801                         throttled = 1;
802
803                 if (enqueue)
804                         sched_rt_rq_enqueue(rt_rq);
805                 raw_spin_unlock(&rq->lock);
806         }
807
808         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
809                 return 1;
810
811         return idle;
812 }
813
814 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
815 {
816 #ifdef CONFIG_RT_GROUP_SCHED
817         struct rt_rq *rt_rq = group_rt_rq(rt_se);
818
819         if (rt_rq)
820                 return rt_rq->highest_prio.curr;
821 #endif
822
823         return rt_task_of(rt_se)->prio;
824 }
825
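/*
 * Check whether this rt_rq has run through its budget for the current
 * period.  Returns 1 when the queue must be throttled (and is dequeued
 * here), after first trying to borrow spare runtime from other CPUs.
 */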
826 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
827 {
828         u64 runtime = sched_rt_runtime(rt_rq);
829
830         if (rt_rq->rt_throttled)
831                 return rt_rq_throttled(rt_rq);
832
833         if (runtime >= sched_rt_period(rt_rq))
834                 return 0;
835
836         balance_runtime(rt_rq);
837         runtime = sched_rt_runtime(rt_rq);
838         if (runtime == RUNTIME_INF)
839                 return 0;
840
841         if (rt_rq->rt_time > runtime) {
842                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
843
844                 /*
845                  * Don't actually throttle groups that have no runtime assigned
846                  * but accrue some time due to boosting.
847                  */
848                 if (likely(rt_b->rt_runtime)) {
849                         static bool once = false;
850
851                         rt_rq->rt_throttled = 1;
852
853                         if (!once) {
854                                 once = true;
855                                 printk_sched("sched: RT throttling activated\n");
856                         }
857                 } else {
858                         /*
859                          * In case we did anyway, make it go away;
860                          * replenishment is a joke, since it will replenish us
861                          * with exactly 0 ns.
862                          */
863                         rt_rq->rt_time = 0;
864                 }
865
866                 if (rt_rq_throttled(rt_rq)) {
867                         sched_rt_rq_dequeue(rt_rq);
868                         return 1;
869                 }
870         }
871
872         return 0;
873 }
874
875 /*
876  * Update the current task's runtime statistics. Skip current tasks that
877  * are not in our scheduling class.
878  */
879 static void update_curr_rt(struct rq *rq)
880 {
881         struct task_struct *curr = rq->curr;
882         struct sched_rt_entity *rt_se = &curr->rt;
883         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
884         u64 delta_exec;
885
886         if (curr->sched_class != &rt_sched_class)
887                 return;
888
889         delta_exec = rq_clock_task(rq) - curr->se.exec_start;
890         if (unlikely((s64)delta_exec <= 0))
891                 return;
892
893         schedstat_set(curr->se.statistics.exec_max,
894                       max(curr->se.statistics.exec_max, delta_exec));
895
896         curr->se.sum_exec_runtime += delta_exec;
897         account_group_exec_runtime(curr, delta_exec);
898
899         curr->se.exec_start = rq_clock_task(rq);
900         cpuacct_charge(curr, delta_exec);
901
902         sched_rt_avg_update(rq, delta_exec);
903
904         if (!rt_bandwidth_enabled())
905                 return;
906
907         for_each_sched_rt_entity(rt_se) {
908                 rt_rq = rt_rq_of_se(rt_se);
909
910                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
911                         raw_spin_lock(&rt_rq->rt_runtime_lock);
912                         rt_rq->rt_time += delta_exec;
913                         if (sched_rt_runtime_exceeded(rt_rq))
914                                 resched_task(curr);
915                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
916                 }
917         }
918 }
919
920 #if defined CONFIG_SMP
921
922 static void
923 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
924 {
925         struct rq *rq = rq_of_rt_rq(rt_rq);
926
927         if (rq->online && prio < prev_prio)
928                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
929 }
930
931 static void
932 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
933 {
934         struct rq *rq = rq_of_rt_rq(rt_rq);
935
936         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
937                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
938 }
939
940 #else /* CONFIG_SMP */
941
942 static inline
943 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
944 static inline
945 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
946
947 #endif /* CONFIG_SMP */
948
949 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
950 static void
951 inc_rt_prio(struct rt_rq *rt_rq, int prio)
952 {
953         int prev_prio = rt_rq->highest_prio.curr;
954
955         if (prio < prev_prio)
956                 rt_rq->highest_prio.curr = prio;
957
958         inc_rt_prio_smp(rt_rq, prio, prev_prio);
959 }
960
961 static void
962 dec_rt_prio(struct rt_rq *rt_rq, int prio)
963 {
964         int prev_prio = rt_rq->highest_prio.curr;
965
966         if (rt_rq->rt_nr_running) {
967
968                 WARN_ON(prio < prev_prio);
969
970                 /*
971                  * This may have been our highest task, and therefore
972                  * we may have some recomputation to do
973                  */
974                 if (prio == prev_prio) {
975                         struct rt_prio_array *array = &rt_rq->active;
976
977                         rt_rq->highest_prio.curr =
978                                 sched_find_first_bit(array->bitmap);
979                 }
980
981         } else
982                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
983
984         dec_rt_prio_smp(rt_rq, prio, prev_prio);
985 }
986
987 #else
988
989 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
990 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
991
992 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
993
994 #ifdef CONFIG_RT_GROUP_SCHED
995
996 static void
997 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
998 {
999         if (rt_se_boosted(rt_se))
1000                 rt_rq->rt_nr_boosted++;
1001
1002         if (rt_rq->tg)
1003                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1004 }
1005
1006 static void
1007 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1008 {
1009         if (rt_se_boosted(rt_se))
1010                 rt_rq->rt_nr_boosted--;
1011
1012         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1013 }
1014
1015 #else /* CONFIG_RT_GROUP_SCHED */
1016
1017 static void
1018 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1019 {
1020         start_rt_bandwidth(&def_rt_bandwidth);
1021 }
1022
1023 static inline
1024 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1025
1026 #endif /* CONFIG_RT_GROUP_SCHED */
1027
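/*
 * Bookkeeping common to every enqueue/dequeue of an RT entity:
 * nr_running, highest-priority tracking, migration/overload state and
 * group bandwidth/boost accounting.
 */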
1028 static inline
1029 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1030 {
1031         int prio = rt_se_prio(rt_se);
1032
1033         WARN_ON(!rt_prio(prio));
1034         rt_rq->rt_nr_running++;
1035
1036         inc_rt_prio(rt_rq, prio);
1037         inc_rt_migration(rt_se, rt_rq);
1038         inc_rt_group(rt_se, rt_rq);
1039 }
1040
1041 static inline
1042 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1043 {
1044         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1045         WARN_ON(!rt_rq->rt_nr_running);
1046         rt_rq->rt_nr_running--;
1047
1048         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1049         dec_rt_migration(rt_se, rt_rq);
1050         dec_rt_group(rt_se, rt_rq);
1051 }
1052
1053 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1054 {
1055         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1056         struct rt_prio_array *array = &rt_rq->active;
1057         struct rt_rq *group_rq = group_rt_rq(rt_se);
1058         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1059
1060         /*
1061          * Don't enqueue the group if it's throttled, or when empty.
1062          * The latter is a consequence of the former when a child group
1063          * gets throttled and the current group doesn't have any other
1064          * active members.
1065          */
1066         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1067                 return;
1068
1069         if (!rt_rq->rt_nr_running)
1070                 list_add_leaf_rt_rq(rt_rq);
1071
1072         if (head)
1073                 list_add(&rt_se->run_list, queue);
1074         else
1075                 list_add_tail(&rt_se->run_list, queue);
1076         __set_bit(rt_se_prio(rt_se), array->bitmap);
1077
1078         inc_rt_tasks(rt_se, rt_rq);
1079 }
1080
1081 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1082 {
1083         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1084         struct rt_prio_array *array = &rt_rq->active;
1085
1086         list_del_init(&rt_se->run_list);
1087         if (list_empty(array->queue + rt_se_prio(rt_se)))
1088                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1089
1090         dec_rt_tasks(rt_se, rt_rq);
1091         if (!rt_rq->rt_nr_running)
1092                 list_del_leaf_rt_rq(rt_rq);
1093 }
1094
1095 /*
1096  * Because the prio of an upper entry depends on the lower
1097  * entries, we must remove entries top-down.
1098  */
1099 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1100 {
1101         struct sched_rt_entity *back = NULL;
1102
1103         for_each_sched_rt_entity(rt_se) {
1104                 rt_se->back = back;
1105                 back = rt_se;
1106         }
1107
1108         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1109                 if (on_rt_rq(rt_se))
1110                         __dequeue_rt_entity(rt_se);
1111         }
1112 }
1113
1114 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1115 {
1116         dequeue_rt_stack(rt_se);
1117         for_each_sched_rt_entity(rt_se)
1118                 __enqueue_rt_entity(rt_se, head);
1119 }
1120
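/*
 * Dequeue the entity and all its parents, then re-enqueue any group
 * entity that still has runnable children so the hierarchy stays
 * consistent.
 */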
1121 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1122 {
1123         dequeue_rt_stack(rt_se);
1124
1125         for_each_sched_rt_entity(rt_se) {
1126                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1127
1128                 if (rt_rq && rt_rq->rt_nr_running)
1129                         __enqueue_rt_entity(rt_se, false);
1130         }
1131 }
1132
1133 /*
1134  * Adding/removing a task to/from a priority array:
1135  */
1136 static void
1137 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1138 {
1139         struct sched_rt_entity *rt_se = &p->rt;
1140
1141         if (flags & ENQUEUE_WAKEUP)
1142                 rt_se->timeout = 0;
1143
1144         enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1145
1146         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1147                 enqueue_pushable_task(rq, p);
1148
1149         inc_nr_running(rq);
1150 }
1151
1152 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1153 {
1154         struct sched_rt_entity *rt_se = &p->rt;
1155
1156         update_curr_rt(rq);
1157         dequeue_rt_entity(rt_se);
1158
1159         dequeue_pushable_task(rq, p);
1160
1161         dec_nr_running(rq);
1162 }
1163
1164 /*
1165  * Put a task at the head or the tail of the run list without the overhead of
1166  * a dequeue followed by an enqueue.
1167  */
1168 static void
1169 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1170 {
1171         if (on_rt_rq(rt_se)) {
1172                 struct rt_prio_array *array = &rt_rq->active;
1173                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1174
1175                 if (head)
1176                         list_move(&rt_se->run_list, queue);
1177                 else
1178                         list_move_tail(&rt_se->run_list, queue);
1179         }
1180 }
1181
1182 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1183 {
1184         struct sched_rt_entity *rt_se = &p->rt;
1185         struct rt_rq *rt_rq;
1186
1187         for_each_sched_rt_entity(rt_se) {
1188                 rt_rq = rt_rq_of_se(rt_se);
1189                 requeue_rt_entity(rt_rq, rt_se, head);
1190         }
1191 }
1192
1193 static void yield_task_rt(struct rq *rq)
1194 {
1195         requeue_task_rt(rq, rq->curr, 0);
1196 }
1197
1198 #ifdef CONFIG_SMP
1199 static int find_lowest_rq(struct task_struct *task);
1200
1201 static int
1202 select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1203 {
1204         struct task_struct *curr;
1205         struct rq *rq;
1206         int cpu;
1207
1208         cpu = task_cpu(p);
1209
1210         if (p->nr_cpus_allowed == 1)
1211                 goto out;
1212
1213         /* For anything but wake ups, just return the task_cpu */
1214         if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1215                 goto out;
1216
1217         rq = cpu_rq(cpu);
1218
1219         rcu_read_lock();
1220         curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1221
1222         /*
1223          * If the current task on @p's runqueue is an RT task, then
1224          * try to see if we can wake this RT task up on another
1225          * runqueue. Otherwise simply start this RT task
1226          * on its current runqueue.
1227          *
1228          * We want to avoid overloading runqueues. If the woken
1229          * task is a higher priority, then it will stay on this CPU
1230          * and the lower prio task should be moved to another CPU.
1231          * Even though this will probably make the lower prio task
1232          * lose its cache, we do not want to bounce a higher task
1233          * around just because it gave up its CPU, perhaps for a
1234          * lock?
1235          *
1236          * For equal prio tasks, we just let the scheduler sort it out.
1237          *
1238          * Otherwise, just let it ride on the affined RQ and the
1239          * post-schedule router will push the preempted task away
1240          *
1241          * This test is optimistic; if we get it wrong, the load-balancer
1242          * will have to sort it out.
1243          */
1244         if (curr && unlikely(rt_task(curr)) &&
1245             (curr->nr_cpus_allowed < 2 ||
1246              curr->prio <= p->prio) &&
1247             (p->nr_cpus_allowed > 1)) {
1248                 int target = find_lowest_rq(p);
1249
1250                 if (target != -1)
1251                         cpu = target;
1252         }
1253         rcu_read_unlock();
1254
1255 out:
1256         return cpu;
1257 }
1258
1259 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1260 {
1261         if (rq->curr->nr_cpus_allowed == 1)
1262                 return;
1263
1264         if (p->nr_cpus_allowed != 1
1265             && cpupri_find(&rq->rd->cpupri, p, NULL))
1266                 return;
1267
1268         if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1269                 return;
1270
1271         /*
1272          * There appear to be other CPUs that can accept
1273          * current and none to run 'p', so let's reschedule
1274          * to try and push current away:
1275          */
1276         requeue_task_rt(rq, p, 1);
1277         resched_task(rq->curr);
1278 }
1279
1280 #endif /* CONFIG_SMP */
1281
1282 /*
1283  * Preempt the current task with a newly woken task if needed:
1284  */
1285 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1286 {
1287         if (p->prio < rq->curr->prio) {
1288                 resched_task(rq->curr);
1289                 return;
1290         }
1291
1292 #ifdef CONFIG_SMP
1293         /*
1294          * If:
1295          *
1296          * - the newly woken task is of equal priority to the current task
1297          * - the newly woken task is non-migratable while current is migratable
1298          * - current will be preempted on the next reschedule
1299          *
1300          * we should check to see if current can readily move to a different
1301          * cpu.  If so, we will reschedule to allow the push logic to try
1302          * to move current somewhere else, making room for our non-migratable
1303          * task.
1304          */
1305         if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1306                 check_preempt_equal_prio(rq, p);
1307 #endif
1308 }
1309
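/*
 * Return the first entity on the highest-priority non-empty queue of the
 * given rt_rq.
 */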
1310 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1311                                                    struct rt_rq *rt_rq)
1312 {
1313         struct rt_prio_array *array = &rt_rq->active;
1314         struct sched_rt_entity *next = NULL;
1315         struct list_head *queue;
1316         int idx;
1317
1318         idx = sched_find_first_bit(array->bitmap);
1319         BUG_ON(idx >= MAX_RT_PRIO);
1320
1321         queue = array->queue + idx;
1322         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1323
1324         return next;
1325 }
1326
1327 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1328 {
1329         struct sched_rt_entity *rt_se;
1330         struct task_struct *p;
1331         struct rt_rq *rt_rq;
1332
1333         rt_rq = &rq->rt;
1334
1335         if (!rt_rq->rt_nr_running)
1336                 return NULL;
1337
1338         if (rt_rq_throttled(rt_rq))
1339                 return NULL;
1340
1341         do {
1342                 rt_se = pick_next_rt_entity(rq, rt_rq);
1343                 BUG_ON(!rt_se);
1344                 rt_rq = group_rt_rq(rt_se);
1345         } while (rt_rq);
1346
1347         p = rt_task_of(rt_se);
1348         p->se.exec_start = rq_clock_task(rq);
1349
1350         return p;
1351 }
1352
1353 static struct task_struct *pick_next_task_rt(struct rq *rq)
1354 {
1355         struct task_struct *p = _pick_next_task_rt(rq);
1356
1357         /* The running task is never eligible for pushing */
1358         if (p)
1359                 dequeue_pushable_task(rq, p);
1360
1361 #ifdef CONFIG_SMP
1362         /*
1363          * We detect this state here so that we can avoid taking the RQ
1364          * lock again later if there is no need to push
1365          */
1366         rq->post_schedule = has_pushable_tasks(rq);
1367 #endif
1368
1369         return p;
1370 }
1371
1372 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1373 {
1374         update_curr_rt(rq);
1375
1376         /*
1377          * The previous task needs to be made eligible for pushing
1378          * if it is still active
1379          */
1380         if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1381                 enqueue_pushable_task(rq, p);
1382 }
1383
1384 #ifdef CONFIG_SMP
1385
1386 /* Only try algorithms three times */
1387 #define RT_MAX_TRIES 3
1388
1389 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1390 {
1391         if (!task_running(rq, p) &&
1392             cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1393                 return 1;
1394         return 0;
1395 }
1396
1397 /* Return the second highest RT task, NULL otherwise */
1398 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1399 {
1400         struct task_struct *next = NULL;
1401         struct sched_rt_entity *rt_se;
1402         struct rt_prio_array *array;
1403         struct rt_rq *rt_rq;
1404         int idx;
1405
1406         for_each_leaf_rt_rq(rt_rq, rq) {
1407                 array = &rt_rq->active;
1408                 idx = sched_find_first_bit(array->bitmap);
1409 next_idx:
1410                 if (idx >= MAX_RT_PRIO)
1411                         continue;
1412                 if (next && next->prio <= idx)
1413                         continue;
1414                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
1415                         struct task_struct *p;
1416
1417                         if (!rt_entity_is_task(rt_se))
1418                                 continue;
1419
1420                         p = rt_task_of(rt_se);
1421                         if (pick_rt_task(rq, p, cpu)) {
1422                                 next = p;
1423                                 break;
1424                         }
1425                 }
1426                 if (!next) {
1427                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1428                         goto next_idx;
1429                 }
1430         }
1431
1432         return next;
1433 }
1434
1435 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1436
1437 static int find_lowest_rq(struct task_struct *task)
1438 {
1439         struct sched_domain *sd;
1440         struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1441         int this_cpu = smp_processor_id();
1442         int cpu      = task_cpu(task);
1443
1444         /* Make sure the mask is initialized first */
1445         if (unlikely(!lowest_mask))
1446                 return -1;
1447
1448         if (task->nr_cpus_allowed == 1)
1449                 return -1; /* No other targets possible */
1450
1451         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1452                 return -1; /* No targets found */
1453
1454         /*
1455          * At this point we have built a mask of cpus representing the
1456          * lowest priority tasks in the system.  Now we want to elect
1457          * the best one based on our affinity and topology.
1458          *
1459          * We prioritize the last cpu that the task executed on since
1460          * it is most likely cache-hot in that location.
1461          */
1462         if (cpumask_test_cpu(cpu, lowest_mask))
1463                 return cpu;
1464
1465         /*
1466          * Otherwise, we consult the sched_domains span maps to figure
1467          * out which cpu is logically closest to our hot cache data.
1468          */
1469         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1470                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1471
1472         rcu_read_lock();
1473         for_each_domain(cpu, sd) {
1474                 if (sd->flags & SD_WAKE_AFFINE) {
1475                         int best_cpu;
1476
1477                         /*
1478                          * "this_cpu" is cheaper to preempt than a
1479                          * remote processor.
1480                          */
1481                         if (this_cpu != -1 &&
1482                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1483                                 rcu_read_unlock();
1484                                 return this_cpu;
1485                         }
1486
1487                         best_cpu = cpumask_first_and(lowest_mask,
1488                                                      sched_domain_span(sd));
1489                         if (best_cpu < nr_cpu_ids) {
1490                                 rcu_read_unlock();
1491                                 return best_cpu;
1492                         }
1493                 }
1494         }
1495         rcu_read_unlock();
1496
1497         /*
1498          * And finally, if there were no matches within the domains
1499          * just give the caller *something* to work with from the compatible
1500          * locations.
1501          */
1502         if (this_cpu != -1)
1503                 return this_cpu;
1504
1505         cpu = cpumask_any(lowest_mask);
1506         if (cpu < nr_cpu_ids)
1507                 return cpu;
1508         return -1;
1509 }
1510
1511 /* Will lock the rq it finds */
1512 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1513 {
1514         struct rq *lowest_rq = NULL;
1515         int tries;
1516         int cpu;
1517
1518         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1519                 cpu = find_lowest_rq(task);
1520
1521                 if ((cpu == -1) || (cpu == rq->cpu))
1522                         break;
1523
1524                 lowest_rq = cpu_rq(cpu);
1525
1526                 /* if the prio of this runqueue changed, try again */
1527                 if (double_lock_balance(rq, lowest_rq)) {
1528                         /*
1529                          * We had to unlock the run queue. In
1530                          * the mean time, task could have
1531                          * the meantime, the task could have
1532                          * Also make sure that it wasn't scheduled on its rq.
1533                          */
1534                         if (unlikely(task_rq(task) != rq ||
1535                                      !cpumask_test_cpu(lowest_rq->cpu,
1536                                                        tsk_cpus_allowed(task)) ||
1537                                      task_running(rq, task) ||
1538                                      !task->on_rq)) {
1539
1540                                 double_unlock_balance(rq, lowest_rq);
1541                                 lowest_rq = NULL;
1542                                 break;
1543                         }
1544                 }
1545
1546                 /* If this rq is still suitable use it. */
1547                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1548                         break;
1549
1550                 /* try again */
1551                 double_unlock_balance(rq, lowest_rq);
1552                 lowest_rq = NULL;
1553         }
1554
1555         return lowest_rq;
1556 }
1557
1558 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1559 {
1560         struct task_struct *p;
1561
1562         if (!has_pushable_tasks(rq))
1563                 return NULL;
1564
1565         p = plist_first_entry(&rq->rt.pushable_tasks,
1566                               struct task_struct, pushable_tasks);
1567
1568         BUG_ON(rq->cpu != task_cpu(p));
1569         BUG_ON(task_current(rq, p));
1570         BUG_ON(p->nr_cpus_allowed <= 1);
1571
1572         BUG_ON(!p->on_rq);
1573         BUG_ON(!rt_task(p));
1574
1575         return p;
1576 }
1577
1578 /*
1579  * If the current CPU has more than one RT task, see if the non-running
1580  * task can migrate over to a CPU that is running a task
1581  * of lesser priority.
1582  */
1583 static int push_rt_task(struct rq *rq)
1584 {
1585         struct task_struct *next_task;
1586         struct rq *lowest_rq;
1587         int ret = 0;
1588
1589         if (!rq->rt.overloaded)
1590                 return 0;
1591
1592         next_task = pick_next_pushable_task(rq);
1593         if (!next_task)
1594                 return 0;
1595
1596 retry:
1597         if (unlikely(next_task == rq->curr)) {
1598                 WARN_ON(1);
1599                 return 0;
1600         }
1601
1602         /*
1603          * It's possible that the next_task slipped in with a
1604          * higher priority than current. If that's the case
1605          * just reschedule current.
1606          */
1607         if (unlikely(next_task->prio < rq->curr->prio)) {
1608                 resched_task(rq->curr);
1609                 return 0;
1610         }
1611
1612         /* We might release rq lock */
1613         get_task_struct(next_task);
1614
1615         /* find_lock_lowest_rq locks the rq if found */
1616         lowest_rq = find_lock_lowest_rq(next_task, rq);
1617         if (!lowest_rq) {
1618                 struct task_struct *task;
1619                 /*
1620                  * find_lock_lowest_rq releases rq->lock
1621                  * so it is possible that next_task has migrated.
1622                  *
1623                  * We need to make sure that the task is still on the same
1624                  * run-queue and is also still the next task eligible for
1625                  * pushing.
1626                  */
1627                 task = pick_next_pushable_task(rq);
1628                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1629                         /*
1630                          * The task hasn't migrated, and is still the next
1631                          * eligible task, but we failed to find a run-queue
1632                          * to push it to.  Do not retry in this case, since
1633                          * other cpus will pull from us when ready.
1634                          */
1635                         goto out;
1636                 }
1637
1638                 if (!task)
1639                         /* No more tasks, just exit */
1640                         goto out;
1641
1642                 /*
1643                  * Something has shifted, try again.
1644                  */
1645                 put_task_struct(next_task);
1646                 next_task = task;
1647                 goto retry;
1648         }
1649
1650         deactivate_task(rq, next_task, 0);
1651         set_task_cpu(next_task, lowest_rq->cpu);
1652         activate_task(lowest_rq, next_task, 0);
1653         ret = 1;
1654
1655         resched_task(lowest_rq->curr);
1656
1657         double_unlock_balance(rq, lowest_rq);
1658
1659 out:
1660         put_task_struct(next_task);
1661
1662         return ret;
1663 }
1664
1665 static void push_rt_tasks(struct rq *rq)
1666 {
1667         /* push_rt_task() will return true if it moved an RT task */
1668         while (push_rt_task(rq))
1669                 ;
1670 }
1671
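/*
 * Scan the overloaded CPUs in our root domain and pull over any queued
 * RT task that has a higher priority than whatever this_rq is about to
 * run.  Returns 1 if at least one task was migrated here.
 */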
1672 static int pull_rt_task(struct rq *this_rq)
1673 {
1674         int this_cpu = this_rq->cpu, ret = 0, cpu;
1675         struct task_struct *p;
1676         struct rq *src_rq;
1677
1678         if (likely(!rt_overloaded(this_rq)))
1679                 return 0;
1680
1681         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1682                 if (this_cpu == cpu)
1683                         continue;
1684
1685                 src_rq = cpu_rq(cpu);
1686
1687                 /*
1688                  * Don't bother taking the src_rq->lock if the next highest
1689                  * task is known to be lower-priority than our current task.
1690                  * This may look racy, but if this value is about to go
1691                  * logically higher, the src_rq will push this task away.
1692          * And if it's going logically lower, we do not care.
1693                  */
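                /*
                 * Illustrative example (lower value == higher priority):
                 * if src_rq's next-highest queued task is prio 60 while
                 * our own highest runnable task is prio 50, then 60 >= 50
                 * and nothing we could pull from src_rq would run ahead
                 * of what we already have, so we skip it without ever
                 * taking src_rq->lock.
                 */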
1694                 if (src_rq->rt.highest_prio.next >=
1695                     this_rq->rt.highest_prio.curr)
1696                         continue;
1697
1698                 /*
1699                  * We can potentially drop this_rq's lock in
1700                  * double_lock_balance, and another CPU could
1701                  * alter this_rq
1702                  */
1703                 double_lock_balance(this_rq, src_rq);
1704
1705                 /*
1706                  * Are there still pullable RT tasks?
1707                  */
1708                 if (src_rq->rt.rt_nr_running <= 1)
1709                         goto skip;
1710
1711                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1712
1713                 /*
1714                  * Do we have an RT task that preempts
1715                  * the to-be-scheduled task?
1716                  */
1717                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1718                         WARN_ON(p == src_rq->curr);
1719                         WARN_ON(!p->on_rq);
1720
1721                         /*
1722                          * There's a chance that p is higher in priority
1723                          * than what's currently running on its cpu.
1724          * This is just that p is waking up and hasn't
1725          * had a chance to schedule. We only pull
1726          * p if it is lower in priority than the
1727          * current task on the run queue.
1728                          */
1729                         if (p->prio < src_rq->curr->prio)
1730                                 goto skip;
1731
1732                         ret = 1;
1733
1734                         deactivate_task(src_rq, p, 0);
1735                         set_task_cpu(p, this_cpu);
1736                         activate_task(this_rq, p, 0);
1737                         /*
1738                          * We continue with the search, just in
1739                          * case there's an even higher prio task
1740                          * in another runqueue. (low likelihood
1741                          * but possible)
1742                          */
1743                 }
1744 skip:
1745                 double_unlock_balance(this_rq, src_rq);
1746         }
1747
1748         return ret;
1749 }
1750
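/*
 * The two hooks below are invoked by the core scheduler around a context
 * switch: pre_schedule() runs before the next task is picked (so we can
 * pull in higher-priority RT work if this runqueue's priority is about to
 * drop), and post_schedule() runs once the switch has completed (so we can
 * push surplus RT tasks elsewhere without holding up the switch itself).
 */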
1751 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1752 {
1753         /* Try to pull RT tasks here if we lower this rq's prio */
1754         if (rq->rt.highest_prio.curr > prev->prio)
1755                 pull_rt_task(rq);
1756 }
1757
1758 static void post_schedule_rt(struct rq *rq)
1759 {
1760         push_rt_tasks(rq);
1761 }
1762
1763 /*
1764  * If we are not running and we are not going to reschedule soon, we should
1765  * try to push tasks away now
1766  */
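/*
 * Concretely: p must not already be running here, the current task must not
 * already be marked for rescheduling, there must be something on the
 * pushable list, p must be allowed on more than one CPU, and the current
 * task must be an RT task that either cannot migrate itself or would not be
 * preempted by p anyway (curr->prio <= p->prio).  Only then is an immediate
 * push attempt worthwhile.
 */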
1767 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1768 {
1769         if (!task_running(rq, p) &&
1770             !test_tsk_need_resched(rq->curr) &&
1771             has_pushable_tasks(rq) &&
1772             p->nr_cpus_allowed > 1 &&
1773             rt_task(rq->curr) &&
1774             (rq->curr->nr_cpus_allowed < 2 ||
1775              rq->curr->prio <= p->prio))
1776                 push_rt_tasks(rq);
1777 }
1778
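/*
 * Affinity updates for RT tasks (e.g. a sched_setaffinity() call from
 * userspace) end up here via the core set_cpus_allowed path.  The only
 * transition we care about is crossing the one-CPU boundary:
 * rt_nr_migratory counts queued RT tasks that may run on more than one
 * CPU, and only such tasks are kept on the pushable list.
 */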
1779 static void set_cpus_allowed_rt(struct task_struct *p,
1780                                 const struct cpumask *new_mask)
1781 {
1782         struct rq *rq;
1783         int weight;
1784
1785         BUG_ON(!rt_task(p));
1786
1787         if (!p->on_rq)
1788                 return;
1789
1790         weight = cpumask_weight(new_mask);
1791
1792         /*
1793          * Only update if the process changes whether it can migrate
1794          * or not (i.e. it crosses the single-CPU boundary).
1795          */
1796         if ((p->nr_cpus_allowed > 1) == (weight > 1))
1797                 return;
1798
1799         rq = task_rq(p);
1800
1801         /*
1802          * The process either lost or gained the ability to migrate
1803          */
1804         if (weight <= 1) {
1805                 if (!task_current(rq, p))
1806                         dequeue_pushable_task(rq, p);
1807                 BUG_ON(!rq->rt.rt_nr_migratory);
1808                 rq->rt.rt_nr_migratory--;
1809         } else {
1810                 if (!task_current(rq, p))
1811                         enqueue_pushable_task(rq, p);
1812                 rq->rt.rt_nr_migratory++;
1813         }
1814
1815         update_rt_migration(&rq->rt);
1816 }
1817
1818 /* Assumes rq->lock is held */
1819 static void rq_online_rt(struct rq *rq)
1820 {
1821         if (rq->rt.overloaded)
1822                 rt_set_overload(rq);
1823
1824         __enable_runtime(rq);
1825
1826         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1827 }
1828
1829 /* Assumes rq->lock is held */
1830 static void rq_offline_rt(struct rq *rq)
1831 {
1832         if (rq->rt.overloaded)
1833                 rt_clear_overload(rq);
1834
1835         __disable_runtime(rq);
1836
1837         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1838 }
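
/*
 * The cpupri_set() calls above keep the root domain's CPU-priority map in
 * sync: an online runqueue advertises its highest RT priority there, while
 * an offline one is marked CPUPRI_INVALID so the push/pull machinery's
 * search for a lower-priority CPU never selects it.
 */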
1839
1840 /*
1841  * When switching away from the rt queue, we bring ourselves to a position
1842  * where we might want to pull RT tasks from other runqueues.
1843  */
1844 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1845 {
1846         /*
1847          * If there are other RT tasks then we will reschedule
1848          * and the scheduling of the other RT tasks will handle
1849          * the balancing. But if we are the last RT task
1850          * we may need to handle the pulling of RT tasks
1851          * now.
1852          */
1853         if (!p->on_rq || rq->rt.rt_nr_running)
1854                 return;
1855
1856         if (pull_rt_task(rq))
1857                 resched_task(rq->curr);
1858 }
1859
1860 void init_sched_rt_class(void)
1861 {
1862         unsigned int i;
1863
1864         for_each_possible_cpu(i) {
1865                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1866                                         GFP_KERNEL, cpu_to_node(i));
1867         }
1868 }
1869 #endif /* CONFIG_SMP */
1870
1871 /*
1872  * When switching a task to RT, we may overload the runqueue
1873  * with RT tasks. In this case we try to push them off to
1874  * other runqueues.
1875  */
1876 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1877 {
1878         int check_resched = 1;
1879
1880         /*
1881          * If we are already running, then there's nothing
1882          * that needs to be done. But if we are not running
1883          * we may need to preempt the currently running task.
1884          * If that currently running task is also an RT task
1885          * then see if we can move to another run queue.
1886          */
1887         if (p->on_rq && rq->curr != p) {
1888 #ifdef CONFIG_SMP
1889                 if (rq->rt.overloaded && push_rt_task(rq) &&
1890                     /* Don't resched if we changed runqueues */
1891                     rq != task_rq(p))
1892                         check_resched = 0;
1893 #endif /* CONFIG_SMP */
1894                 if (check_resched && p->prio < rq->curr->prio)
1895                         resched_task(rq->curr);
1896         }
1897 }
1898
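/*
 * An illustrative userspace trigger for the callbacks around this point
 * (a sketch only, not part of this file):
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *	sched_setscheduler(pid, SCHED_FIFO, &sp);	<- class change, switched_to_rt()
 *	sp.sched_priority = 20;
 *	sched_setparam(pid, &sp);			<- priority change, prio_changed_rt()
 *
 * Priority-inheritance boosting through rt_mutexes reaches prio_changed_rt()
 * the same way whenever a task's effective priority changes.
 */
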
1899 /*
1900  * Priority of the task has changed. This may cause
1901  * us to initiate a push or pull.
1902  */
1903 static void
1904 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1905 {
1906         if (!p->on_rq)
1907                 return;
1908
1909         if (rq->curr == p) {
1910 #ifdef CONFIG_SMP
1911                 /*
1912                  * If our priority decreases while running, we
1913                  * may need to pull tasks to this runqueue.
1914                  */
1915                 if (oldprio < p->prio)
1916                         pull_rt_task(rq);
1917                 /*
1918                  * If there's a higher priority task waiting to run
1919                  * then reschedule. Note, the above pull_rt_task
1920                  * can release the rq lock and p could migrate.
1921                  * Only reschedule if p is still on the same runqueue.
1922                  */
1923                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1924                         resched_task(p);
1925 #else
1926                 /* For UP simply resched on drop of prio */
1927                 if (oldprio < p->prio)
1928                         resched_task(p);
1929 #endif /* CONFIG_SMP */
1930         } else {
1931                 /*
1932                  * This task is not running, but if its priority is
1933                  * higher than that of the currently running task,
1934                  * then reschedule.
1935                  */
1936                 if (p->prio < rq->curr->prio)
1937                         resched_task(rq->curr);
1938         }
1939 }
1940
1941 static void watchdog(struct rq *rq, struct task_struct *p)
1942 {
1943         unsigned long soft, hard;
1944
1945         /* rlim_max may change after rlim_cur was read; any skew is fixed up next tick */
1946         soft = task_rlimit(p, RLIMIT_RTTIME);
1947         hard = task_rlimit_max(p, RLIMIT_RTTIME);
1948
1949         if (soft != RLIM_INFINITY) {
1950                 unsigned long next;
1951
1952                 if (p->rt.watchdog_stamp != jiffies) {
1953                         p->rt.timeout++;
1954                         p->rt.watchdog_stamp = jiffies;
1955                 }
1956
1957                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1958                 if (p->rt.timeout > next)
1959                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1960         }
1961 }
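
/*
 * Worked example for the conversion above (numbers are illustrative):
 * RLIMIT_RTTIME is specified in microseconds, while p->rt.timeout counts
 * scheduler ticks.  With HZ == 1000, one tick is USEC_PER_SEC/HZ == 1000us,
 * so a limit installed from userspace as
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *	setrlimit(RLIMIT_RTTIME, &rl);
 *
 * yields next = DIV_ROUND_UP(500000, 1000) = 500 ticks of RT CPU time
 * before sched_exp is armed and the CPU-time expiry code delivers the
 * signals documented for RLIMIT_RTTIME in getrlimit(2).
 */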
1962
1963 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1964 {
1965         struct sched_rt_entity *rt_se = &p->rt;
1966
1967         update_curr_rt(rq);
1968
1969         watchdog(rq, p);
1970
1971         /*
1972          * RR tasks need a special form of timeslice management.
1973          * FIFO tasks have no timeslices.
1974          */
1975         if (p->policy != SCHED_RR)
1976                 return;
1977
1978         if (--p->rt.time_slice)
1979                 return;
1980
1981         p->rt.time_slice = sched_rr_timeslice;
1982
1983         /*
1984          * Requeue to the end of the queue if we (and all of our ancestors) are
1985          * not the only element on the queue.
1986          */
1987         for_each_sched_rt_entity(rt_se) {
1988                 if (rt_se->run_list.prev != rt_se->run_list.next) {
1989                         requeue_task_rt(rq, p, 0);
1990                         set_tsk_need_resched(p);
1991                         return;
1992                 }
1993         }
1994 }
1995
1996 static void set_curr_task_rt(struct rq *rq)
1997 {
1998         struct task_struct *p = rq->curr;
1999
2000         p->se.exec_start = rq_clock_task(rq);
2001
2002         /* The running task is never eligible for pushing */
2003         dequeue_pushable_task(rq, p);
2004 }
2005
2006 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2007 {
2008         /*
2009          * Time slice is 0 for SCHED_FIFO tasks
2010          */
2011         if (task->policy == SCHED_RR)
2012                 return sched_rr_timeslice;
2013         else
2014                 return 0;
2015 }
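
/*
 * Userspace observes this quantum through sched_rr_get_interval(2), which
 * converts it to a timespec -- a sketch only, not part of this file:
 *
 *	struct timespec ts;
 *	sched_rr_get_interval(pid, &ts);	<- RR quantum, or 0 for SCHED_FIFO
 *
 * The quantum itself (sched_rr_timeslice) is normally tunable at run time
 * through the kernel.sched_rr_timeslice_ms sysctl.
 */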
2016
2017 const struct sched_class rt_sched_class = {
2018         .next                   = &fair_sched_class,
2019         .enqueue_task           = enqueue_task_rt,
2020         .dequeue_task           = dequeue_task_rt,
2021         .yield_task             = yield_task_rt,
2022
2023         .check_preempt_curr     = check_preempt_curr_rt,
2024
2025         .pick_next_task         = pick_next_task_rt,
2026         .put_prev_task          = put_prev_task_rt,
2027
2028 #ifdef CONFIG_SMP
2029         .select_task_rq         = select_task_rq_rt,
2030
2031         .set_cpus_allowed       = set_cpus_allowed_rt,
2032         .rq_online              = rq_online_rt,
2033         .rq_offline             = rq_offline_rt,
2034         .pre_schedule           = pre_schedule_rt,
2035         .post_schedule          = post_schedule_rt,
2036         .task_woken             = task_woken_rt,
2037         .switched_from          = switched_from_rt,
2038 #endif
2039
2040         .set_curr_task          = set_curr_task_rt,
2041         .task_tick              = task_tick_rt,
2042
2043         .get_rr_interval        = get_rr_interval_rt,
2044
2045         .prio_changed           = prio_changed_rt,
2046         .switched_to            = switched_to_rt,
2047 };
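
/*
 * The core scheduler dispatches policy decisions for SCHED_FIFO/SCHED_RR
 * tasks through the table above.  The .next pointer chains to
 * fair_sched_class, matching the order in which scheduling classes are
 * consulted: CFS tasks only get to run when no RT task is runnable.
 */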
2048
2049 #ifdef CONFIG_SCHED_DEBUG
2050 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2051
2052 void print_rt_stats(struct seq_file *m, int cpu)
2053 {
2054         rt_rq_iter_t iter;
2055         struct rt_rq *rt_rq;
2056
2057         rcu_read_lock();
2058         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2059                 print_rt_rq(m, cpu, rt_rq);
2060         rcu_read_unlock();
2061 }
2062 #endif /* CONFIG_SCHED_DEBUG */