Merge branch 'depends/rmk/memory_h' into next/fixes

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 10d018212bab8b1ad2f53e1ed388a4bbd56d4656..af1177858be36cb559345cc25704873cbe033181 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -185,11 +185,23 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 
 typedef struct task_group *rt_rq_iter_t;
 
-#define for_each_rt_rq(rt_rq, iter, rq) \
-       for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
-            (&iter->list != &task_groups) && \
-            (rt_rq = iter->rt_rq[cpu_of(rq)]); \
-            iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+       do {
+               tg = list_entry_rcu(tg->list.next,
+                       typeof(struct task_group), list);
+       } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
+
+       if (&tg->list == &task_groups)
+               tg = NULL;
+
+       return tg;
+}
+
+#define for_each_rt_rq(rt_rq, iter, rq)                                        \
+       for (iter = container_of(&task_groups, typeof(*iter), list);    \
+               (iter = next_task_group(iter)) &&                       \
+               (rt_rq = iter->rt_rq[cpu_of(rq)]);)
 
 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
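
The hunk above reworks for_each_rt_rq(): the old macro's loop condition conflated "reached the end of task_groups" with "this group's rt_rq is NULL", and it walked autogroup entries, whose rt_rq pointers alias root_task_group's, so an iterator could visit the same rt_rq more than once. The new next_task_group() helper skips autogroups and returns NULL exactly at the end of the list. A minimal sketch of a caller, in the spirit of print_rt_stats() from the sched_debug code (the function name and printk body here are illustrative, not kernel code):

/* Illustrative caller: the walk must run under rcu_read_lock(), since
 * next_task_group() traverses task_groups via list_entry_rcu(). */
static void dump_rt_rqs(struct rq *rq)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        rcu_read_lock();
        for_each_rt_rq(rt_rq, iter, rq)
                printk(KERN_DEBUG "rt_rq: nr_running=%lu throttled=%d\n",
                       rt_rq->rt_nr_running, rt_rq->rt_throttled);
        rcu_read_unlock();
}
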
@@ -1038,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
         */
        if (curr && unlikely(rt_task(curr)) &&
            (curr->rt.nr_cpus_allowed < 2 ||
-            curr->prio < p->prio) &&
+            curr->prio <= p->prio) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);
 
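
In select_task_rq_rt() the priority test becomes non-strict. The kernel's ->prio scale is inverted (a lower value means a higher priority), and SCHED_FIFO/SCHED_RR tasks of equal priority do not preempt one another, so under the old `<` test an equal-priority wakee stayed on this CPU queued behind curr; with `<=` it now also consults find_lowest_rq() and can migrate to a less contended CPU. A small predicate spelling out the convention (illustrative only, not a kernel helper):

/* Illustrative: true when curr would keep the wakee p waiting on this
 * CPU, i.e. exactly when searching for another CPU can cut p's latency. */
static inline bool curr_blocks_wakee(const struct task_struct *curr,
                                     const struct task_struct *p)
{
        return curr->prio <= p->prio;   /* lower value == higher priority */
}
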
@@ -1126,7 +1138,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 
        rt_rq = &rq->rt;
 
-       if (unlikely(!rt_rq->rt_nr_running))
+       if (!rt_rq->rt_nr_running)
                return NULL;
 
        if (rt_rq_throttled(rt_rq))
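
Dropping unlikely() here removes a branch hint that contradicts the common case: on most workloads a runqueue has no runnable RT tasks most of the time, so !rt_rq->rt_nr_running is usually true and the annotation steered the compiler toward the cold path. For reference, the kernel's hints (include/linux/compiler.h) are thin wrappers around GCC's __builtin_expect(), and they only help when the stated expectation matches reality:

#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)
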
@@ -1548,7 +1560,7 @@ skip:
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
        /* Try to pull RT tasks here if we lower this rq's prio */
-       if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
+       if (rq->rt.highest_prio.curr > prev->prio)
                pull_rt_task(rq);
 }
 
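
The removed rt_task(prev) check in pre_schedule_rt() was redundant rather than wrong: pre_schedule() is a per-class hook dispatched through prev's scheduling class, so by the time pre_schedule_rt() runs, prev is necessarily an RT task. The dispatch site in kernel/sched.c of this period looks roughly like this:

/* Approximate sketch of the caller: the hook is reached via
 * prev->sched_class, so pre_schedule_rt() only ever sees an RT prev. */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
        if (prev->sched_class->pre_schedule)
                prev->sched_class->pre_schedule(rq, prev);
}
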
@@ -1569,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
            p->rt.nr_cpus_allowed > 1 &&
            rt_task(rq->curr) &&
            (rq->curr->rt.nr_cpus_allowed < 2 ||
-            rq->curr->prio < p->prio))
+            rq->curr->prio <= p->prio))
                push_rt_tasks(rq);
 }
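
This mirrors the `<=` change in select_task_rq_rt() above, keeping the wake-time CPU selection and the post-wakeup push path in agreement: an equal-priority wakee is moved along rather than left queued behind the running task. The shared condition, pulled out as a helper for illustration (the kernel keeps it open-coded at both sites):

/* Illustrative consolidation, not kernel code: the test both call
 * sites now apply before pushing a woken RT task p off a CPU whose
 * pinned or same/higher-priority curr would keep it waiting. */
static inline bool rt_wakee_should_move(struct task_struct *curr,
                                        struct task_struct *p)
{
        return rt_task(curr) &&
               (curr->rt.nr_cpus_allowed < 2 ||
                curr->prio <= p->prio) &&
               p->rt.nr_cpus_allowed > 1;
}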