diff --git a/kernel/sched.c b/kernel/sched.c
index d9dbf8ee6ca4be3dd119567afbdc636a08465995..34a945bcc022a6767ae2ed4adbcd5d56716db98b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -176,6 +176,13 @@ static unsigned int task_timeslice(task_t *p)
 #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)      \
                                < (long long) (sd)->cache_hot_time)
 
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+       __put_task_struct(container_of(rhp, struct task_struct, rcu));
+}
+
+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+
 /*
  * These are the runqueue data structures:
  */
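
The new __put_task_struct_cb() is the RCU-callback half of a deferred
task_struct free: call_rcu() hands the callback only the embedded rcu_head,
and container_of() recovers the enclosing task_struct before the final
release. A minimal userspace sketch of that pointer recovery, with made-up
names (fake_rcu_head, demo_task) standing in for the kernel types:

/* Standalone illustration of the container_of() + deferred-free pattern. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_rcu_head {
        void (*func)(struct fake_rcu_head *);
};

struct demo_task {
        int pid;
        struct fake_rcu_head rcu;       /* embedded head, like task_struct.rcu */
};

/* The callback sees only the embedded head; container_of() walks back
 * to the start of the enclosing structure. */
static void demo_free_cb(struct fake_rcu_head *rhp)
{
        struct demo_task *t = container_of(rhp, struct demo_task, rcu);

        printf("freeing task %d after the grace period\n", t->pid);
        free(t);
}

int main(void)
{
        struct demo_task *t = malloc(sizeof(*t));

        if (!t)
                return 1;
        t->pid = 42;
        t->rcu.func = demo_free_cb;
        /* In the kernel call_rcu() would defer this; here the callback is
         * invoked directly just to show the pointer arithmetic. */
        t->rcu.func(&t->rcu);
        return 0;
}
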
@@ -670,6 +677,31 @@ static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
        rq->prio_bias -= MAX_PRIO - prio;
 }
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+       rq->nr_running++;
+       if (rt_task(p)) {
+               if (p != rq->migration_thread)
+                       /*
+                        * The migration thread does the actual balancing. Do
+                        * not bias by its priority as the ultra high priority
+                        * will skew balancing adversely.
+                        */
+                       inc_prio_bias(rq, p->prio);
+       } else
+               inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+       rq->nr_running--;
+       if (rt_task(p)) {
+               if (p != rq->migration_thread)
+                       dec_prio_bias(rq, p->prio);
+       } else
+               dec_prio_bias(rq, p->static_prio);
+}
 #else
 static inline void inc_prio_bias(runqueue_t *rq, int prio)
 {
@@ -678,25 +710,17 @@ static inline void inc_prio_bias(runqueue_t *rq, int prio)
 static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 }
-#endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
        rq->nr_running++;
-       if (rt_task(p))
-               inc_prio_bias(rq, p->prio);
-       else
-               inc_prio_bias(rq, p->static_prio);
 }
 
 static inline void dec_nr_running(task_t *p, runqueue_t *rq)
 {
        rq->nr_running--;
-       if (rt_task(p))
-               dec_prio_bias(rq, p->prio);
-       else
-               dec_prio_bias(rq, p->static_prio);
 }
+#endif
 
 /*
  * __activate_task - move a task to the runqueue.
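
With CONFIG_SMP, inc_nr_running()/dec_nr_running() now fold the priority-bias
bookkeeping into the running-task count, and deliberately skip the migration
thread: its real-time priority would contribute a huge bias and skew balancing.
A rough worked example of the per-task contribution, assuming the 2.6-era
priority layout (MAX_PRIO of 140, nice 0 mapping to static_prio 120):

/* Illustrative only; bias_for() mirrors the MAX_PRIO - prio term used by
 * inc_prio_bias()/dec_prio_bias() above. */
#include <stdio.h>

#define MAX_PRIO 140

static int bias_for(int prio)
{
        return MAX_PRIO - prio;
}

int main(void)
{
        printf("nice +19 task (static_prio 139): bias %d\n", bias_for(139));
        printf("nice   0 task (static_prio 120): bias %d\n", bias_for(120));
        printf("nice -20 task (static_prio 100): bias %d\n", bias_for(100));
        printf("RT task at prio 50:              bias %d\n", bias_for(50));
        return 0;
}

A nice +19 task adds 1 to rq->prio_bias while an RT task at priority 50 would
add 90, which is why the migration thread is left out of the accounting.
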
@@ -798,7 +822,8 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
        }
 #endif
 
-       p->prio = recalc_task_prio(p, now);
+       if (!rt_task(p))
+               p->prio = recalc_task_prio(p, now);
 
        /*
         * This checks to make sure it's not an uninterruptible task
@@ -847,21 +872,28 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 #ifdef CONFIG_SMP
 static void resched_task(task_t *p)
 {
-       int need_resched, nrpolling;
+       int cpu;
 
        assert_spin_locked(&task_rq(p)->lock);
 
-       /* minimise the chance of sending an interrupt to poll_idle() */
-       nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
-       need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
-       nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
+       if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+               return;
+
+       set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+
+       cpu = task_cpu(p);
+       if (cpu == smp_processor_id())
+               return;
 
-       if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
-               smp_send_reschedule(task_cpu(p));
+       /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+       smp_mb();
+       if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+               smp_send_reschedule(cpu);
 }
 #else
 static inline void resched_task(task_t *p)
 {
+       assert_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
 }
 #endif
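
The rewritten resched_task() sets TIF_NEED_RESCHED first and only sends an IPI
when the target CPU is remote and not polling; the smp_mb() keeps the flag
store ordered before the TIF_POLLING_NRFLAG test, so a polling idle loop
cannot miss the wakeup. A standalone sketch of that two-flag handshake using
C11 atomics (the flag and function names here are illustrative, not kernel
APIs):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool need_resched;        /* stands in for TIF_NEED_RESCHED   */
static atomic_bool polling;             /* stands in for TIF_POLLING_NRFLAG */

/* Waker side, as in resched_task(): publish the flag, full fence, then
 * decide whether an interrupt is needed. */
static bool waker_needs_ipi(void)
{
        atomic_store_explicit(&need_resched, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() */
        /* A polling CPU will notice need_resched on its own; only a
         * non-polling idle CPU needs the reschedule interrupt. */
        return !atomic_load_explicit(&polling, memory_order_relaxed);
}

/* Idle side: advertise polling, fence, then look for work.  Without the
 * fences on both sides, each CPU could miss the other's store and the
 * wakeup would be lost. */
static bool idle_sees_work(void)
{
        atomic_store_explicit(&polling, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&need_resched, memory_order_relaxed);
}

int main(void)
{
        /* Single-threaded demo of both sides; in the kernel they run on
         * different CPUs. */
        printf("waker would send IPI: %d\n", waker_needs_ipi());
        printf("idle side sees work:  %d\n", idle_sees_work());
        return 0;
}
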
@@ -972,22 +1004,27 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
        runqueue_t *rq = cpu_rq(cpu);
-       unsigned long cpu_load = rq->cpu_load[type-1],
-               load_now = rq->nr_running * SCHED_LOAD_SCALE;
+       unsigned long running = rq->nr_running;
+       unsigned long source_load, cpu_load = rq->cpu_load[type-1],
+               load_now = running * SCHED_LOAD_SCALE;
+
+       if (type == 0)
+               source_load = load_now;
+       else
+               source_load = min(cpu_load, load_now);
 
-       if (idle == NOT_IDLE) {
+       if (running > 1 || (idle == NOT_IDLE && running))
                /*
-                * If we are balancing busy runqueues the load is biased by
-                * priority to create 'nice' support across cpus.
+                * If we are busy rebalancing the load is biased by
+                * priority to create 'nice' support across cpus. When
+                * idle rebalancing we should only bias the source_load if
+                * there is more than one task running on that queue to
+                * prevent idle rebalance from trying to pull tasks from a
+                * queue with only one running task.
                 */
-               cpu_load *= rq->prio_bias;
-               load_now *= rq->prio_bias;
-       }
+               source_load = source_load * rq->prio_bias / running;
 
-       if (type == 0)
-               return load_now;
-
-       return min(cpu_load, load_now);
+       return source_load;
 }
 
 static inline unsigned long source_load(int cpu, int type)
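
The bias itself is now a single scaled term, load * prio_bias / nr_running,
applied only when the queue has more than one runnable task or when
busy-rebalancing a non-empty queue, so idle balancing will not try to pull the
lone task off another CPU. A small numeric sketch, assuming SCHED_LOAD_SCALE
of 128 and the same static_prio mapping as above:

/* Illustrative only; biased_load() mirrors the arithmetic in __source_load()
 * and __target_load(). */
#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL

static unsigned long biased_load(unsigned long raw, unsigned long prio_bias,
                                 unsigned long running, int busy)
{
        if (running > 1 || (busy && running))
                return raw * prio_bias / running;
        return raw;
}

int main(void)
{
        unsigned long raw = 2 * SCHED_LOAD_SCALE;

        /* Two nice 0 tasks: prio_bias = 2 * (140 - 120) = 40. */
        printf("two nice 0 tasks:   %lu -> %lu\n", raw, biased_load(raw, 40, 2, 0));
        /* Two nice +19 tasks: prio_bias = 2 * (140 - 139) = 2. */
        printf("two nice +19 tasks: %lu -> %lu\n", raw, biased_load(raw, 2, 2, 0));
        /* One task, idle rebalance: left unbiased so it is not pulled. */
        printf("one task, idle:     %lu -> %lu\n", SCHED_LOAD_SCALE,
               biased_load(SCHED_LOAD_SCALE, 20, 1, 0));
        return 0;
}

A queue full of niced tasks therefore looks much lighter to the balancer than
one running the same number of nice 0 tasks.
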
@@ -1001,17 +1038,19 @@ static inline unsigned long source_load(int cpu, int type)
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
        runqueue_t *rq = cpu_rq(cpu);
-       unsigned long cpu_load = rq->cpu_load[type-1],
-               load_now = rq->nr_running * SCHED_LOAD_SCALE;
+       unsigned long running = rq->nr_running;
+       unsigned long target_load, cpu_load = rq->cpu_load[type-1],
+               load_now = running * SCHED_LOAD_SCALE;
 
        if (type == 0)
-               return load_now;
+               target_load = load_now;
+       else
+               target_load = max(cpu_load, load_now);
 
-       if (idle == NOT_IDLE) {
-               cpu_load *= rq->prio_bias;
-               load_now *= rq->prio_bias;
-       }
-       return max(cpu_load, load_now);
+       if (running > 1 || (idle == NOT_IDLE && running))
+               target_load = target_load * rq->prio_bias / running;
+
+       return target_load;
 }
 
 static inline unsigned long target_load(int cpu, int type)
@@ -1405,7 +1444,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
 #endif
 #ifdef CONFIG_PREEMPT
        /* Want to start with kernel preemption disabled. */
-       p->thread_info->preempt_count = 1;
+       task_thread_info(p)->preempt_count = 1;
 #endif
        /*
         * Share the timeslice between parent and child, thus the
@@ -4295,10 +4334,10 @@ static void show_task(task_t *p)
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
        {
-               unsigned long *n = (unsigned long *) (p->thread_info+1);
+               unsigned long *n = end_of_stack(p);
                while (!*n)
                        n++;
-               free = (unsigned long) n - (unsigned long)(p->thread_info+1);
+               free = (unsigned long)n - (unsigned long)end_of_stack(p);
        }
 #endif
        printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
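
The stack-usage estimate in show_task() relies on the stack area being
zero-filled when it is allocated: scanning upward from end_of_stack() for the
first non-zero word bounds how much of the stack was never touched. A
self-contained sketch of that scan (the array size and the touched offset are
arbitrary):

#include <stdio.h>
#include <string.h>

#define STACK_WORDS 1024

int main(void)
{
        unsigned long stack[STACK_WORDS];
        unsigned long *n = stack;       /* plays the role of end_of_stack(p) */
        unsigned long free_bytes;

        memset(stack, 0, sizeof(stack));
        stack[300] = 0xdeadbeef;        /* pretend the task dipped this deep */

        while (!*n)                     /* first word that was ever written */
                n++;
        free_bytes = (unsigned long)n - (unsigned long)stack;
        printf("estimated untouched stack: %lu bytes\n", free_bytes);
        return 0;
}
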
@@ -4347,6 +4386,7 @@ void show_state(void)
        } while_each_thread(g, p);
 
        read_unlock(&tasklist_lock);
+       mutex_debug_show_all_locks();
 }
 
 /**
@@ -4378,9 +4418,9 @@ void __devinit init_idle(task_t *idle, int cpu)
 
        /* Set the preempt count _outside_ the spinlocks! */
 #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-       idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+       task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
 #else
-       idle->thread_info->preempt_count = 0;
+       task_thread_info(idle)->preempt_count = 0;
 #endif
 }