ftrace: remove ad-hoc code
author    Ingo Molnar <mingo@elte.hu>
          Mon, 12 May 2008 19:20:54 +0000 (21:20 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
          Fri, 23 May 2008 19:13:32 +0000 (21:13 +0200)
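Remove the ad-hoc scheduler tracing code: the ftrace_task()/ftrace_all_fair_tasks() helpers in kernel/sched.c, which walked the CFS runqueue and emitted a __trace_special() entry for every fair-class task on each context switch and wakeup whenever the TRACE_ITER_SCHED_TREE flag was set, plus the leftover ftrace_special() debug calls in wake_affine() and check_preempt_wakeup() in kernel/sched_fair.c.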
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/sched.c
kernel/sched_fair.c
kernel/trace/trace_sched_switch.c

diff --git a/kernel/sched.c b/kernel/sched.c
index 1ec3fb2efee6d6bcccb72cc292a59e0b82ba959c..ad95cca4e42e19520a01f20fb2c3f8d55c78d0c6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2412,53 +2412,6 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-
-void ftrace_task(struct task_struct *p, void *__tr, void *__data)
-{
-#if 0
-       /*  
-        * trace timeline tree
-        */
-       __trace_special(__tr, __data,
-                       p->pid, p->se.vruntime, p->se.sum_exec_runtime);
-#else
-       /*
-        * trace balance metrics
-        */
-       __trace_special(__tr, __data,
-                       p->pid, p->se.avg_overlap, 0);
-#endif
-}
-
-void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
-{
-       struct task_struct *p;
-       struct sched_entity *se;
-       struct rb_node *curr;
-       struct rq *rq = __rq;
-
-       if (rq->cfs.curr) {
-               p = task_of(rq->cfs.curr);
-               ftrace_task(p, __tr, __data);
-       }
-       if (rq->cfs.next) {
-               p = task_of(rq->cfs.next);
-               ftrace_task(p, __tr, __data);
-       }
-
-       for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
-               se = rb_entry(curr, struct sched_entity, run_node);
-               if (!entity_is_task(se))
-                       continue;
-
-               p = task_of(se);
-               ftrace_task(p, __tr, __data);
-       }
-}
-
-#endif
-
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index dc1856f10795373deb3548d8c2bbbcbc656cf3ba..e24ecd39c4b8aec9786d0ab3df0a01ad6dcba08d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1061,8 +1061,6 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
        if (!(this_sd->flags & SD_WAKE_AFFINE))
                return 0;
 
-       ftrace_special(__LINE__, curr->se.avg_overlap, sync);
-       ftrace_special(__LINE__, p->se.avg_overlap, -1);
        /*
         * If the currently running task will sleep within
         * a reasonable amount of time then attract this newly
@@ -1240,7 +1238,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
        if (unlikely(se == pse))
                return;
 
-       ftrace_special(__LINE__, p->pid, se->last_wakeup);
        cfs_rq_of(pse)->next = pse;
 
        /*
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index bddf676914ed85b292b5c45109e4384e3b767269..5671db0e1827e99b7828cf84f36569a8a8dd4b26 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -36,11 +36,8 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
 
-       if (likely(disabled == 1)) {
+       if (likely(disabled == 1))
                tracing_sched_switch_trace(tr, data, prev, next, flags);
-               if (trace_flags & TRACE_ITER_SCHED_TREE)
-                       ftrace_all_fair_tasks(__rq, tr, data);
-       }
 
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
@@ -65,11 +62,8 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
 
-       if (likely(disabled == 1)) {
+       if (likely(disabled == 1))
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
-               if (trace_flags & TRACE_ITER_SCHED_TREE)
-                       ftrace_all_fair_tasks(__rq, tr, data);
-       }
 
        atomic_dec(&data->disabled);
        local_irq_restore(flags);