ftrace: sched special
author Ingo Molnar <mingo@elte.hu>
	Mon, 12 May 2008 19:20:53 +0000 (21:20 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
	Fri, 23 May 2008 19:08:47 +0000 (21:08 +0200)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/sched.h
kernel/sched_fair.c
kernel/trace/trace.c
kernel/trace/trace_sched_switch.c

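The diff below declares ftrace_special() in sched.h (with an empty static inline stub for builds without the tracer), implements it in trace_sched_switch.c, calls it from wake_affine() and check_preempt_wakeup() in sched_fair.c, and prefixes TRACE_SPECIAL entries with "#" in the trace output. A minimal sketch of how such a probe might be dropped into a scheduler path of interest; my_debug_path() and its arguments are hypothetical, only ftrace_special() itself comes from this commit:

	/*
	 * Hypothetical instrumentation, not part of this commit: tag a
	 * code path with its source line plus two values of interest.
	 * With the tracer disabled this compiles down to the empty stub.
	 */
	static void my_debug_path(struct task_struct *p, int sync)
	{
		ftrace_special(__LINE__, p->pid, sync);
	}
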
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5b186bed54bc9f28632b3c800ffcf24816e70e04..360ca99033d228dffc3ba23cc3b718efaf51017a 100644
@@ -2138,6 +2138,8 @@ extern void
 ftrace_wake_up_task(void *rq, struct task_struct *wakee,
                    struct task_struct *curr);
 extern void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
 #else
 static inline void
 ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next)
@@ -2155,6 +2157,10 @@ ftrace_wake_up_task(void *rq, struct task_struct *wakee,
 static inline void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
 {
 }
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+}
 #endif
 
 extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
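The pairing above of a real extern prototype with an empty static inline stub is the usual kernel idiom for optional tracing hooks: call sites always compile, and the stub vanishes when the tracer is not configured. A sketch of the same idiom with a hypothetical hook and config symbol:

	#ifdef CONFIG_MY_TRACER		/* hypothetical config option */
	extern void my_hook(unsigned long a, unsigned long b, unsigned long c);
	#else
	static inline void
	my_hook(unsigned long a, unsigned long b, unsigned long c)
	{
	}
	#endif
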
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e24ecd39c4b8aec9786d0ab3df0a01ad6dcba08d..dc1856f10795373deb3548d8c2bbbcbc656cf3ba 100644
@@ -1061,6 +1061,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
        if (!(this_sd->flags & SD_WAKE_AFFINE))
                return 0;
 
+       ftrace_special(__LINE__, curr->se.avg_overlap, sync);
+       ftrace_special(__LINE__, p->se.avg_overlap, -1);
        /*
         * If the currently running task will sleep within
         * a reasonable amount of time then attract this newly
@@ -1238,6 +1240,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
        if (unlikely(se == pse))
                return;
 
+       ftrace_special(__LINE__, p->pid, se->last_wakeup);
        cfs_rq_of(pse)->next = pse;
 
        /*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3a4032492fcbd53455bb4419295a150cd420272a..b87a26414892b57e749f17e05434b7db4b3cdae8 100644
@@ -1251,7 +1251,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                                 comm);
                break;
        case TRACE_SPECIAL:
-               trace_seq_printf(s, " %ld %ld %ld\n",
+               trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
@@ -1335,7 +1335,7 @@ static int print_trace_fmt(struct trace_iterator *iter)
                        return 0;
                break;
        case TRACE_SPECIAL:
-               ret = trace_seq_printf(s, " %ld %ld %ld\n",
+               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
@@ -1400,7 +1400,7 @@ static int print_raw_fmt(struct trace_iterator *iter)
                break;
        case TRACE_SPECIAL:
        case TRACE_STACK:
-               ret = trace_seq_printf(s, " %ld %ld %ld\n",
+               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 5a217e863586e3ba2bfcc609892fa6ba0786c41d..bddf676914ed85b292b5c45109e4384e3b767269 100644
@@ -103,6 +103,30 @@ ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
        wakeup_sched_wakeup(wakee, curr);
 }
 
+void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+       struct trace_array *tr = ctx_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+
+       if (!tracer_enabled)
+               return;
+
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1))
+               __trace_special(tr, data, arg1, arg2, arg3);
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+
 static void sched_switch_reset(struct trace_array *tr)
 {
        int cpu;
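
ftrace_special() above disables interrupts and increments the per-CPU data->disabled counter before recording, and only the outermost caller (the one that sees the counter reach 1) writes a TRACE_SPECIAL entry; nested or reentrant hits on the same CPU are dropped rather than corrupting the buffer. A minimal sketch of that inc-and-test guard in isolation; the names my_probe() and record_value() are hypothetical, and a single global atomic stands in for the per-CPU counter:

	static atomic_t in_probe;	/* stand-in for the per-CPU disabled count */

	static void my_probe(unsigned long val)
	{
		/* Only the outermost entry records; nested calls bail out. */
		if (atomic_inc_return(&in_probe) == 1)
			record_value(val);	/* hypothetical recorder */
		atomic_dec(&in_probe);
	}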