tracing: Add skip argument to trace_dump_stack()
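
(Illustrative note, not part of the patch: with the new signature below, callers now pass the number of extra stack frames to skip, and the function adds 3 internally so that a skip of 0 starts the dump at the direct caller. A minimal sketch of the new call convention, using a hypothetical wrapper named my_debug_helper:)

    /* Hypothetical wrapper, for illustration only -- not part of this patch. */
    void my_debug_helper(void)
    {
            trace_dump_stack(1);    /* skip my_debug_helper()'s own frame */
    }
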
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5043a0c4dde0ec55e7167d44fb1fcaa1c6db71a9..8aa53213201ff985e766b30541b243d9d93ead53 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -149,14 +149,14 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
-static int __init alloc_snapshot(char *str)
+static int __init boot_alloc_snapshot(char *str)
 {
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
 }
-__setup("alloc_snapshot", alloc_snapshot);
+__setup("alloc_snapshot", boot_alloc_snapshot);
 
 
 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
@@ -442,17 +442,23 @@ void tracing_snapshot(void)
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;
 
+       if (in_nmi()) {
+               internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+               internal_trace_puts("*** snapshot is being ignored        ***\n");
+               return;
+       }
+
        if (!tr->allocated_snapshot) {
-               trace_printk("*** SNAPSHOT NOT ALLOCATED ***\n");
-               trace_printk("*** stopping trace here!   ***\n");
+               internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
+               internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }
 
        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
-               trace_printk("*** LATENCY TRACER ACTIVE ***\n");
-               trace_printk("*** Can not use snapshot (sorry) ***\n");
+               internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
+               internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }
 
@@ -460,9 +466,42 @@ void tracing_snapshot(void)
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(tracing_snapshot);
 
 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
+static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
+
+static int alloc_snapshot(struct trace_array *tr)
+{
+       int ret;
+
+       if (!tr->allocated_snapshot) {
+
+               /* allocate spare buffer */
+               ret = resize_buffer_duplicate_size(&tr->max_buffer,
+                                  &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+               if (ret < 0)
+                       return ret;
+
+               tr->allocated_snapshot = true;
+       }
+
+       return 0;
+}
+
+void free_snapshot(struct trace_array *tr)
+{
+       /*
+        * We don't free the ring buffer. Instead, resize it because
+        * the max_tr ring buffer has some state (e.g. ring->clock) and
+        * we want to preserve it.
+        */
+       ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
+       set_buffer_entries(&tr->max_buffer, 1);
+       tracing_reset_online_cpus(&tr->max_buffer);
+       tr->allocated_snapshot = false;
+}
 
 /**
  * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
@@ -480,29 +519,25 @@ void tracing_snapshot_alloc(void)
        struct trace_array *tr = &global_trace;
        int ret;
 
-       if (!tr->allocated_snapshot) {
-
-               /* allocate spare buffer */
-               ret = resize_buffer_duplicate_size(&tr->max_buffer,
-                                  &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
-               if (WARN_ON(ret < 0))
-                       return;
-
-               tr->allocated_snapshot = true;
-       }
+       ret = alloc_snapshot(tr);
+       if (WARN_ON(ret < 0))
+               return;
 
        tracing_snapshot();
 }
+EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 #else
 void tracing_snapshot(void)
 {
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 }
+EXPORT_SYMBOL_GPL(tracing_snapshot);
 void tracing_snapshot_alloc(void)
 {
        /* Give warning */
        tracing_snapshot();
 }
+EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 /**
@@ -1622,8 +1657,9 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 
 /**
  * trace_dump_stack - record a stack back trace in the trace buffer
+ * @skip: Number of functions to skip (helper handlers)
  */
-void trace_dump_stack(void)
+void trace_dump_stack(int skip)
 {
        unsigned long flags;
 
@@ -1632,9 +1668,13 @@ void trace_dump_stack(void)
 
        local_save_flags(flags);
 
-       /* skipping 3 traces, seems to get us at the caller of this function */
-       __ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, 3,
-                            preempt_count(), NULL);
+       /*
+        * Skip 3 more frames, which seems to get us to the caller of
+        * this function.
+        */
+       skip += 3;
+       __ftrace_trace_stack(global_trace.trace_buffer.buffer,
+                            flags, skip, preempt_count(), NULL);
 }
 
 static DEFINE_PER_CPU(int, user_stack_count);
@@ -3571,15 +3611,7 @@ static int tracing_set_tracer(const char *buf)
                 * so a synchronized_sched() is sufficient.
                 */
                synchronize_sched();
-               /*
-                * We don't free the ring buffer. instead, resize it because
-                * The max_tr ring buffer has some state (e.g. ring->clock) and
-                * we want preserve it.
-                */
-               ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
-               set_buffer_entries(&tr->max_buffer, 1);
-               tracing_reset_online_cpus(&tr->max_buffer);
-               tr->allocated_snapshot = false;
+               free_snapshot(tr);
        }
 #endif
        destroy_trace_option_files(topts);
@@ -3588,12 +3620,9 @@ static int tracing_set_tracer(const char *buf)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
        if (t->use_max_tr && !had_max_tr) {
-               /* we need to make per cpu buffer sizes equivalent */
-               ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer,
-                                                  RING_BUFFER_ALL_CPUS);
+               ret = alloc_snapshot(tr);
                if (ret < 0)
                        goto out;
-               tr->allocated_snapshot = true;
        }
 #endif
 
@@ -4465,14 +4494,8 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                        ret = -EINVAL;
                        break;
                }
-               if (tr->allocated_snapshot) {
-                       /* free spare buffer */
-                       ring_buffer_resize(tr->max_buffer.buffer, 1,
-                                          RING_BUFFER_ALL_CPUS);
-                       set_buffer_entries(&tr->max_buffer, 1);
-                       tracing_reset_online_cpus(&tr->max_buffer);
-                       tr->allocated_snapshot = false;
-               }
+               if (tr->allocated_snapshot)
+                       free_snapshot(tr);
                break;
        case 1:
 /* Only allow per-cpu swap if the ring buffer supports it */
@@ -4483,12 +4506,9 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                }
 #endif
                if (!tr->allocated_snapshot) {
-                       /* allocate spare buffer */
-                       ret = resize_buffer_duplicate_size(&tr->max_buffer,
-                                       &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+                       ret = alloc_snapshot(tr);
                        if (ret < 0)
                                break;
-                       tr->allocated_snapshot = true;
                }
                local_irq_disable();
                /* Now, we're going to swap */
@@ -5071,7 +5091,114 @@ static const struct file_operations tracing_dyn_info_fops = {
        .read           = tracing_read_dyn_info,
        .llseek         = generic_file_llseek,
 };
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
+static void
+ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+       tracing_snapshot();
+}
+
+static void
+ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+       unsigned long *count = (unsigned long *)data;
+
+       if (!*count)
+               return;
+
+       if (*count != -1)
+               (*count)--;
+
+       tracing_snapshot();
+}
+
+static int
+ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
+                     struct ftrace_probe_ops *ops, void *data)
+{
+       long count = (long)data;
+
+       seq_printf(m, "%ps:", (void *)ip);
+
+       seq_printf(m, "snapshot");
+
+       if (count == -1)
+               seq_printf(m, ":unlimited\n");
+       else
+               seq_printf(m, ":count=%ld\n", count);
+
+       return 0;
+}
+
+static struct ftrace_probe_ops snapshot_probe_ops = {
+       .func                   = ftrace_snapshot,
+       .print                  = ftrace_snapshot_print,
+};
+
+static struct ftrace_probe_ops snapshot_count_probe_ops = {
+       .func                   = ftrace_count_snapshot,
+       .print                  = ftrace_snapshot_print,
+};
+
+static int
+ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
+                              char *glob, char *cmd, char *param, int enable)
+{
+       struct ftrace_probe_ops *ops;
+       void *count = (void *)-1;
+       char *number;
+       int ret;
+
+       /* hash funcs only work with set_ftrace_filter */
+       if (!enable)
+               return -EINVAL;
+
+       ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
+
+       if (glob[0] == '!') {
+               unregister_ftrace_function_probe_func(glob+1, ops);
+               return 0;
+       }
+
+       if (!param)
+               goto out_reg;
+
+       number = strsep(&param, ":");
+
+       if (!strlen(number))
+               goto out_reg;
+
+       /*
+        * We use the callback data field (which is a pointer)
+        * as our counter.
+        */
+       ret = kstrtoul(number, 0, (unsigned long *)&count);
+       if (ret)
+               return ret;
+
+ out_reg:
+       ret = register_ftrace_function_probe(glob, ops, count);
+
+       if (ret >= 0)
+               alloc_snapshot(&global_trace);
+
+       return ret < 0 ? ret : 0;
+}
+
+static struct ftrace_func_command ftrace_snapshot_cmd = {
+       .name                   = "snapshot",
+       .func                   = ftrace_trace_snapshot_callback,
+};
+
+static int register_snapshot_cmd(void)
+{
+       return register_ftrace_command(&ftrace_snapshot_cmd);
+}
+#else
+static inline int register_snapshot_cmd(void) { return 0; }
+#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 
 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
 {
@@ -6061,6 +6188,8 @@ __init static int tracer_alloc_buffers(void)
                trace_set_options(&global_trace, option);
        }
 
+       register_snapshot_cmd();
+
        return 0;
 
 out_free_cpumask:
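
(Illustrative note, not part of the patch: tracing_snapshot() and tracing_snapshot_alloc() are now EXPORT_SYMBOL_GPL, so a GPL module can trigger snapshots directly. A minimal sketch of that usage follows; the header that declares the snapshot API is assumed here and is not shown in this diff.)

    /* Minimal sketch of a GPL module using the newly exported snapshot calls. */
    #include <linux/module.h>
    #include <linux/kernel.h>   /* assumed to carry the snapshot declarations */

    static int __init snap_example_init(void)
    {
            /* Allocate the spare buffer if needed, then take a first snapshot. */
            tracing_snapshot_alloc();
            return 0;
    }

    static void __exit snap_example_exit(void)
    {
            /* One more snapshot on unload; ignored when called from NMI context. */
            tracing_snapshot();
    }

    module_init(snap_example_init);
    module_exit(snap_example_exit);
    MODULE_LICENSE("GPL");      /* required: the snapshot symbols are GPL-only exports */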