tracing: Add skip argument to trace_dump_stack()
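A minimal usage sketch for the new argument (illustration only; my_debug_checkpoint() is an invented helper and the header location is an assumption, while trace_dump_stack() is the function whose signature changes in this diff):

#include <linux/kernel.h>	/* assumed header declaring trace_dump_stack() */

static void my_debug_checkpoint(void)	/* hypothetical wrapper */
{
	/*
	 * trace_dump_stack() used to take no argument, so the dump always
	 * began at its immediate caller.  Passing skip = 1 drops this
	 * wrapper's own frame as well, so the recorded stack starts at
	 * whoever called my_debug_checkpoint().
	 */
	trace_dump_stack(1);
}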
index 57895d4765092394055890c037840c32f1bdb828..8aa53213201ff985e766b30541b243d9d93ead53 100644
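The diff below also adds and exports tracing_snapshot() and tracing_snapshot_alloc(). A minimal sketch of in-kernel use, assuming the declarations are reachable via linux/kernel.h; the module and function names are invented for the example:

#include <linux/kernel.h>	/* assumed location of the tracing_snapshot*() declarations */
#include <linux/module.h>

static int __init snap_demo_init(void)	/* hypothetical module init */
{
	/* May sleep: allocates the spare (max) buffer if it does not exist yet. */
	tracing_snapshot_alloc();
	return 0;
}

static void __maybe_unused snap_demo_on_error(void)	/* hypothetical error hook */
{
	/*
	 * Swap the live ring buffer with the snapshot buffer and keep
	 * tracing.  Calls from NMI context are ignored, and tracing is
	 * stopped instead if the snapshot buffer was never allocated
	 * (see tracing_snapshot() in the hunks below).
	 */
	tracing_snapshot();
}

module_init(snap_demo_init);
MODULE_LICENSE("GPL");

The alloc_snapshot boot parameter added below has the same effect at boot time: it pre-allocates the snapshot buffer alongside the expanded main ring buffer.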
@@ -47,7 +47,7 @@
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
  */
-int ring_buffer_expanded;
+bool ring_buffer_expanded;
 
 /*
  * We need to change this state when a selftest is running.
@@ -121,12 +121,14 @@ static int tracing_set_tracer(const char *buf);
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
+static bool allocate_snapshot;
+
 static int __init set_cmdline_ftrace(char *str)
 {
        strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
-       ring_buffer_expanded = 1;
+       ring_buffer_expanded = true;
        return 1;
 }
 __setup("ftrace=", set_cmdline_ftrace);
@@ -147,6 +149,15 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+static int __init boot_alloc_snapshot(char *str)
+{
+       allocate_snapshot = true;
+       /* We also need the main ring buffer expanded */
+       ring_buffer_expanded = true;
+       return 1;
+}
+__setup("alloc_snapshot", boot_alloc_snapshot);
+
 
 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 static char *trace_boot_options __initdata;
@@ -339,6 +350,196 @@ void tracing_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_on);
 
+/**
+ * __trace_puts - write a constant string into the trace buffer.
+ * @ip:           The address of the caller
+ * @str:   The constant string to write
+ * @size:  The size of the string.
+ */
+int __trace_puts(unsigned long ip, const char *str, int size)
+{
+       struct ring_buffer_event *event;
+       struct ring_buffer *buffer;
+       struct print_entry *entry;
+       unsigned long irq_flags;
+       int alloc;
+
+       alloc = sizeof(*entry) + size + 2; /* possible \n added */
+
+       local_save_flags(irq_flags);
+       buffer = global_trace.trace_buffer.buffer;
+       event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+                                         irq_flags, preempt_count());
+       if (!event)
+               return 0;
+
+       entry = ring_buffer_event_data(event);
+       entry->ip = ip;
+
+       memcpy(&entry->buf, str, size);
+
+       /* Add a newline if necessary */
+       if (entry->buf[size - 1] != '\n') {
+               entry->buf[size] = '\n';
+               entry->buf[size + 1] = '\0';
+       } else
+               entry->buf[size] = '\0';
+
+       __buffer_unlock_commit(buffer, event);
+
+       return size;
+}
+EXPORT_SYMBOL_GPL(__trace_puts);
+
+/**
+ * __trace_bputs - write the pointer to a constant string into the trace buffer
+ * @ip:           The address of the caller
+ * @str:   The constant string to write to the buffer
+ */
+int __trace_bputs(unsigned long ip, const char *str)
+{
+       struct ring_buffer_event *event;
+       struct ring_buffer *buffer;
+       struct bputs_entry *entry;
+       unsigned long irq_flags;
+       int size = sizeof(struct bputs_entry);
+
+       local_save_flags(irq_flags);
+       buffer = global_trace.trace_buffer.buffer;
+       event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+                                         irq_flags, preempt_count());
+       if (!event)
+               return 0;
+
+       entry = ring_buffer_event_data(event);
+       entry->ip                       = ip;
+       entry->str                      = str;
+
+       __buffer_unlock_commit(buffer, event);
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(__trace_bputs);
+
+#ifdef CONFIG_TRACER_SNAPSHOT
+/**
+ * tracing_snapshot - take a snapshot of the current buffer.
+ *
+ * This causes a swap between the snapshot buffer and the current live
+ * tracing buffer. You can use this to take snapshots of the live
+ * trace when some condition is triggered, but continue to trace.
+ *
+ * Note, make sure to allocate the snapshot with either
+ * a tracing_snapshot_alloc(), or by doing it manually
+ * with: echo 1 > /sys/kernel/debug/tracing/snapshot
+ *
+ * If the snapshot buffer is not allocated, it will stop tracing.
+ * Basically making a permanent snapshot.
+ */
+void tracing_snapshot(void)
+{
+       struct trace_array *tr = &global_trace;
+       struct tracer *tracer = tr->current_trace;
+       unsigned long flags;
+
+       if (in_nmi()) {
+               internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+               internal_trace_puts("*** snapshot is being ignored        ***\n");
+               return;
+       }
+
+       if (!tr->allocated_snapshot) {
+               internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
+               internal_trace_puts("*** stopping trace here!   ***\n");
+               tracing_off();
+               return;
+       }
+
+       /* Note, snapshot can not be used when the tracer uses it */
+       if (tracer->use_max_tr) {
+               internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
+               internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
+               return;
+       }
+
+       local_irq_save(flags);
+       update_max_tr(tr, current, smp_processor_id());
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot);
+
+static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
+                                       struct trace_buffer *size_buf, int cpu_id);
+static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
+
+static int alloc_snapshot(struct trace_array *tr)
+{
+       int ret;
+
+       if (!tr->allocated_snapshot) {
+
+               /* allocate spare buffer */
+               ret = resize_buffer_duplicate_size(&tr->max_buffer,
+                                  &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+               if (ret < 0)
+                       return ret;
+
+               tr->allocated_snapshot = true;
+       }
+
+       return 0;
+}
+
+void free_snapshot(struct trace_array *tr)
+{
+       /*
+        * We don't free the ring buffer; instead, we resize it because
+        * the max_tr ring buffer has some state (e.g. ring->clock) and
+        * we want to preserve it.
+        */
+       ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
+       set_buffer_entries(&tr->max_buffer, 1);
+       tracing_reset_online_cpus(&tr->max_buffer);
+       tr->allocated_snapshot = false;
+}
+
+/**
+ * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
+ *
+ * This is similar to tracing_snapshot(), but it will allocate the
+ * snapshot buffer if it isn't already allocated. Use this only
+ * where it is safe to sleep, as the allocation may sleep.
+ *
+ * This causes a swap between the snapshot buffer and the current live
+ * tracing buffer. You can use this to take snapshots of the live
+ * trace when some condition is triggered, but continue to trace.
+ */
+void tracing_snapshot_alloc(void)
+{
+       struct trace_array *tr = &global_trace;
+       int ret;
+
+       ret = alloc_snapshot(tr);
+       if (WARN_ON(ret < 0))
+               return;
+
+       tracing_snapshot();
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+#else
+void tracing_snapshot(void)
+{
+       WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot);
+void tracing_snapshot_alloc(void)
+{
+       /* Give warning */
+       tracing_snapshot();
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 /**
  * tracing_off - turn off tracing buffers
  *
@@ -734,6 +935,72 @@ static void default_wait_pipe(struct trace_iterator *iter)
        ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
 }
 
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+static int run_tracer_selftest(struct tracer *type)
+{
+       struct trace_array *tr = &global_trace;
+       struct tracer *saved_tracer = tr->current_trace;
+       int ret;
+
+       if (!type->selftest || tracing_selftest_disabled)
+               return 0;
+
+       /*
+        * Run a selftest on this tracer.
+        * Here we reset the trace buffer, and set the current
+        * tracer to be this tracer. The tracer can then run some
+        * internal tracing to verify that everything is in order.
+        * If we fail, we do not register this tracer.
+        */
+       tracing_reset_online_cpus(&tr->trace_buffer);
+
+       tr->current_trace = type;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+       if (type->use_max_tr) {
+               /* If we expanded the buffers, make sure the max is expanded too */
+               if (ring_buffer_expanded)
+                       ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
+                                          RING_BUFFER_ALL_CPUS);
+               tr->allocated_snapshot = true;
+       }
+#endif
+
+       /* the test is responsible for initializing and enabling */
+       pr_info("Testing tracer %s: ", type->name);
+       ret = type->selftest(type, tr);
+       /* the test is responsible for resetting too */
+       tr->current_trace = saved_tracer;
+       if (ret) {
+               printk(KERN_CONT "FAILED!\n");
+               /* Add the warning after printing 'FAILED' */
+               WARN_ON(1);
+               return -1;
+       }
+       /* Only reset on passing, to avoid touching corrupted buffers */
+       tracing_reset_online_cpus(&tr->trace_buffer);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+       if (type->use_max_tr) {
+               tr->allocated_snapshot = false;
+
+               /* Shrink the max buffer again */
+               if (ring_buffer_expanded)
+                       ring_buffer_resize(tr->max_buffer.buffer, 1,
+                                          RING_BUFFER_ALL_CPUS);
+       }
+#endif
+
+       printk(KERN_CONT "PASSED\n");
+       return 0;
+}
+#else
+static inline int run_tracer_selftest(struct tracer *type)
+{
+       return 0;
+}
+#endif /* CONFIG_FTRACE_STARTUP_TEST */
+
 /**
  * register_tracer - register a tracer with the ftrace system.
  * @type - the plugin for the tracer
@@ -779,61 +1046,9 @@ int register_tracer(struct tracer *type)
        if (!type->wait_pipe)
                type->wait_pipe = default_wait_pipe;
 
-
-#ifdef CONFIG_FTRACE_STARTUP_TEST
-       if (type->selftest && !tracing_selftest_disabled) {
-               struct trace_array *tr = &global_trace;
-               struct tracer *saved_tracer = tr->current_trace;
-
-               /*
-                * Run a selftest on this tracer.
-                * Here we reset the trace buffer, and set the current
-                * tracer to be this tracer. The tracer can then run some
-                * internal tracing to verify that everything is in order.
-                * If we fail, we do not register this tracer.
-                */
-               tracing_reset_online_cpus(&tr->trace_buffer);
-
-               tr->current_trace = type;
-
-#ifdef CONFIG_TRACER_MAX_TRACE
-               if (type->use_max_tr) {
-                       /* If we expanded the buffers, make sure the max is expanded too */
-                       if (ring_buffer_expanded)
-                               ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
-                                                  RING_BUFFER_ALL_CPUS);
-                       tr->allocated_snapshot = true;
-               }
-#endif
-
-               /* the test is responsible for initializing and enabling */
-               pr_info("Testing tracer %s: ", type->name);
-               ret = type->selftest(type, tr);
-               /* the test is responsible for resetting too */
-               tr->current_trace = saved_tracer;
-               if (ret) {
-                       printk(KERN_CONT "FAILED!\n");
-                       /* Add the warning after printing 'FAILED' */
-                       WARN_ON(1);
-                       goto out;
-               }
-               /* Only reset on passing, to avoid touching corrupted buffers */
-               tracing_reset_online_cpus(&tr->trace_buffer);
-
-#ifdef CONFIG_TRACER_MAX_TRACE
-               if (type->use_max_tr) {
-                       tr->allocated_snapshot = false;
-
-                       /* Shrink the max buffer again */
-                       if (ring_buffer_expanded)
-                               ring_buffer_resize(tr->max_buffer.buffer, 1,
-                                                  RING_BUFFER_ALL_CPUS);
-               }
-#endif
-
-               printk(KERN_CONT "PASSED\n");
-       }
-#endif
+       ret = run_tracer_selftest(type);
+       if (ret < 0)
+               goto out;
 
        type->next = trace_types;
        trace_types = type;
@@ -853,7 +1068,7 @@ int register_tracer(struct tracer *type)
        tracing_set_tracer(type->name);
        default_bootup_tracer = NULL;
        /* disable other selftests, since this will break it. */
-       tracing_selftest_disabled = 1;
+       tracing_selftest_disabled = true;
 #ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
@@ -1442,8 +1657,9 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 
 /**
  * trace_dump_stack - record a stack back trace in the trace buffer
+ * @skip: Number of functions to skip (helper handlers)
  */
-void trace_dump_stack(void)
+void trace_dump_stack(int skip)
 {
        unsigned long flags;
 
@@ -1452,9 +1668,13 @@ void trace_dump_stack(void)
 
        local_save_flags(flags);
 
-       /* skipping 3 traces, seems to get us at the caller of this function */
-       __ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, 3,
-                            preempt_count(), NULL);
+       /*
+        * Skip 3 more, seems to get us at the caller of
+        * this function.
+        */
+       skip += 3;
+       __ftrace_trace_stack(global_trace.trace_buffer.buffer,
+                            flags, skip, preempt_count(), NULL);
 }
 
 static DEFINE_PER_CPU(int, user_stack_count);
@@ -2366,6 +2586,11 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
                        return ret;
        }
 
+       if (iter->ent->type == TRACE_BPUTS &&
+                       trace_flags & TRACE_ITER_PRINTK &&
+                       trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+               return trace_print_bputs_msg_only(iter);
+
        if (iter->ent->type == TRACE_BPRINT &&
                        trace_flags & TRACE_ITER_PRINTK &&
                        trace_flags & TRACE_ITER_PRINTK_MSGONLY)
@@ -2613,6 +2838,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
                tracing_iter_reset(iter, cpu);
        }
 
+       tr->ref++;
+
        mutex_unlock(&trace_types_lock);
 
        return iter;
@@ -2649,6 +2876,10 @@ static int tracing_release(struct inode *inode, struct file *file)
        tr = iter->tr;
 
        mutex_lock(&trace_types_lock);
+
+       WARN_ON(!tr->ref);
+       tr->ref--;
+
        for_each_tracing_cpu(cpu) {
                if (iter->buffer_iter[cpu])
                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
@@ -3214,7 +3445,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
         * we use the size that was given, and we can forget about
         * expanding it later.
         */
-       ring_buffer_expanded = 1;
+       ring_buffer_expanded = true;
 
        /* May be called before buffers are initialized */
        if (!tr->trace_buffer.buffer)
@@ -3380,15 +3611,7 @@ static int tracing_set_tracer(const char *buf)
                 * so a synchronized_sched() is sufficient.
                 */
                synchronize_sched();
-               /*
-                * We don't free the ring buffer. instead, resize it because
-                * The max_tr ring buffer has some state (e.g. ring->clock) and
-                * we want preserve it.
-                */
-               ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
-               set_buffer_entries(&tr->max_buffer, 1);
-               tracing_reset_online_cpus(&tr->max_buffer);
-               tr->allocated_snapshot = false;
+               free_snapshot(tr);
        }
 #endif
        destroy_trace_option_files(topts);
@@ -3397,12 +3620,9 @@ static int tracing_set_tracer(const char *buf)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
        if (t->use_max_tr && !had_max_tr) {
-               /* we need to make per cpu buffer sizes equivalent */
-               ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer,
-                                                  RING_BUFFER_ALL_CPUS);
+               ret = alloc_snapshot(tr);
                if (ret < 0)
                        goto out;
-               tr->allocated_snapshot = true;
        }
 #endif
 
@@ -4274,14 +4494,8 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                        ret = -EINVAL;
                        break;
                }
-               if (tr->allocated_snapshot) {
-                       /* free spare buffer */
-                       ring_buffer_resize(tr->max_buffer.buffer, 1,
-                                          RING_BUFFER_ALL_CPUS);
-                       set_buffer_entries(&tr->max_buffer, 1);
-                       tracing_reset_online_cpus(&tr->max_buffer);
-                       tr->allocated_snapshot = false;
-               }
+               if (tr->allocated_snapshot)
+                       free_snapshot(tr);
                break;
        case 1:
 /* Only allow per-cpu swap if the ring buffer supports it */
@@ -4292,19 +4506,16 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                }
 #endif
                if (!tr->allocated_snapshot) {
-                       /* allocate spare buffer */
-                       ret = resize_buffer_duplicate_size(&tr->max_buffer,
-                                       &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+                       ret = alloc_snapshot(tr);
                        if (ret < 0)
                                break;
-                       tr->allocated_snapshot = true;
                }
                local_irq_disable();
                /* Now, we're going to swap */
                if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
-                       update_max_tr(&global_trace, current, smp_processor_id());
+                       update_max_tr(tr, current, smp_processor_id());
                else
-                       update_max_tr_single(&global_trace, current, iter->cpu_file);
+                       update_max_tr_single(tr, current, iter->cpu_file);
                local_irq_enable();
                break;
        default:
@@ -4460,6 +4671,10 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
        if (!info)
                return -ENOMEM;
 
+       mutex_lock(&trace_types_lock);
+
+       tr->ref++;
+
        info->iter.tr           = tr;
        info->iter.cpu_file     = tc->cpu;
        info->iter.trace        = tr->current_trace;
@@ -4470,6 +4685,8 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 
        filp->private_data = info;
 
+       mutex_unlock(&trace_types_lock);
+
        return nonseekable_open(inode, filp);
 }
 
@@ -4568,10 +4785,17 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
        struct ftrace_buffer_info *info = file->private_data;
        struct trace_iterator *iter = &info->iter;
 
+       mutex_lock(&trace_types_lock);
+
+       WARN_ON(!iter->tr->ref);
+       iter->tr->ref--;
+
        if (info->spare)
                ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
        kfree(info);
 
+       mutex_unlock(&trace_types_lock);
+
        return 0;
 }
 
@@ -4867,7 +5091,114 @@ static const struct file_operations tracing_dyn_info_fops = {
        .read           = tracing_read_dyn_info,
        .llseek         = generic_file_llseek,
 };
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
+static void
+ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+       tracing_snapshot();
+}
+
+static void
+ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+       unsigned long *count = (long *)data;
+
+       if (!*count)
+               return;
+
+       if (*count != -1)
+               (*count)--;
+
+       tracing_snapshot();
+}
+
+static int
+ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
+                     struct ftrace_probe_ops *ops, void *data)
+{
+       long count = (long)data;
+
+       seq_printf(m, "%ps:", (void *)ip);
+
+       seq_printf(m, "snapshot");
+
+       if (count == -1)
+               seq_printf(m, ":unlimited\n");
+       else
+               seq_printf(m, ":count=%ld\n", count);
+
+       return 0;
+}
+
+static struct ftrace_probe_ops snapshot_probe_ops = {
+       .func                   = ftrace_snapshot,
+       .print                  = ftrace_snapshot_print,
+};
+
+static struct ftrace_probe_ops snapshot_count_probe_ops = {
+       .func                   = ftrace_count_snapshot,
+       .print                  = ftrace_snapshot_print,
+};
+
+static int
+ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
+                              char *glob, char *cmd, char *param, int enable)
+{
+       struct ftrace_probe_ops *ops;
+       void *count = (void *)-1;
+       char *number;
+       int ret;
+
+       /* hash funcs only work with set_ftrace_filter */
+       if (!enable)
+               return -EINVAL;
+
+       ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
+
+       if (glob[0] == '!') {
+               unregister_ftrace_function_probe_func(glob+1, ops);
+               return 0;
+       }
+
+       if (!param)
+               goto out_reg;
+
+       number = strsep(&param, ":");
+
+       if (!strlen(number))
+               goto out_reg;
+
+       /*
+        * We use the callback data field (which is a pointer)
+        * as our counter.
+        */
+       ret = kstrtoul(number, 0, (unsigned long *)&count);
+       if (ret)
+               return ret;
+
+ out_reg:
+       ret = register_ftrace_function_probe(glob, ops, count);
+
+       if (ret >= 0)
+               alloc_snapshot(&global_trace);
+
+       return ret < 0 ? ret : 0;
+}
+
+static struct ftrace_func_command ftrace_snapshot_cmd = {
+       .name                   = "snapshot",
+       .func                   = ftrace_trace_snapshot_callback,
+};
+
+static int register_snapshot_cmd(void)
+{
+       return register_ftrace_command(&ftrace_snapshot_cmd);
+}
+#else
+static inline int register_snapshot_cmd(void) { return 0; }
+#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 
 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
 {
@@ -5279,53 +5610,57 @@ static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
        }
 }
 
-static int allocate_trace_buffers(struct trace_array *tr, int size)
+static int
+allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
 {
        enum ring_buffer_flags rb_flags;
 
        rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
 
-       tr->trace_buffer.buffer = ring_buffer_alloc(size, rb_flags);
-       if (!tr->trace_buffer.buffer)
-               goto out_free;
+       buf->buffer = ring_buffer_alloc(size, rb_flags);
+       if (!buf->buffer)
+               return -ENOMEM;
 
-       tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
-       if (!tr->trace_buffer.data)
-               goto out_free;
+       buf->data = alloc_percpu(struct trace_array_cpu);
+       if (!buf->data) {
+               ring_buffer_free(buf->buffer);
+               return -ENOMEM;
+       }
 
-       init_trace_buffers(tr, &tr->trace_buffer);
+       init_trace_buffers(tr, buf);
 
        /* Allocate the first page for all buffers */
        set_buffer_entries(&tr->trace_buffer,
                           ring_buffer_size(tr->trace_buffer.buffer, 0));
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-
-       tr->max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
-       if (!tr->max_buffer.buffer)
-               goto out_free;
-
-       tr->max_buffer.data = alloc_percpu(struct trace_array_cpu);
-       if (!tr->max_buffer.data)
-               goto out_free;
+       return 0;
+}
 
-       init_trace_buffers(tr, &tr->max_buffer);
+static int allocate_trace_buffers(struct trace_array *tr, int size)
+{
+       int ret;
 
-       set_buffer_entries(&tr->max_buffer, 1);
-#endif
-       return 0;
+       ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
+       if (ret)
+               return ret;
 
- out_free:
-       if (tr->trace_buffer.buffer)
+#ifdef CONFIG_TRACER_MAX_TRACE
+       ret = allocate_trace_buffer(tr, &tr->max_buffer,
+                                   allocate_snapshot ? size : 1);
+       if (WARN_ON(ret)) {
                ring_buffer_free(tr->trace_buffer.buffer);
-       free_percpu(tr->trace_buffer.data);
+               free_percpu(tr->trace_buffer.data);
+               return -ENOMEM;
+       }
+       tr->allocated_snapshot = allocate_snapshot;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-       if (tr->max_buffer.buffer)
-               ring_buffer_free(tr->max_buffer.buffer);
-       free_percpu(tr->max_buffer.data);
+       /*
+        * Only the top level trace array gets its snapshot allocated
+        * from the kernel command line.
+        */
+       allocate_snapshot = false;
 #endif
-       return -ENOMEM;
+       return 0;
 }
 
 static int new_instance_create(const char *name)
@@ -5411,6 +5746,10 @@ static int instance_delete(const char *name)
        if (!found)
                goto out_unlock;
 
+       ret = -EBUSY;
+       if (tr->ref)
+               goto out_unlock;
+
        list_del(&tr->list);
 
        event_trace_del_tracer(tr);
@@ -5506,6 +5845,7 @@ static __init void create_trace_instances(struct dentry *d_tracer)
 static void
 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 {
+       int cpu;
 
        trace_create_file("trace_options", 0644, d_tracer,
                          tr, &tracing_iter_fops);
@@ -5533,12 +5873,20 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 
        trace_create_file("tracing_on", 0644, d_tracer,
                            tr, &rb_simple_fops);
+
+#ifdef CONFIG_TRACER_SNAPSHOT
+       trace_create_file("snapshot", 0644, d_tracer,
+                         (void *)&tr->trace_cpu, &snapshot_fops);
+#endif
+
+       for_each_tracing_cpu(cpu)
+               tracing_init_debugfs_percpu(tr, cpu);
+
 }
 
 static __init int tracer_init_debugfs(void)
 {
        struct dentry *d_tracer;
-       int cpu;
 
        trace_access_lock_init();
 
@@ -5574,18 +5922,10 @@ static __init int tracer_init_debugfs(void)
                        &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
-#ifdef CONFIG_TRACER_SNAPSHOT
-       trace_create_file("snapshot", 0644, d_tracer,
-                         (void *)&global_trace.trace_cpu, &snapshot_fops);
-#endif
-
        create_trace_instances(d_tracer);
 
        create_trace_options_dir(&global_trace);
 
-       for_each_tracing_cpu(cpu)
-               tracing_init_debugfs_percpu(&global_trace, cpu);
-
        return 0;
 }
 
@@ -5848,6 +6188,8 @@ __init static int tracer_alloc_buffers(void)
                trace_set_options(&global_trace, option);
        }
 
+       register_snapshot_cmd();
+
        return 0;
 
 out_free_cpumask:
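For reference, the "snapshot" function command registered above is driven through set_ftrace_filter; based on the glob/param parsing in ftrace_trace_snapshot_callback(), usage should look like echo 'schedule:snapshot' > /sys/kernel/debug/tracing/set_ftrace_filter (unlimited), echo 'schedule:snapshot:3' for at most three snapshots, and echo '!schedule:snapshot' to remove the probe again (paths assume debugfs is mounted at /sys/kernel/debug).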