}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
-static int __init alloc_snapshot(char *str)
+static int __init boot_alloc_snapshot(char *str)
{
allocate_snapshot = true;
/* We also need the main ring buffer expanded */
ring_buffer_expanded = true;
return 1;
}
-__setup("alloc_snapshot", alloc_snapshot);
+__setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
}
EXPORT_SYMBOL_GPL(tracing_on);
+/**
+ * __trace_puts - write a constant string into the trace buffer.
+ * @ip: The address of the caller
+ * @str: The constant string to write
+ * @size: The size of the string.
+ */
+int __trace_puts(unsigned long ip, const char *str, int size)
+{
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
+ struct print_entry *entry;
+ unsigned long irq_flags;
+ int alloc;
+
+ alloc = sizeof(*entry) + size + 2; /* possible \n added */
+
+ local_save_flags(irq_flags);
+ buffer = global_trace.trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ irq_flags, preempt_count());
+ if (!event)
+ return 0;
+
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+
+ memcpy(&entry->buf, str, size);
+
+ /* Add a newline if necessary (guard against size == 0) */
+ if (!size || entry->buf[size - 1] != '\n') {
+ entry->buf[size] = '\n';
+ entry->buf[size + 1] = '\0';
+ } else {
+ entry->buf[size] = '\0';
+ }
+
+ __buffer_unlock_commit(buffer, event);
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(__trace_puts);
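+
+/*
+ * Callers are expected to go through the trace_puts() macro rather
+ * than call this directly; the macro hands string literals to
+ * __trace_bputs() and everything else to this function.  A minimal
+ * usage sketch:
+ *
+ *   trace_puts("reached the slow path\n");
+ */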
+
+/**
+ * __trace_bputs - write the pointer to a constant string into the trace buffer
+ * @ip: The address of the caller
+ * @str: The constant string whose address is written into the buffer
+ */
+int __trace_bputs(unsigned long ip, const char *str)
+{
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
+ struct bputs_entry *entry;
+ unsigned long irq_flags;
+ int size = sizeof(struct bputs_entry);
+
+ local_save_flags(irq_flags);
+ buffer = global_trace.trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+ irq_flags, preempt_count());
+ if (!event)
+ return 0;
+
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
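+ /*
+ * Only the address of @str is recorded, not a copy, so the
+ * string must stay valid for the life of the ring buffer.
+ */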
+ entry->str = str;
+
+ __buffer_unlock_commit(buffer, event);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(__trace_bputs);
+
#ifdef CONFIG_TRACER_SNAPSHOT
/**
* trace_snapshot - take a snapshot of the current buffer.
struct tracer *tracer = tr->current_trace;
unsigned long flags;
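+ /*
+ * update_max_tr() swaps buffers under a spinlock; taking that
+ * lock from NMI context could deadlock, so refuse the snapshot.
+ */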
+ if (in_nmi()) {
+ internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+ internal_trace_puts("*** snapshot is being ignored ***\n");
+ return;
+ }
+
if (!tr->allocated_snapshot) {
- trace_printk("*** SNAPSHOT NOT ALLOCATED ***\n");
- trace_printk("*** stopping trace here! ***\n");
+ internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
+ internal_trace_puts("*** stopping trace here! ***\n");
tracing_off();
return;
}
/* Note, snapshot can not be used when the tracer uses it */
if (tracer->use_max_tr) {
- trace_printk("*** LATENCY TRACER ACTIVE ***\n");
- trace_printk("*** Can not use snapshot (sorry) ***\n");
+ internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
+ internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
return;
}
update_max_tr(tr, current, smp_processor_id());
local_irq_restore(flags);
}
+EXPORT_SYMBOL_GPL(tracing_snapshot);
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
struct trace_buffer *size_buf, int cpu_id);
+static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
+
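+/*
+ * Size the spare (max) buffer to match the main buffer, once.  This
+ * is shared by tracing_snapshot_alloc(), the tracers that use
+ * max_tr, and the "snapshot" function probe registered below.
+ */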
+static int alloc_snapshot(struct trace_array *tr)
+{
+ int ret;
+
+ if (!tr->allocated_snapshot) {
+
+ /* allocate spare buffer */
+ ret = resize_buffer_duplicate_size(&tr->max_buffer,
+ &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+ if (ret < 0)
+ return ret;
+
+ tr->allocated_snapshot = true;
+ }
+
+ return 0;
+}
+
+void free_snapshot(struct trace_array *tr)
+{
+ /*
+ * We don't free the ring buffer; instead, we resize it, because
+ * the max_tr ring buffer has some state (e.g. ring->clock) that
+ * we want to preserve.
+ */
+ ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
+ set_buffer_entries(&tr->max_buffer, 1);
+ tracing_reset_online_cpus(&tr->max_buffer);
+ tr->allocated_snapshot = false;
+}
/**
* trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
struct trace_array *tr = &global_trace;
int ret;
- if (!tr->allocated_snapshot) {
-
- /* allocate spare buffer */
- ret = resize_buffer_duplicate_size(&tr->max_buffer,
- &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
- if (WARN_ON(ret < 0))
- return;
-
- tr->allocated_snapshot = true;
- }
+ ret = alloc_snapshot(tr);
+ if (WARN_ON(ret < 0))
+ return;
tracing_snapshot();
}
+EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
+EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
/* Give warning */
tracing_snapshot();
}
+EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
/**
/**
* trace_dump_stack - record a stack back trace in the trace buffer
+ * @skip: Number of functions to skip (helper handlers)
*/
-void trace_dump_stack(void)
+void trace_dump_stack(int skip)
{
unsigned long flags;
local_save_flags(flags);
- /* skipping 3 traces, seems to get us at the caller of this function */
- __ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, 3,
- preempt_count(), NULL);
+ /*
+ * Skip 3 more frames; that seems to land us at the caller
+ * of this function.
+ */
+ skip += 3;
+ __ftrace_trace_stack(global_trace.trace_buffer.buffer,
+ flags, skip, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);
return ret;
}
+ if (iter->ent->type == TRACE_BPUTS &&
+ trace_flags & TRACE_ITER_PRINTK &&
+ trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+ return trace_print_bputs_msg_only(iter);
+
if (iter->ent->type == TRACE_BPRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
* so a synchronized_sched() is sufficient.
*/
synchronize_sched();
- /*
- * We don't free the ring buffer. instead, resize it because
- * The max_tr ring buffer has some state (e.g. ring->clock) and
- * we want preserve it.
- */
- ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
- set_buffer_entries(&tr->max_buffer, 1);
- tracing_reset_online_cpus(&tr->max_buffer);
- tr->allocated_snapshot = false;
+ free_snapshot(tr);
}
#endif
destroy_trace_option_files(topts);
#ifdef CONFIG_TRACER_MAX_TRACE
if (t->use_max_tr && !had_max_tr) {
- /* we need to make per cpu buffer sizes equivalent */
- ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer,
- RING_BUFFER_ALL_CPUS);
+ ret = alloc_snapshot(tr);
if (ret < 0)
goto out;
- tr->allocated_snapshot = true;
}
#endif
ret = -EINVAL;
break;
}
- if (tr->allocated_snapshot) {
- /* free spare buffer */
- ring_buffer_resize(tr->max_buffer.buffer, 1,
- RING_BUFFER_ALL_CPUS);
- set_buffer_entries(&tr->max_buffer, 1);
- tracing_reset_online_cpus(&tr->max_buffer);
- tr->allocated_snapshot = false;
- }
+ if (tr->allocated_snapshot)
+ free_snapshot(tr);
break;
case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
}
#endif
if (!tr->allocated_snapshot) {
- /* allocate spare buffer */
- ret = resize_buffer_duplicate_size(&tr->max_buffer,
- &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+ ret = alloc_snapshot(tr);
if (ret < 0)
break;
- tr->allocated_snapshot = true;
}
local_irq_disable();
/* Now, we're going to swap */
.read = tracing_read_dyn_info,
.llseek = generic_file_llseek,
};
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
+static void
+ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ tracing_snapshot();
+}
+
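+/*
+ * The probe's data pointer doubles as the remaining-hit count: -1
+ * means snapshot on every hit, anything else counts down to zero.
+ */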
+static void
+ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ unsigned long *count = (unsigned long *)data;
+
+ if (!*count)
+ return;
+
+ if (*count != -1)
+ (*count)--;
+
+ tracing_snapshot();
+}
+
+static int
+ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
+ struct ftrace_probe_ops *ops, void *data)
+{
+ long count = (long)data;
+
+ seq_printf(m, "%ps:", (void *)ip);
+
+ seq_printf(m, "snapshot");
+
+ if (count == -1)
+ seq_printf(m, ":unlimited\n");
+ else
+ seq_printf(m, ":count=%ld\n", count);
+
+ return 0;
+}
+
+static struct ftrace_probe_ops snapshot_probe_ops = {
+ .func = ftrace_snapshot,
+ .print = ftrace_snapshot_print,
+};
+
+static struct ftrace_probe_ops snapshot_count_probe_ops = {
+ .func = ftrace_count_snapshot,
+ .print = ftrace_snapshot_print,
+};
+
+static int
+ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
+ char *glob, char *cmd, char *param, int enable)
+{
+ struct ftrace_probe_ops *ops;
+ void *count = (void *)-1;
+ char *number;
+ int ret;
+
+ /* hash funcs only work with set_ftrace_filter */
+ if (!enable)
+ return -EINVAL;
+
+ ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
+
+ if (glob[0] == '!') {
+ unregister_ftrace_function_probe_func(glob+1, ops);
+ return 0;
+ }
+
+ if (!param)
+ goto out_reg;
+
+ number = strsep(&param, ":");
+
+ if (!strlen(number))
+ goto out_reg;
+
+ /*
+ * We use the callback data field (which is a pointer)
+ * as our counter.
+ */
+ ret = kstrtoul(number, 0, (unsigned long *)&count);
+ if (ret)
+ return ret;
+
+ out_reg:
+ ret = register_ftrace_function_probe(glob, ops, count);
+
+ if (ret >= 0)
+ alloc_snapshot(&global_trace);
+
+ return ret < 0 ? ret : 0;
+}
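+
+/*
+ * Usage sketch (the function name is just an example): once the
+ * command is registered, it is driven through set_ftrace_filter:
+ *
+ *   echo 'schedule:snapshot:5' > set_ftrace_filter
+ *
+ * takes a snapshot on each of the next five calls to schedule(),
+ * while "echo '!schedule:snapshot' > set_ftrace_filter" removes
+ * the probe again.
+ */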
+
+static struct ftrace_func_command ftrace_snapshot_cmd = {
+ .name = "snapshot",
+ .func = ftrace_trace_snapshot_callback,
+};
+
+static int register_snapshot_cmd(void)
+{
+ return register_ftrace_command(&ftrace_snapshot_cmd);
+}
+#else
+static inline int register_snapshot_cmd(void) { return 0; }
+#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
trace_set_options(&global_trace, option);
}
+ register_snapshot_cmd();
+
return 0;
out_free_cpumask: