Pileus Git - ~andy/linux/blobdiff - kernel/trace/trace.c
ring-buffer: Add per_cpu ring buffer control files
[~andy/linux] / kernel / trace / trace.c
index bbcde546f9f76fcc4bc4b7af53ab1182693a0e30..f11a285ee5bb4f87d139ca1f938c9652490322e7 100644 (file)
@@ -838,7 +838,8 @@ __acquires(kernel_lock)
 
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded && type->use_max_tr)
-                       ring_buffer_resize(max_tr.buffer, trace_buf_size);
+                       ring_buffer_resize(max_tr.buffer, trace_buf_size,
+                                               RING_BUFFER_ALL_CPUS);
 
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
@@ -854,7 +855,8 @@ __acquires(kernel_lock)
 
                /* Shrink the max buffer again */
                if (ring_buffer_expanded && type->use_max_tr)
-                       ring_buffer_resize(max_tr.buffer, 1);
+                       ring_buffer_resize(max_tr.buffer, 1,
+                                               RING_BUFFER_ALL_CPUS);
 
                printk(KERN_CONT "PASSED\n");
        }
@@ -3053,7 +3055,14 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
        return t->init(tr);
 }
 
-static int __tracing_resize_ring_buffer(unsigned long size)
+static void set_buffer_entries(struct trace_array *tr, unsigned long val)
+{
+       int cpu;
+       for_each_tracing_cpu(cpu)
+               tr->data[cpu]->entries = val;
+}
+
+static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 {
        int ret;
 
@@ -3064,19 +3073,32 @@ static int __tracing_resize_ring_buffer(unsigned long size)
         */
        ring_buffer_expanded = 1;
 
-       ret = ring_buffer_resize(global_trace.buffer, size);
+       ret = ring_buffer_resize(global_trace.buffer, size, cpu);
        if (ret < 0)
                return ret;
 
        if (!current_trace->use_max_tr)
                goto out;
 
-       ret = ring_buffer_resize(max_tr.buffer, size);
+       ret = ring_buffer_resize(max_tr.buffer, size, cpu);
        if (ret < 0) {
-               int r;
+               int r = 0;
+
+               if (cpu == RING_BUFFER_ALL_CPUS) {
+                       int i;
+                       for_each_tracing_cpu(i) {
+                               r = ring_buffer_resize(global_trace.buffer,
+                                               global_trace.data[i]->entries,
+                                               i);
+                               if (r < 0)
+                                       break;
+                       }
+               } else {
+                       r = ring_buffer_resize(global_trace.buffer,
+                                               global_trace.data[cpu]->entries,
+                                               cpu);
+               }
 
-               r = ring_buffer_resize(global_trace.buffer,
-                                      global_trace.entries);
                if (r < 0) {
                        /*
                         * AARGH! We are left with different
@@ -3098,14 +3120,21 @@ static int __tracing_resize_ring_buffer(unsigned long size)
                return ret;
        }
 
-       max_tr.entries = size;
+       if (cpu == RING_BUFFER_ALL_CPUS)
+               set_buffer_entries(&max_tr, size);
+       else
+               max_tr.data[cpu]->entries = size;
+
  out:
-       global_trace.entries = size;
+       if (cpu == RING_BUFFER_ALL_CPUS)
+               set_buffer_entries(&global_trace, size);
+       else
+               global_trace.data[cpu]->entries = size;
 
        return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(unsigned long size)
+static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 {
        int cpu, ret = size;
 
@@ -3121,12 +3150,19 @@ static ssize_t tracing_resize_ring_buffer(unsigned long size)
                        atomic_inc(&max_tr.data[cpu]->disabled);
        }
 
-       if (size != global_trace.entries)
-               ret = __tracing_resize_ring_buffer(size);
+       if (cpu_id != RING_BUFFER_ALL_CPUS) {
+               /* make sure, this cpu is enabled in the mask */
+               if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
 
+       ret = __tracing_resize_ring_buffer(size, cpu_id);
        if (ret < 0)
                ret = -ENOMEM;
 
+out:
        for_each_tracing_cpu(cpu) {
                if (global_trace.data[cpu])
                        atomic_dec(&global_trace.data[cpu]->disabled);
@@ -3157,7 +3193,8 @@ int tracing_update_buffers(void)
 
        mutex_lock(&trace_types_lock);
        if (!ring_buffer_expanded)
-               ret = __tracing_resize_ring_buffer(trace_buf_size);
+               ret = __tracing_resize_ring_buffer(trace_buf_size,
+                                               RING_BUFFER_ALL_CPUS);
        mutex_unlock(&trace_types_lock);
 
        return ret;
@@ -3181,7 +3218,8 @@ static int tracing_set_tracer(const char *buf)
        mutex_lock(&trace_types_lock);
 
        if (!ring_buffer_expanded) {
-               ret = __tracing_resize_ring_buffer(trace_buf_size);
+               ret = __tracing_resize_ring_buffer(trace_buf_size,
+                                               RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        goto out;
                ret = 0;
@@ -3207,8 +3245,8 @@ static int tracing_set_tracer(const char *buf)
                 * The max_tr ring buffer has some state (e.g. ring->clock) and
                 * we want preserve it.
                 */
-               ring_buffer_resize(max_tr.buffer, 1);
-               max_tr.entries = 1;
+               ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
+               set_buffer_entries(&max_tr, 1);
        }
        destroy_trace_option_files(topts);
 
@@ -3216,10 +3254,17 @@ static int tracing_set_tracer(const char *buf)
 
        topts = create_trace_option_files(current_trace);
        if (current_trace->use_max_tr) {
-               ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
-               if (ret < 0)
-                       goto out;
-               max_tr.entries = global_trace.entries;
+               int cpu;
+               /* we need to make per cpu buffer sizes equivalent */
+               for_each_tracing_cpu(cpu) {
+                       ret = ring_buffer_resize(max_tr.buffer,
+                                               global_trace.data[cpu]->entries,
+                                               cpu);
+                       if (ret < 0)
+                               goto out;
+                       max_tr.data[cpu]->entries =
+                                       global_trace.data[cpu]->entries;
+               }
        }
 
        if (t->init) {
@@ -3721,30 +3766,82 @@ out_err:
        goto out;
 }
 
+struct ftrace_entries_info {
+       struct trace_array      *tr;
+       int                     cpu;
+};
+
+static int tracing_entries_open(struct inode *inode, struct file *filp)
+{
+       struct ftrace_entries_info *info;
+
+       if (tracing_disabled)
+               return -ENODEV;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->tr = &global_trace;
+       info->cpu = (unsigned long)inode->i_private;
+
+       filp->private_data = info;
+
+       return 0;
+}
+
 static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
 {
-       struct trace_array *tr = filp->private_data;
-       char buf[96];
-       int r;
+       struct ftrace_entries_info *info = filp->private_data;
+       struct trace_array *tr = info->tr;
+       char buf[64];
+       int r = 0;
+       ssize_t ret;
 
        mutex_lock(&trace_types_lock);
-       if (!ring_buffer_expanded)
-               r = sprintf(buf, "%lu (expanded: %lu)\n",
-                           tr->entries >> 10,
-                           trace_buf_size >> 10);
-       else
-               r = sprintf(buf, "%lu\n", tr->entries >> 10);
+
+       if (info->cpu == RING_BUFFER_ALL_CPUS) {
+               int cpu, buf_size_same;
+               unsigned long size;
+
+               size = 0;
+               buf_size_same = 1;
+               /* check if all cpu sizes are same */
+               for_each_tracing_cpu(cpu) {
+                       /* fill in the size from first enabled cpu */
+                       if (size == 0)
+                               size = tr->data[cpu]->entries;
+                       if (size != tr->data[cpu]->entries) {
+                               buf_size_same = 0;
+                               break;
+                       }
+               }
+
+               if (buf_size_same) {
+                       if (!ring_buffer_expanded)
+                               r = sprintf(buf, "%lu (expanded: %lu)\n",
+                                           size >> 10,
+                                           trace_buf_size >> 10);
+                       else
+                               r = sprintf(buf, "%lu\n", size >> 10);
+               } else
+                       r = sprintf(buf, "X\n");
+       } else
+               r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
+
        mutex_unlock(&trace_types_lock);
 
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+       ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+       return ret;
 }
 
 static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
 {
+       struct ftrace_entries_info *info = filp->private_data;
        unsigned long val;
        int ret;
 
@@ -3759,7 +3856,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        /* value is in KB */
        val <<= 10;
 
-       ret = tracing_resize_ring_buffer(val);
+       ret = tracing_resize_ring_buffer(val, info->cpu);
        if (ret < 0)
                return ret;
 
@@ -3768,6 +3865,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
+static int
+tracing_entries_release(struct inode *inode, struct file *filp)
+{
+       struct ftrace_entries_info *info = filp->private_data;
+
+       kfree(info);
+
+       return 0;
+}
+
 static ssize_t
 tracing_total_entries_read(struct file *filp, char __user *ubuf,
                                size_t cnt, loff_t *ppos)
@@ -3779,7 +3886,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
        mutex_lock(&trace_types_lock);
        for_each_tracing_cpu(cpu) {
-               size += tr->entries >> 10;
+               size += tr->data[cpu]->entries >> 10;
                if (!ring_buffer_expanded)
                        expanded_size += trace_buf_size >> 10;
        }
@@ -3813,7 +3920,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
        if (trace_flags & TRACE_ITER_STOP_ON_FREE)
                tracing_off();
        /* resize the ring buffer to 0 */
-       tracing_resize_ring_buffer(0);
+       tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
 
        return 0;
 }
@@ -4012,9 +4119,10 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-       .open           = tracing_open_generic,
+       .open           = tracing_entries_open,
        .read           = tracing_entries_read,
        .write          = tracing_entries_write,
+       .release        = tracing_entries_release,
        .llseek         = generic_file_llseek,
 };
 
@@ -4466,6 +4574,9 @@ static void tracing_init_debugfs_percpu(long cpu)
 
        trace_create_file("stats", 0444, d_cpu,
                        (void *) cpu, &tracing_stats_fops);
+
+       trace_create_file("buffer_size_kb", 0444, d_cpu,
+                       (void *) cpu, &tracing_entries_fops);
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -4795,7 +4906,7 @@ static __init int tracer_init_debugfs(void)
                        (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
 
        trace_create_file("buffer_size_kb", 0644, d_tracer,
-                       &global_trace, &tracing_entries_fops);
+                       (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
 
        trace_create_file("buffer_total_size_kb", 0444, d_tracer,
                        &global_trace, &tracing_total_entries_fops);
@@ -5056,7 +5167,6 @@ __init static int tracer_alloc_buffers(void)
                WARN_ON(1);
                goto out_free_cpumask;
        }
-       global_trace.entries = ring_buffer_size(global_trace.buffer);
        if (global_trace.buffer_disabled)
                tracing_off();
 
@@ -5069,7 +5179,6 @@ __init static int tracer_alloc_buffers(void)
                ring_buffer_free(global_trace.buffer);
                goto out_free_cpumask;
        }
-       max_tr.entries = 1;
 #endif
 
        /* Allocate the first page for all buffers */
@@ -5078,6 +5187,11 @@ __init static int tracer_alloc_buffers(void)
                max_tr.data[i] = &per_cpu(max_tr_data, i);
        }
 
+       set_buffer_entries(&global_trace, ring_buf_size);
+#ifdef CONFIG_TRACER_MAX_TRACE
+       set_buffer_entries(&max_tr, 1);
+#endif
+
        trace_init_cmdlines();
 
        register_tracer(&nop_trace);