perf: Pass last sampling period to perf_sample_data_init()
author Robert Richter <robert.richter@amd.com>
Mon, 2 Apr 2012 18:19:08 +0000 (20:19 +0200)
committer Ingo Molnar <mingo@kernel.org>
Wed, 9 May 2012 13:23:12 +0000 (15:23 +0200)
We always need to pass the last sample period to
perf_sample_data_init(); otherwise the event distribution will be
wrong. Thus, modify the function interface to take the required period
as an argument. So basically a pattern like this:

        perf_sample_data_init(&data, ~0ULL);
        data.period = event->hw.last_period;

will now be like that:

        perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

This avoids an uninitialized data.period and simplifies the code.
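
For illustration, a minimal sketch of a hypothetical overflow handler
using the new interface; example_pmu_overflow() and example_pmu_stop()
are made-up names, while perf_sample_data_init() and
perf_event_overflow() are the interfaces this patch touches:

        static void example_pmu_overflow(struct perf_event *event,
                                         struct pt_regs *regs)
        {
                struct perf_sample_data data;
                struct hw_perf_event *hwc = &event->hw;

                /*
                 * addr and the last sampling period are now seeded in a
                 * single call, so data.period can no longer be left
                 * uninitialized between init and perf_event_overflow().
                 */
                perf_sample_data_init(&data, 0, hwc->last_period);

                if (perf_event_overflow(event, &data, regs))
                        example_pmu_stop(event, 0); /* arch-specific stop */
        }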

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1333390758-10893-3-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
15 files changed:
arch/alpha/kernel/perf_event.c
arch/arm/kernel/perf_event_v6.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/perf_event_xscale.c
arch/mips/kernel/perf_event_mipsxx.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/core-fsl-emb.c
arch/sparc/kernel/perf_event.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd_ibs.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
include/linux/perf_event.h
kernel/events/core.c

diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 0dae252f7a33b10ecb0d4e6a9fe77b8e5556fb14..d821b17047e0abbe54dd82871d3cd9e324d28df8 100644 (file)
@@ -824,7 +824,6 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 
        idx = la_ptr;
 
-       perf_sample_data_init(&data, 0);
        for (j = 0; j < cpuc->n_events; j++) {
                if (cpuc->current_idx[j] == idx)
                        break;
@@ -848,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 
        hwc = &event->hw;
        alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
-       data.period = event->hw.last_period;
+       perf_sample_data_init(&data, 0, hwc->last_period);
 
        if (alpha_perf_event_set_period(event, hwc, idx)) {
                if (perf_event_overflow(event, &data, regs)) {
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index b78af0cc6ef36ddf8b371c9b57541b19caab5677..ab627a740fa37409258b24441ae7c0b5aec7e7f6 100644 (file)
@@ -489,8 +489,6 @@ armv6pmu_handle_irq(int irq_num,
         */
        armv6_pmcr_write(pmcr);
 
-       perf_sample_data_init(&data, 0);
-
        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
@@ -509,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
 
                hwc = &event->hw;
                armpmu_event_update(event, hwc, idx);
-               data.period = event->hw.last_period;
+               perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 00755d82e2f2cbe06cda363c53ff115397d94d59..d3c536068162d30ec782c60eb780e6514e4784b5 100644 (file)
@@ -1077,8 +1077,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
         */
        regs = get_irq_regs();
 
-       perf_sample_data_init(&data, 0);
-
        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
@@ -1097,7 +1095,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 
                hwc = &event->hw;
                armpmu_event_update(event, hwc, idx);
-               data.period = event->hw.last_period;
+               perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
 
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 71a21e6712f5356daa77aa134796701101ce1a13..e34e7254e65263ee66cfc7d2d5397980aaf6b399 100644 (file)
@@ -248,8 +248,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
        regs = get_irq_regs();
 
-       perf_sample_data_init(&data, 0);
-
        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
@@ -263,7 +261,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
                hwc = &event->hw;
                armpmu_event_update(event, hwc, idx);
-               data.period = event->hw.last_period;
+               perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
 
@@ -588,8 +586,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
        regs = get_irq_regs();
 
-       perf_sample_data_init(&data, 0);
-
        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
@@ -603,7 +599,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
                hwc = &event->hw;
                armpmu_event_update(event, hwc, idx);
-               data.period = event->hw.last_period;
+               perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
 
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 811084f4e4220910d83314c843af969ef5425b72..ab73fa2fb9b5707343c6081b821faf9775b90a2f 100644 (file)
@@ -1325,7 +1325,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
 
        regs = get_irq_regs();
 
-       perf_sample_data_init(&data, 0);
+       perf_sample_data_init(&data, 0, 0);
 
        switch (counters) {
 #define HANDLE_COUNTER(n)                                              \
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 02aee03e713c6f287721faa7079ac760c865f7c9..8f84bcba18da12f1ee9b1f1f5727075777febb8c 100644 (file)
@@ -1299,8 +1299,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        if (record) {
                struct perf_sample_data data;
 
-               perf_sample_data_init(&data, ~0ULL);
-               data.period = event->hw.last_period;
+               perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
 
                if (event->attr.sample_type & PERF_SAMPLE_ADDR)
                        perf_get_data_addr(regs, &data.addr);
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 0a6d2a9d569cde1e924735dd24147eb90bd84c04..106c533546758280ae59da38ce45a2c88a7f49b5 100644 (file)
@@ -613,8 +613,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        if (record) {
                struct perf_sample_data data;
 
-               perf_sample_data_init(&data, 0);
-               data.period = event->hw.last_period;
+               perf_sample_data_init(&data, 0, event->hw.last_period);
 
                if (perf_event_overflow(event, &data, regs))
                        fsl_emb_pmu_stop(event, 0);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 28559ce5eeb523ecd81ee00879b68ef540ee39bc..5713957dcb8a38bb3baeb168e2ea33656f8a9a56 100644 (file)
@@ -1296,8 +1296,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 
        regs = args->regs;
 
-       perf_sample_data_init(&data, 0);
-
        cpuc = &__get_cpu_var(cpu_hw_events);
 
        /* If the PMU has the TOE IRQ enable bits, we need to do a
@@ -1321,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                if (val & (1ULL << 31))
                        continue;
 
-               data.period = event->hw.last_period;
+               perf_sample_data_init(&data, 0, hwc->last_period);
                if (!sparc_perf_event_set_period(event, hwc, idx))
                        continue;
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e33e9cf160eb9c3257d47b95e657bd458b13dc91..e049d6da01832cfc91b5e2a45b922f592c7bb7ae 100644 (file)
@@ -1183,8 +1183,6 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
        int idx, handled = 0;
        u64 val;
 
-       perf_sample_data_init(&data, 0);
-
        cpuc = &__get_cpu_var(cpu_hw_events);
 
        /*
@@ -1219,7 +1217,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
                 * event overflow
                 */
                handled++;
-               data.period     = event->hw.last_period;
+               perf_sample_data_init(&data, 0, event->hw.last_period);
 
                if (!x86_perf_event_set_period(event))
                        continue;
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index c8f69bea66245ab0ae82c770c2b774b1eaf96ead..2317228b529989350302b428c7fb6b32a9cf1ecb 100644 (file)
@@ -398,8 +398,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
        }
 
        perf_ibs_event_update(perf_ibs, event, config);
-       perf_sample_data_init(&data, 0);
-       data.period = event->hw.last_period;
+       perf_sample_data_init(&data, 0, hwc->last_period);
 
        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                ibs_data.caps = ibs_caps;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 26b3e2fef1047a86a7d546d4b7e8326b3ecbad33..166546ec6aefe523a20fc5b4206d0ef9a87e4679 100644 (file)
@@ -1027,8 +1027,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        u64 status;
        int handled;
 
-       perf_sample_data_init(&data, 0);
-
        cpuc = &__get_cpu_var(cpu_hw_events);
 
        /*
@@ -1082,7 +1080,7 @@ again:
                if (!intel_pmu_save_and_restart(event))
                        continue;
 
-               data.period = event->hw.last_period;
+               perf_sample_data_init(&data, 0, event->hw.last_period);
 
                if (has_branch_stack(event))
                        data.br_stack = &cpuc->lbr_stack;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 7f64df19e7ddfa5ebf7d2368864be508ae0cd1ce..5a3edc27f6e5754f758e5ff166f89b34d6d99a01 100644 (file)
@@ -316,8 +316,7 @@ int intel_pmu_drain_bts_buffer(void)
 
        ds->bts_index = ds->bts_buffer_base;
 
-       perf_sample_data_init(&data, 0);
-       data.period = event->hw.last_period;
+       perf_sample_data_init(&data, 0, event->hw.last_period);
        regs.ip     = 0;
 
        /*
@@ -564,8 +563,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
        if (!intel_pmu_save_and_restart(event))
                return;
 
-       perf_sample_data_init(&data, 0);
-       data.period = event->hw.last_period;
+       perf_sample_data_init(&data, 0, event->hw.last_period);
 
        /*
         * We use the interrupt regs as a base because the PEBS record
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index a2dfacfd7103b4b951452d5c42a3d613223f5ad5..47124a73dd73098e1179f174a0a6cb56ea07f227 100644 (file)
@@ -1005,8 +1005,6 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
        int idx, handled = 0;
        u64 val;
 
-       perf_sample_data_init(&data, 0);
-
        cpuc = &__get_cpu_var(cpu_hw_events);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1034,10 +1032,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                handled += overflow;
 
                /* event overflow for sure */
-               data.period = event->hw.last_period;
+               perf_sample_data_init(&data, 0, hwc->last_period);
 
                if (!x86_perf_event_set_period(event))
                        continue;
+
+
                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ddbb6a901f653b7880ed293ee2f67db17a01b850..f32578634d9d1a9c195c8075c80bc981607d3452 100644 (file)
@@ -1132,11 +1132,14 @@ struct perf_sample_data {
        struct perf_branch_stack        *br_stack;
 };
 
-static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data,
+                                        u64 addr, u64 period)
 {
+       /* remaining struct members initialized in perf_prepare_sample() */
        data->addr = addr;
        data->raw  = NULL;
        data->br_stack = NULL;
+       data->period    = period;
 }
 
 extern void perf_output_sample(struct perf_output_handle *handle,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9789a56b7d54320e2318b0f51c289730bda81e0d..00c58df9f4e2fcb222f0556231fece67aed6eece 100644 (file)
@@ -4957,7 +4957,7 @@ void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
        if (rctx < 0)
                return;
 
-       perf_sample_data_init(&data, addr);
+       perf_sample_data_init(&data, addr, 0);
 
        do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
 
@@ -5215,7 +5215,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
                .data = record,
        };
 
-       perf_sample_data_init(&data, addr);
+       perf_sample_data_init(&data, addr, 0);
        data.raw = &raw;
 
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
@@ -5318,7 +5318,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
        struct perf_sample_data sample;
        struct pt_regs *regs = data;
 
-       perf_sample_data_init(&sample, bp->attr.bp_addr);
+       perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
 
        if (!bp->hw.state && !perf_exclude_event(bp, regs))
                perf_swevent_event(bp, 1, &sample, regs);
@@ -5344,8 +5344,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 
        event->pmu->read(event);
 
-       perf_sample_data_init(&data, 0);
-       data.period = event->hw.last_period;
+       perf_sample_data_init(&data, 0, event->hw.last_period);
        regs = get_irq_regs();
 
        if (regs && !perf_exclude_event(event, regs)) {