perf/x86: Move Intel specific code to intel_pmu_init()
author Robert Richter <robert.richter@amd.com>
Wed, 20 Jun 2012 18:46:34 +0000 (20:46 +0200)
committer Ingo Molnar <mingo@kernel.org>
Thu, 5 Jul 2012 19:19:40 +0000 (21:19 +0200)
There is some Intel-specific code in the generic x86 path. Move it to
intel_pmu_init().

Since the p4 and p6 PMUs don't have fixed counters, we can skip the
check when such a PMU is detected.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1340217996-2254-3-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
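
The global-control mask being relocated here enables one bit per generic
counter in the low bits and one bit per fixed counter starting at
INTEL_PMC_IDX_FIXED (bit 32). The following is a minimal standalone sketch
of that arithmetic, not kernel code: the INTEL_PMC_* constants mirror the
kernel definitions at the time of this commit, and the counter counts
passed in main() are hypothetical.

/*
 * Standalone illustration of the intel_ctrl mask construction moved
 * into intel_pmu_init() by this patch.
 */
#include <stdio.h>
#include <stdint.h>

#define INTEL_PMC_MAX_GENERIC   32      /* upper bound on generic counters */
#define INTEL_PMC_MAX_FIXED     3       /* upper bound on fixed counters   */
#define INTEL_PMC_IDX_FIXED     32      /* fixed counters start at bit 32  */

static uint64_t intel_ctrl_mask(int num_counters, int num_counters_fixed)
{
        uint64_t ctrl;

        /* Clip to the architectural maxima, as the patched code does. */
        if (num_counters > INTEL_PMC_MAX_GENERIC)
                num_counters = INTEL_PMC_MAX_GENERIC;
        if (num_counters_fixed > INTEL_PMC_MAX_FIXED)
                num_counters_fixed = INTEL_PMC_MAX_FIXED;

        /* One enable bit per generic counter in the low bits ... */
        ctrl = (1ULL << num_counters) - 1;
        /* ... and one bit per fixed counter starting at bit 32. */
        ctrl |= ((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;

        return ctrl;
}

int main(void)
{
        /* Hypothetical CPU reporting 4 generic and 3 fixed counters. */
        printf("intel_ctrl = 0x%llx\n",
               (unsigned long long)intel_ctrl_mask(4, 3));
        return 0;
}
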
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c

index 668050002602d86aaaea9b89400a2b7771552383..7b4f1e871f7c8d83b692238be9be9bf41b150b0d 100644 (file)
@@ -1307,7 +1307,6 @@ static struct attribute_group x86_pmu_format_group = {
 static int __init init_hw_perf_events(void)
 {
        struct x86_pmu_quirk *quirk;
-       struct event_constraint *c;
        int err;
 
        pr_info("Performance Events: ");
@@ -1338,21 +1337,8 @@ static int __init init_hw_perf_events(void)
        for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
                quirk->func();
 
-       if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
-               WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-                    x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
-               x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
-       }
-       x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-
-       if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
-               WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-                    x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
-               x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
-       }
-
-       x86_pmu.intel_ctrl |=
-               ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+       if (!x86_pmu.intel_ctrl)
+               x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
 
        perf_events_lapic_init();
        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1361,22 +1347,6 @@ static int __init init_hw_perf_events(void)
                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
                                   0, x86_pmu.num_counters, 0);
 
-       if (x86_pmu.event_constraints) {
-               /*
-                * event on fixed counter2 (REF_CYCLES) only works on this
-                * counter, so do not extend mask to generic counters
-                */
-               for_each_event_constraint(c, x86_pmu.event_constraints) {
-                       if (c->cmask != X86_RAW_EVENT_MASK
-                           || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
-                               continue;
-                       }
-
-                       c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-                       c->weight += x86_pmu.num_counters;
-               }
-       }
-
        x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
        x86_pmu_format_group.attrs = x86_pmu.format_attrs;
 
index 5b0b362c7ae6756f4f4171c49302abb5585f7024..2e9444c80148c44535e4d4a80c4d565e0b706164 100644 (file)
@@ -1765,6 +1765,7 @@ __init int intel_pmu_init(void)
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
+       struct event_constraint *c;
        unsigned int unused;
        int version;
 
@@ -1953,5 +1954,37 @@ __init int intel_pmu_init(void)
                }
        }
 
+       if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
+               WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+                    x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+               x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
+       }
+       x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+
+       if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+               WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+                    x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+               x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+       }
+
+       x86_pmu.intel_ctrl |=
+               ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+
+       if (x86_pmu.event_constraints) {
+               /*
+                * event on fixed counter2 (REF_CYCLES) only works on this
+                * counter, so do not extend mask to generic counters
+                */
+               for_each_event_constraint(c, x86_pmu.event_constraints) {
+                       if (c->cmask != X86_RAW_EVENT_MASK
+                           || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+                               continue;
+                       }
+
+                       c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+                       c->weight += x86_pmu.num_counters;
+               }
+       }
+
        return 0;
 }
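
With the clipping and mask construction now local to intel_pmu_init(), a
hypothetical part reporting 4 generic and 3 fixed counters ends up with
intel_ctrl = 0xf | (0x7 << 32) = 0x70000000f, while PMUs that never reach
this code, such as p4 and p6 which lack fixed counters, simply get the
plain generic-counter mask via the new if (!x86_pmu.intel_ctrl) fallback
in init_hw_perf_events().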