ARM: perf: consistently use struct perf_event in arm_pmu functions
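This change reworks the arm_pmu callbacks so that per-event operations take the struct perf_event itself (from which the hw_perf_event and counter index can be derived) and PMU-wide operations take the struct arm_pmu. Below is a rough sketch of the resulting callback signatures, inferred only from the call sites in this diff; the real declarations live in arch/arm/include/asm/pmu.h, which is not part of this hunk, and the exact counter widths shown here are an assumption.

	/*
	 * Sketch of the reworked arm_pmu callbacks, inferred from the call
	 * sites below. Not the verbatim header; member order, omitted fields
	 * and the u32 counter width are assumptions.
	 */
	struct arm_pmu {
		void	(*enable)(struct perf_event *event);
		void	(*disable)(struct perf_event *event);
		int	(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
		u32	(*read_counter)(struct perf_event *event);
		void	(*write_counter)(struct perf_event *event, u32 value);
		void	(*start)(struct arm_pmu *);
		void	(*stop)(struct arm_pmu *);
		int	(*request_irq)(struct arm_pmu *, irq_handler_t handler);
		void	(*free_irq)(struct arm_pmu *);
		/* ... remaining members unchanged ... */
	};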
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f8406af03279da1f3a3cddf8251df69c4f7a12a7..1cfa3f35713e984ba92abf879dc6b9ae2feec66a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event,
        return -ENOENT;
 }
 
-int
-armpmu_event_set_period(struct perf_event *event,
-                       struct hw_perf_event *hwc,
-                       int idx)
+int armpmu_event_set_period(struct perf_event *event)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;
@@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event,
 
        local64_set(&hwc->prev_count, (u64)-left);
 
-       armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
+       armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
 
        perf_event_update_userpage(event);
 
        return ret;
 }
 
-u64
-armpmu_event_update(struct perf_event *event,
-                   struct hw_perf_event *hwc,
-                   int idx)
+u64 armpmu_event_update(struct perf_event *event)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;
 
 again:
        prev_raw_count = local64_read(&hwc->prev_count);
-       new_raw_count = armpmu->read_counter(idx);
+       new_raw_count = armpmu->read_counter(event);
 
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
@@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event)
        if (hwc->idx < 0)
                return;
 
-       armpmu_event_update(event, hwc, hwc->idx);
+       armpmu_event_update(event);
 }
 
 static void
@@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags)
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
-               armpmu->disable(hwc, hwc->idx);
-               armpmu_event_update(event, hwc, hwc->idx);
+               armpmu->disable(event);
+               armpmu_event_update(event);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
 }
 
-static void
-armpmu_start(struct perf_event *event, int flags)
+static void armpmu_start(struct perf_event *event, int flags)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
@@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags)
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
-       armpmu_event_set_period(event, hwc, hwc->idx);
-       armpmu->enable(hwc, hwc->idx);
+       armpmu_event_set_period(event);
+       armpmu->enable(event);
 }
 
 static void
@@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags)
        perf_pmu_disable(event->pmu);
 
        /* If we don't have a space for the counter then finish early. */
-       idx = armpmu->get_event_idx(hw_events, hwc);
+       idx = armpmu->get_event_idx(hw_events, event);
        if (idx < 0) {
                err = idx;
                goto out;
@@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags)
         * sure it is disabled.
         */
        event->hw.idx = idx;
-       armpmu->disable(hwc, idx);
+       armpmu->disable(event);
        hw_events->events[idx] = event;
 
        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events,
               struct perf_event *event)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       struct hw_perf_event fake_event = event->hw;
        struct pmu *leader_pmu = event->group_leader->pmu;
 
        if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
                return 1;
 
-       return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
+       return armpmu->get_event_idx(hw_events, event) >= 0;
 }
 
 static int
@@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 static void
 armpmu_release_hardware(struct arm_pmu *armpmu)
 {
-       armpmu->free_irq();
+       armpmu->free_irq(armpmu);
        pm_runtime_put_sync(&armpmu->plat_device->dev);
 }
 
@@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
                return -ENODEV;
 
        pm_runtime_get_sync(&pmu_device->dev);
-       err = armpmu->request_irq(armpmu_dispatch_irq);
+       err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
        if (err) {
                armpmu_release_hardware(armpmu);
                return err;
@@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu)
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
        if (enabled)
-               armpmu->start();
+               armpmu->start(armpmu);
 }
 
 static void armpmu_disable(struct pmu *pmu)
 {
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
-       armpmu->stop();
+       armpmu->stop(armpmu);
 }
 
 #ifdef CONFIG_PM_RUNTIME