/*
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>		/* struct arm_pmu, struct pmu_hw_events */
#include <asm/stacktrace.h>

/*
 * ARMv8 supports a maximum of 32 events.
 * The cycle counter is included in this total.
 */
#define ARMPMU_MAX_HWEVENTS		32

static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

int
armpmu_get_max_events(void)
{
	int max_events = 0;

	if (cpu_pmu != NULL)
		max_events = cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

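/*
 * Illustrative example (not part of the mapping logic itself): the perf
 * core packs a PERF_TYPE_HW_CACHE config as one byte each for cache
 * type, operation and result, so an L1-data read miss is encoded as:
 *
 *	config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *		 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which the function above decodes back into the three table indices.
 */
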
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

static int map_cpu_event(struct perf_event *event,
			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			 const unsigned (*cache_map)
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
			 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);
	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
	perf_event_update_userpage(event);

	return ret;
}

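/*
 * Sketch of the idea above (assuming a 32-bit up-counter): programming
 * the counter with (u32)-left means it wraps, and raises its overflow
 * interrupt, after exactly `left` further increments. E.g. left = 0x100
 * writes 0xffffff00, which overflows after 256 events.
 */
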
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

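/*
 * Illustrative wrap-around case (assuming max_period == 0xffffffff):
 * prev_raw_count = 0xfffffff0 and new_raw_count = 0x00000010 give
 * delta = (0x10 - 0xfffffff0) & 0xffffffff = 0x20, i.e. the 32 events
 * counted across the wrap are still accumulated correctly.
 */
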
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void
armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event fake_event = event->hw;
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	for (i = 0; i < irqs; ++i) {
		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
			continue;
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, armpmu);
	}
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device) {
		pr_err("no PMU device registered\n");
		return -ENODEV;
	}

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < irqs; ++i) {
		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		/*
		 * If we have a single PMU interrupt that we can't shift,
		 * assume that we're running on a uniprocessor machine and
		 * continue. Otherwise, continue without this interrupt.
		 */
		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
				   irq, i);
			continue;
		}

		err = request_irq(irq, armpmu->handle_irq, IRQF_NOBALANCING,
				  "arm-pmu", armpmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			armpmu_release_hardware(armpmu);
			return err;
		}

		cpumask_set_cpu(i, &armpmu->active_irqs);
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err = 0;

	mapping = armpmu->map_event(event);
	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}

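/*
 * Worked example (assuming the 32-bit ARMv8 counters below, where
 * max_period is 0xffffffff): a non-sampling event gets sample_period =
 * 0x7fffffff, so the counter is reprogrammed roughly every 2^31 events
 * and a late overflow interrupt cannot be confused with a wrap of the
 * freshly reloaded counter.
 */
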
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop();
}

static void __init armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
	armpmu_init(armpmu);
	return perf_pmu_register(&armpmu->pmu, name, type);
}

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */
enum armv8_pmuv3_perf_types {
	/* Required events. */
	ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,

	/* At least one of the following is required. */
	ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,

	/* Common architectural events. */
	ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
	ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
	ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
	ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
	ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
	ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
	ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,

	/* Common microarchitectural events. */
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
	ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
	ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
	ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
	ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
	ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
	ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
};

/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER	0
#define ARMV8_IDX_COUNTER0	1
#define ARMV8_IDX_COUNTER_LAST	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS	32
#define ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)

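/*
 * For example, the cycle counter lives at perf index 0 and maps to
 * PMCCNTR_EL0 directly, while perf index 1 (ARMV8_IDX_COUNTER0) maps to
 * hardware event counter 0, i.e. ARMV8_IDX_TO_COUNTER(1) == 0 is the
 * value written to PMSELR_EL0 before touching PMXEVCNTR_EL0.
 */
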
/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK	0x1f
#define ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1	(1 << 31)
#define ARMV8_EXCLUDE_EL0	(1 << 30)
#define ARMV8_INCLUDE_EL2	(1 << 27)

static inline u32 armv8pmu_pmcr_read(void)
{
	u32 val;

	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	int ret = 0;
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), idx);
	} else {
		counter = ARMV8_IDX_TO_COUNTER(idx);
		ret = pmnc & BIT(counter);
	}

	return ret;
}

static inline int armv8pmu_select_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
	isb();

	return idx;
}

static inline u32 armv8pmu_read_counter(int idx)
{
	u32 value = 0;

	if (!armv8pmu_counter_valid(idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

	return value;
}

static inline void armv8pmu_write_counter(int idx, u32 value)
{
	if (!armv8pmu_counter_valid(idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_EVTYPE_MASK;
		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
	}
}

static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
	isb();

	return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

	/* Write to clear flags */
	value &= ARMV8_OVSR_MASK;
	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

	return value;
}

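/*
 * Note: PMOVSCLR_EL0 is write-one-to-clear, so writing back exactly the
 * value just read acknowledges only the overflow flags that are about
 * to be handled; any overflow that lands afterwards is left pending.
 */
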
static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv8pmu_disable_counter(idx);

	/* Set event (if destined for PMNx counters). */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv8pmu_enable_intens(idx);

	/* Enable counter */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv8pmu_disable_counter(idx);

	/* Disable interrupt for this counter */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/* Get and reset the IRQ flags. */
	pmovsr = armv8pmu_getreset_flags();

	/* Did an overflow occur? */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/* Handle the counter(s) overflow(s). */
	regs = get_irq_regs();

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv8pmu_start(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;
	unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
		config_base |= ARMV8_EXCLUDE_EL1;
	if (!attr->exclude_hv)
		config_base |= ARMV8_INCLUDE_EL2;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

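/*
 * Example (illustrative): a perf_event_attr with exclude_kernel = 1 and
 * exclude_hv = 1 yields config_base == ARMV8_EXCLUDE_EL1, i.e. the
 * counter filters out EL1 and, by not setting ARMV8_INCLUDE_EL2, EL2 as
 * well, so only EL0 execution is counted.
 */
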
static void armv8pmu_reset(void *info)
{
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
		armv8pmu_disable_event(NULL, idx);

	/* Initialize & Reset PMNC: C and P bits. */
	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);

	/* Disable access from userspace. */
	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map, 0xFF);
}

static struct arm_pmu armv8pmu = {
	.handle_irq = armv8pmu_handle_irq,
	.enable = armv8pmu_enable_event,
	.disable = armv8pmu_disable_event,
	.read_counter = armv8pmu_read_counter,
	.write_counter = armv8pmu_write_counter,
	.get_event_idx = armv8pmu_get_event_idx,
	.start = armv8pmu_start,
	.stop = armv8pmu_stop,
	.reset = armv8pmu_reset,
	.max_period = (1LLU << 32) - 1,
};

static u32 __init armv8pmu_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}

static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
{
	armv8pmu.name = "arm/armv8-pmuv3";
	armv8pmu.map_event = armv8_pmuv3_map_event;
	armv8pmu.num_events = armv8pmu_read_num_pmnc_events();
	armv8pmu.set_event_filter = armv8pmu_set_event_filter;
	return &armv8pmu;
}

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
cpu_pmu_reset(void)
{
	if (cpu_pmu && cpu_pmu->reset)
		return on_each_cpu(cpu_pmu->reset, NULL, 1);
	return 0;
}
arch_initcall(cpu_pmu_reset);

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3"},
	{},
};

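/*
 * Illustrative devicetree node matched by the table above (the interrupt
 * specifier below is a placeholder, not a value taken from any real
 * platform):
 *
 *	pmu {
 *		compatible = "arm,armv8-pmuv3";
 *		interrupts = <0 7 4>;
 *	};
 */
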
static int armpmu_device_probe(struct platform_device *pdev)
{
	if (!cpu_pmu)
		return -ENODEV;

	cpu_pmu->plat_device = pdev;
	return 0;
}

static struct platform_driver armpmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.of_match_table = armpmu_of_device_ids,
	},
	.probe		= armpmu_device_probe,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);

static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
	return &__get_cpu_var(cpu_hw_events);
}

static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}
	armpmu->get_hw_events = armpmu_get_cpu_events;
}

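/*
 * For reference, bits [11:8] of ID_AA64DFR0_EL1 hold the PMUVer field;
 * the value 0x1 checked for below denotes an architectural PMUv3
 * implementation, which is why only that case instantiates a PMU driver.
 */
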
static int __init init_hw_perf_events(void)
{
	u64 dfr = read_cpuid(ID_AA64DFR0_EL1);

	switch ((dfr >> 8) & 0xf) {
	case 0x1:	/* PMUv3 */
		cpu_pmu = armv8_pmuv3_pmu_init();
		break;
	}

	if (cpu_pmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			cpu_pmu->name, cpu_pmu->num_events);
		cpu_pmu_init(cpu_pmu);
		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
	} else {
		pr_info("no hardware support available\n");
	}

	return 0;
}
early_initcall(init_hw_perf_events);

/*
 * Callchain handling code.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));

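/*
 * This mirrors the AArch64 frame record: at a frame pointer (x29) the
 * saved previous frame pointer and the saved link register (x30) sit
 * next to each other, which is what the fp and lr fields above
 * correspond to.
 */
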
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

void perf_callchain_user(struct perf_callchain_entry *entry,
			 struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->pc);
	tail = (struct frame_tail __user *)regs->regs[29];

	while (entry->nr < PERF_MAX_STACK_DEPTH &&
	       tail && !((unsigned long)tail & 0xf))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
	struct perf_callchain_entry *entry = data;

	perf_callchain_store(entry, frame->pc);
	return 0;
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	frame.fp = regs->regs[29];
	frame.sp = regs->sp;
	frame.pc = regs->pc;
	walk_stackframe(&frame, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}