/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
static DEFINE_RAW_SPINLOCK(pmu_lock);
/*
 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 event counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS	33
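/*
 * Sizing note: counter indices start at 1 and index 0 is left unused, so the
 * per-CPU arrays below hold one slot more than the 32 counters an ARMv7 PMU
 * can expose.
 */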
/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index. Index 0
	 * is reserved.
	 */
	struct perf_event *events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * A 1 bit for an index indicates that the counter is actively being
	 * used.
	 */
	unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
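/*
 * The callbacks below are members of struct arm_pmu, the per-CPU-type PMU
 * descriptor. Besides what is shown here it also carries the PMU's name,
 * raw_event_mask, num_events and max_period, which are used throughout
 * this file.
 */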
	enum arm_perf_pmu_ids id;
	irqreturn_t (*handle_irq)(int irq_num, void *dev);
	void (*enable)(struct hw_perf_event *evt, int idx);
	void (*disable)(struct hw_perf_event *evt, int idx);
	int (*get_event_idx)(struct cpu_hw_events *cpuc,
			     struct hw_perf_event *hwc);
	u32 (*read_counter)(int idx);
	void (*write_counter)(int idx, u32 val);
	void (*reset)(void *);
	const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
				   [PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_COUNT_HW_CACHE_RESULT_MAX];
	const unsigned (*event_map)[PERF_COUNT_HW_MAX];
/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;
armpmu_get_pmu_id(void)
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

armpmu_get_max_events(void)
	max_events = armpmu->num_events;
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
	return armpmu_get_max_events();
EXPORT_SYMBOL_GPL(perf_num_counters);
#define HW_OP_UNSUPPORTED	0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED	0xFFFF
armpmu_map_cache_event(u64 config)
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;
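/*
 * The cache config encoding checked above follows the generic perf ABI:
 * bits 0-7 select the cache (PERF_COUNT_HW_CACHE_*), bits 8-15 the
 * operation (READ/WRITE/PREFETCH) and bits 16-23 the result (ACCESS/MISS).
 * For example, an L1D read-miss event would be requested from userspace as
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which this function translates via the per-CPU cache_map.
 */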
armpmu_map_event(u64 config)
	int mapping = (*armpmu->event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;

armpmu_map_raw_event(u64 config)
	return (int)(config & armpmu->raw_event_mask);
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;

	if (unlikely(left <= -period)) {
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;

	if (unlikely(left <= 0)) {
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);
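/*
 * Programming note: the counter is loaded with the negated "left" value
 * (truncated to 32 bits, since the ARM counters are 32 bits wide), so it
 * overflows and raises the PMU interrupt after exactly "left" further
 * increments. max_period bounds how far ahead a single programming can go.
 */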
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
	s64 prev_raw_count, new_raw_count;

	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)

	delta = (new_raw_count << shift) - (prev_raw_count << shift);

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
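/*
 * Two subtleties above: the cmpxchg() publishes new_raw_count only if
 * prev_count was not updated concurrently (e.g. by the overflow interrupt),
 * otherwise the read is retried; and shifting both counts up by
 * (64 - counter width) before subtracting confines the delta to the
 * counter's width, so a 32-bit counter that wrapped between reads still
 * produces the correct positive delta.
 */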
armpmu_read(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */

	armpmu_event_update(event, hwc, hwc->idx);
armpmu_stop(struct perf_event *event, int flags)
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
armpmu_start(struct perf_event *event, int flags)
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
armpmu_del(struct perf_event *event, int flags)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	clear_bit(idx, cpuc->active_mask);
	armpmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
armpmu_add(struct perf_event *event, int flags)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	perf_pmu_disable(event->pmu);

	/* If we don't have space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	perf_pmu_enable(event->pmu);
static struct pmu pmu;

validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;

validate_group(struct perf_event *event)
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return 0;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return 0;
	}

	if (!validate_event(&fake_pmu, event))
		return 0;
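/*
 * validate_group() dry-runs counter allocation: it schedules the leader,
 * every sibling and finally the new event onto a zeroed, fake
 * cpu_hw_events, so a group that could never fit on the hardware at the
 * same time is rejected at init time rather than failing later.
 */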
static irqreturn_t armpmu_platform_irq(int irq, void *dev)
	struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);

	return plat->handle_irq(irq, dev, armpmu->handle_irq);
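/*
 * Trampoline used when the platform supplies its own IRQ wrapper in the
 * platform data: the wrapper is called first and is handed the real PMU
 * handler (armpmu->handle_irq) to invoke, e.g. for boards that route the
 * PMU interrupt through extra glue logic.
 */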
armpmu_reserve_hardware(void)
	struct arm_pmu_platdata *plat;
	irq_handler_t handle_irq;
	int i, err = -ENODEV, irq;

	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (IS_ERR(pmu_device)) {
		pr_warning("unable to reserve pmu\n");
		return PTR_ERR(pmu_device);
	}

	init_pmu(ARM_PMU_DEVICE_CPU);

	plat = dev_get_platdata(&pmu_device->dev);
	if (plat && plat->handle_irq)
		handle_irq = armpmu_platform_irq;
	else
		handle_irq = armpmu->handle_irq;

	if (pmu_device->num_resources < 1) {
		pr_err("no irqs for PMUs defined\n");

	for (i = 0; i < pmu_device->num_resources; ++i) {
		irq = platform_get_irq(pmu_device, i);

		err = request_irq(irq, handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,

			pr_warning("unable to request IRQ%d for ARM perf "

	for (i = i - 1; i >= 0; --i) {
		irq = platform_get_irq(pmu_device, i);

	release_pmu(pmu_device);
armpmu_release_hardware(void)
	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
		irq = platform_get_irq(pmu_device, i);

	release_pmu(pmu_device);
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
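/*
 * active_events counts perf events currently using the PMU. The first
 * event to come along reserves the hardware and requests the interrupts
 * (armpmu_reserve_hardware(), under pmu_reserve_mutex); destroying the
 * last event releases them again in hw_perf_event_destroy().
 */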
hw_perf_event_destroy(struct perf_event *event)
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(&pmu_reserve_mutex);
	}
__hw_perf_event_init(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu_map_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu_map_raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);

		pr_debug("event %x:%llx not supported\n", event->attr.type,

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 * The ARM performance counters are on all of the time so if someone
	 * has asked us for some excludes then we have to fail.
	 */
	if (event->attr.exclude_kernel || event->attr.exclude_user ||
	    event->attr.exclude_hv || event->attr.exclude_idle) {
		pr_debug("ARM performance counters do not support "

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */

	/*
	 * Store the event encoding into the config_base field. config and
	 * event_base are unused as the only 2 things we need to know are
	 * the event mapping and the counter to use. The counter to use is
	 * also the index and the config_base is the event type.
	 */
	hwc->config_base = (unsigned long)mapping;

	if (!hwc->sample_period) {
		hwc->sample_period = armpmu->max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		err = validate_group(event);
static int armpmu_event_init(struct perf_event *event)
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > armpmu->num_events) {
			atomic_dec(&active_events);

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			err = armpmu_reserve_hardware();

		atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);

	err = __hw_perf_event_init(event);

		hw_perf_event_destroy(event);
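/*
 * Illustrative userspace sketch (not part of this file): how an event ends
 * up in armpmu_event_init(). A counting event for CPU cycles on the calling
 * thread could be opened roughly as follows:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	u64 count;
 *	read(fd, &count, sizeof(count));
 *
 * The attr.type/attr.config pair is what __hw_perf_event_init() maps onto
 * an ARM event number above.
 */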
static void armpmu_enable(struct pmu *pmu)
	/* Enable all of the perf events on hardware. */
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		armpmu->enable(&event->hw, idx);
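/*
 * The loop bound is "<= num_events" because hardware counter indices run
 * from 1 to num_events; slot 0 of events[] is never populated, so the
 * inclusive upper bound is needed to reach the last counter.
 */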
static void armpmu_disable(struct pmu *pmu)

static struct pmu pmu = {
	.pmu_enable	= armpmu_enable,
	.pmu_disable	= armpmu_disable,
	.event_init	= armpmu_event_init,
	.start		= armpmu_start,
/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
	if (armpmu && armpmu->reset)
		return on_each_cpu(armpmu->reset, NULL, 1);
arch_initcall(armpmu_reset);
init_hw_perf_events(void)
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);
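/*
 * MIDR decoding: bits [31:24] hold the implementer code (0x41 = ARM Ltd,
 * 0x69 = Intel/XScale) and bits [15:4] the primary part number, which is
 * why part_number is masked with 0xFFF0 and compared against values such
 * as 0xB360 or 0xC090 below.
 */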
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360: /* ARM1136 */
		case 0xB560: /* ARM1156 */
		case 0xB760: /* ARM1176 */
			armpmu = armv6pmu_init();
		case 0xB020: /* ARM11mpcore */
			armpmu = armv6mpcore_pmu_init();
		case 0xC080: /* Cortex-A8 */
			armpmu = armv7_a8_pmu_init();
		case 0xC090: /* Cortex-A9 */
			armpmu = armv7_a9_pmu_init();
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
			armpmu = xscale1pmu_init();
			armpmu = xscale2pmu_init();

	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);

	pr_info("no hardware support available\n");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

early_initcall(init_hw_perf_events);
/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
	struct frame_tail __user *tail;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while (tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
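/*
 * The walk stops when user_backtrace() returns NULL (unreadable frame, or a
 * frame pointer that does not move towards higher addresses) or when the
 * frame pointer is NULL or not word-aligned.
 */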
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the return address.
 */
callchain_trace(struct stackframe *fr,
		void *data)
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
	struct stackframe fr;

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);