/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low-level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ-masked separately. The cycle
 * counter and the group of event counters can each be reset separately.
 */
/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
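/*
 * Since armv7pmu.raw_event_mask below is 0xFF, these encodings can also be
 * requested directly as raw hardware events from userspace (for instance,
 * "perf stat -e r03" should count ARMV7_PERFCTR_DCACHE_REFILL).
 */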
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV7_PERFCTR_IFETCH_MISS = 0x01,
	ARMV7_PERFCTR_ITLB_MISS = 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */
	ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */
	ARMV7_PERFCTR_DTLB_REFILL = 0x05,
	ARMV7_PERFCTR_DREAD = 0x06,
	ARMV7_PERFCTR_DWRITE = 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV7_PERFCTR_EXC_TAKEN = 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV7_PERFCTR_CID_WRITE = 0x0B,
	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE = 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
	ARMV7_PERFCTR_MEM_ACCESS = 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16,
	ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17,
	ARMV7_PERFCTR_L2_DCACHE_WB = 0x18,
	ARMV7_PERFCTR_BUS_ACCESS = 0x19,
	ARMV7_PERFCTR_MEMORY_ERROR = 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES = 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
	ARMV7_PERFCTR_L2_ACCESS = 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
	ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
	ARMV7_PERFCTR_L2_NEON = 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
	ARMV7_PERFCTR_L1_INST = 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
	ARMV7_PERFCTR_OP_EXECUTED = 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
	ARMV7_PERFCTR_CYCLES_INST = 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
	ARMV7_PERFCTR_NEON_CYCLES = 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
	ARMV7_PERFCTR_PMU_EVENTS = 0x72,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
	ARMV7_PERFCTR_DATA_EVICTION = 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,

	ARMV7_PERFCTR_ISB_INST = 0x90,
	ARMV7_PERFCTR_DSB_INST = 0x91,
	ARMV7_PERFCTR_DMB_INST = 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};
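/*
 * The generic cache reference/miss events are reported as unsupported
 * above: the A8 counters cannot combine instruction and data cache
 * traffic, so cache activity is exposed through the per-cache map below
 * instead (for example the L1 data access/refill pair).
 */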
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,

		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
		[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER = 1,	/* Cycle counter */
	ARMV7_COUNTER0 = 2,		/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th CPU cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
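/*
 * Illustrative use of these bits (a sketch, not code from this driver):
 * the 64-cycle divider could be enabled, under pmu_lock, with
 *
 *	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_D);
 *
 * armv7_pmnc_write() below masks the value with ARMV7_PMNC_MASK, so only
 * the six writable control bits defined here ever reach the register.
 */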
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
/*
 * CNTENS: counters enable reg
 */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
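/*
 * Worked example of the index mapping (using the definitions above): the
 * cycle counter uses the dedicated ARMV7_CNTENS_C bit (1 << 31), while
 * perf index ARMV7_COUNTER0 (2) drives hardware event counter 0, so
 * ARMV7_CNTENS_P(ARMV7_COUNTER0) evaluates to (1 << 0).
 */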
/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define ARMV7_FLAG_MASK		0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
static inline unsigned long armv7_pmnc_read(void)
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));

static inline void armv7_pmnc_write(unsigned long val)
	val &= ARMV7_PMNC_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
	return pmnc & ARMV7_OVERFLOWED_MASK;

static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);

		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), counter);
static inline int armv7_pmnc_select_counter(unsigned int idx)
	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);

	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
static inline u32 armv7pmu_read_counter(int idx)
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"

		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

static inline void armv7pmu_write_counter(int idx, u32 value)
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"

		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;

		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;

		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;

		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;

		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
static inline u32 armv7_pmnc_getreset_flags(void)
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write back to clear the overflow flags (write-1-to-clear) */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
static void armv7_pmnc_dump_regs(void)
	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))

		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
static void armv7pmu_start(void)
	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);

static void armv7pmu_stop(void)
	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *event)
	/* Always place a cycle-count event on the cycle counter. */
	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))

		return ARMV7_CYCLE_COUNTER;

	/*
	 * For anything other than a cycle count, try to use
	 * the event counters
	 */
	for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))

	/* The counters are all in use. */
static void armv7pmu_reset(void *info)
	u32 idx, nb_cnt = armpmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = 1; idx < nb_cnt; ++idx)
		armv7pmu_disable_event(NULL, idx);

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
static struct arm_pmu armv7pmu = {
	.handle_irq		= armv7pmu_handle_irq,
	.enable			= armv7pmu_enable_event,
	.disable		= armv7pmu_disable_event,
	.read_counter		= armv7pmu_read_counter,
	.write_counter		= armv7pmu_write_counter,
	.get_event_idx		= armv7pmu_get_event_idx,
	.start			= armv7pmu_start,
	.stop			= armv7pmu_stop,
	.reset			= armv7pmu_reset,
	.raw_event_mask		= 0xFF,
	.max_period		= (1LLU << 32) - 1,
};
static u32 __init armv7_read_num_pmnc_events(void)
	/* Read the number of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
	armv7pmu.name		= "ARMv7 Cortex-A8";
	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
	armv7pmu.event_map	= &armv7_a8_perf_map;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
	armv7pmu.name		= "ARMv7 Cortex-A9";
	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
	armv7pmu.event_map	= &armv7_a9_perf_map;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();

#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)

static const struct arm_pmu *__init armv7_a9_pmu_init(void)

#endif	/* CONFIG_CPU_V7 */
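/*
 * Which of the two init functions is used is decided by the caller: the
 * core ARM perf code typically keys this off the CPU part number in the
 * MIDR (0xC080 for Cortex-A8, 0xC090 for Cortex-A9); that dispatch lives
 * outside this file.
 */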