2 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
8 * This is an implementation of a DWARF unwinder. Its main purpose is
9 * for generating stacktrace information. Based on the DWARF 3
10 * specification from http://www.dwarfstd.org.
13 * - DWARF64 doesn't work.
17 #include <linux/kernel.h>
19 #include <linux/list.h>
21 #include <asm/dwarf.h>
22 #include <asm/unwinder.h>
23 #include <asm/sections.h>
24 #include <asm/unaligned.h>
25 #include <asm/dwarf.h>
26 #include <asm/stacktrace.h>
/* Global list of parsed CIE entries, protected by dwarf_cie_lock. */
28 static LIST_HEAD(dwarf_cie_list);
29 DEFINE_SPINLOCK(dwarf_cie_lock);
/* Global list of parsed FDE entries, protected by dwarf_fde_lock. */
31 static LIST_HEAD(dwarf_fde_list);
32 DEFINE_SPINLOCK(dwarf_fde_lock);
/*
 * The CIE returned by the most recent dwarf_lookup_cie() call.
 * Consecutive FDEs usually reference the same CIE, so caching it
 * short-circuits the list walk.
 */
34 static struct dwarf_cie *cached_cie;
37 * Figure out whether we need to allocate some dwarf registers. If dwarf
38 * registers have already been allocated then we may need to realloc
39 * them. "reg" is a register number that we need to be able to access
42 * Register numbers start at zero, therefore we need to allocate space
43 * for "reg" + 1 registers.
45 static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
48 struct dwarf_reg *regs;
49 unsigned int num_regs = reg + 1;
53 new_size = num_regs * sizeof(*regs);
54 old_size = frame->num_regs * sizeof(*regs);
56 /* Fast path: don't allocate any regs if we've already got enough. */
57 if (frame->num_regs >= num_regs)
/*
 * GFP_ATOMIC: presumably the unwinder can run in atomic context
 * (e.g. while dumping a stacktrace) — TODO confirm against callers.
 */
60 regs = kzalloc(new_size, GFP_ATOMIC);
62 printk(KERN_WARNING "Unable to allocate DWARF registers\n");
64 * Let's just bomb hard here, we have no way to
/* Carry the previously recorded register rules into the new array. */
71 memcpy(regs, frame->regs, old_size);
76 frame->num_regs = num_regs;
80 * dwarf_read_addr - read dwarf data
81 * @src: source address of data
82 * @dst: destination address to store the data to
84 * Read 'n' bytes from @src, where 'n' is the size of an address on
85 * the native machine. We return the number of bytes read, which
86 * should always be 'n'. We also have to be careful when reading
87 * from @src and writing to @dst, because they can be arbitrarily
88 * aligned. Return 'n' - the number of bytes read.
90 static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
/*
 * NOTE(review): only 32 bits are actually transferred (u32), which
 * matches sizeof(unsigned long *) on a 32-bit target but would
 * silently truncate addresses on a 64-bit build — consistent with
 * the "DWARF64 doesn't work" caveat in the file header.
 */
92 u32 val = get_unaligned(src);
93 put_unaligned(val, dst);
94 return sizeof(unsigned long *);
98 * dwarf_read_uleb128 - read unsigned LEB128 data
99 * @addr: the address where the ULEB128 data is stored
100 * @ret: address to store the result
102 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
103 * from Appendix C of the DWARF 3 spec. For information on the
104 * encodings refer to section "7.6 - Variable Length Data". Return
105 * the number of bytes read.
107 static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
/* Each byte contributes its low 7 bits; the high bit means "more follow". */
118 byte = __raw_readb(addr);
122 result |= (byte & 0x7f) << shift;
135 * dwarf_read_leb128 - read signed LEB128 data
136 * @addr: the address of the LEB128 encoded data
137 * @ret: address to store the result
139 * Decode signed LEB128 data. The algorithm is taken from Appendix
140 * C of the DWARF 3 spec. Return the number of bytes read.
142 static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
/* Same 7-bits-per-byte accumulation as the unsigned variant. */
154 byte = __raw_readb(addr);
156 result |= (byte & 0x7f) << shift;
164 /* The number of bits in a signed integer. */
165 num_bits = 8 * sizeof(result);
/* Sign-extend: bit 0x40 of the final byte is the sign bit. */
167 if ((shift < num_bits) && (byte & 0x40))
/*
 * NOTE(review): left-shifting a negative value (-1 << shift) is
 * undefined behaviour in C; "~0UL << shift" (suitably cast) would
 * express the same sign-extension mask without UB.
 */
168 result |= (-1 << shift);
176 * dwarf_read_encoded_value - return the decoded value at @addr
177 * @addr: the address of the encoded value
178 * @val: where to write the decoded value
179 * @encoding: the encoding with which we can decode @addr
181 * GCC emits encoded address in the .eh_frame FDE entries. Decode
182 * the value at @addr using @encoding. The decoded value is written
183 * to @val and the number of bytes read is returned.
185 static int dwarf_read_encoded_value(char *addr, unsigned long *val,
188 unsigned long decoded_addr = 0;
/* High nibble selects the application of the value; only absptr handled. */
191 switch (encoding & 0x70) {
192 case DW_EH_PE_absptr:
195 decoded_addr = (unsigned long)addr;
198 pr_debug("encoding=0x%x\n", (encoding & 0x70));
/* An omitted data format (0) defaults to a 4-byte unsigned datum. */
202 if ((encoding & 0x07) == 0x00)
203 encoding |= DW_EH_PE_udata4;
/* Low nibble selects size/signedness; only the 4-byte forms are handled. */
205 switch (encoding & 0x0f) {
206 case DW_EH_PE_sdata4:
207 case DW_EH_PE_udata4:
209 decoded_addr += get_unaligned((u32 *)addr);
210 __raw_writel(decoded_addr, val);
213 pr_debug("encoding=0x%x\n", encoding);
221 * dwarf_entry_len - return the length of an FDE or CIE
222 * @addr: the address of the entry
223 * @len: the length of the entry
225 * Read the initial_length field of the entry and store the size of
226 * the entry in @len. We return the number of bytes read. Return a
227 * count of 0 on error.
229 static inline int dwarf_entry_len(char *addr, unsigned long *len)
234 initial_len = get_unaligned((u32 *)addr);
238 * An initial length field value in the range DW_LEN_EXT_LO -
239 * DW_LEN_EXT_HI indicates an extension, and should not be
240 * interpreted as a length. The only extension that we currently
241 * understand is the use of DWARF64 addresses.
243 if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
245 * The 64-bit length field immediately follows the
246 * compulsory 32-bit length field.
248 if (initial_len == DW_EXT_DWARF64) {
/*
 * NOTE(review): "(u64 *)addr + 4" advances 4 * sizeof(u64) = 32
 * bytes, not the 4 bytes the comment above describes;
 * "(u64 *)(addr + 4)" looks intended. This matches the file
 * header's admission that DWARF64 doesn't work.
 */
249 *len = get_unaligned((u64 *)addr + 4);
252 printk(KERN_WARNING "Unknown DWARF extension\n");
262 * dwarf_lookup_cie - locate the cie
263 * @cie_ptr: pointer to help with lookup
265 static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
267 struct dwarf_cie *cie, *n;
270 spin_lock_irqsave(&dwarf_cie_lock, flags);
273 * We've cached the last CIE we looked up because chances are
274 * that the FDE wants this CIE.
276 if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
/* Slow path: scan every parsed CIE for a matching cie_pointer. */
281 list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) {
282 if (cie->cie_pointer == cie_ptr) {
288 /* Couldn't find the entry in the list. */
289 if (&cie->link == &dwarf_cie_list)
292 spin_unlock_irqrestore(&dwarf_cie_lock, flags);
297 * dwarf_lookup_fde - locate the FDE that covers pc
298 * @pc: the program counter
300 struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
303 struct dwarf_fde *fde, *n;
305 spin_lock_irqsave(&dwarf_fde_lock, flags);
/* Linear scan: each FDE covers the half-open code range [start, end). */
306 list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) {
307 unsigned long start, end;
309 start = fde->initial_location;
310 end = fde->initial_location + fde->address_range;
312 if (pc >= start && pc < end)
316 /* Couldn't find the entry in the list. */
317 if (&fde->link == &dwarf_fde_list)
320 spin_unlock_irqrestore(&dwarf_fde_lock, flags);
326 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
327 * @insn_start: address of the first instruction
328 * @insn_end: address of the last instruction
329 * @cie: the CIE for this function
330 * @fde: the FDE for this function
331 * @frame: the instructions calculate the CFA for this frame
332 * @pc: the program counter of the address we're interested in
333 * @define_ra: keep executing insns until the return addr reg is defined?
335 * Execute the Call Frame instruction sequence starting at
336 * @insn_start and ending at @insn_end. The instructions describe
337 * how to calculate the Canonical Frame Address of a stackframe.
338 * Store the results in @frame.
340 static int dwarf_cfa_execute_insns(unsigned char *insn_start,
341 unsigned char *insn_end,
342 struct dwarf_cie *cie,
343 struct dwarf_fde *fde,
344 struct dwarf_frame *frame,
349 unsigned char *current_insn;
350 unsigned int count, delta, reg, expr_len, offset;
353 current_insn = insn_start;
356 * If we're executing instructions for the dwarf_unwind_stack()
357 * FDE we need to keep executing instructions until the value of
358 * DWARF_ARCH_RA_REG is defined. See the comment in
359 * dwarf_unwind_stack() for more details.
/*
 * Interpret the CFI byte stream. We stop once the simulated pc has
 * advanced past the pc of interest (and, when requested, once the
 * return-address register rule has been seen).
 */
366 while (current_insn < insn_end && (frame->pc <= pc || !seen_ra_reg) ) {
367 insn = __raw_readb(current_insn++);
370 if (frame->num_regs >= DWARF_ARCH_RA_REG &&
371 frame->regs[DWARF_ARCH_RA_REG].flags)
376 * Firstly, handle the opcodes that embed their operands
377 * in the instructions.
379 switch (DW_CFA_opcode(insn)) {
380 case DW_CFA_advance_loc:
381 delta = DW_CFA_operand(insn);
382 delta *= cie->code_alignment_factor;
/* DW_CFA_offset: register rule is "saved at CFA + factored offset". */
387 reg = DW_CFA_operand(insn);
388 count = dwarf_read_uleb128(current_insn, &offset);
389 current_insn += count;
390 offset *= cie->data_alignment_factor;
391 dwarf_frame_alloc_regs(frame, reg);
392 frame->regs[reg].addr = offset;
393 frame->regs[reg].flags |= DWARF_REG_OFFSET;
397 reg = DW_CFA_operand(insn);
403 * Secondly, handle the opcodes that don't embed their
404 * operands in the instruction.
409 case DW_CFA_advance_loc1:
410 delta = *current_insn++;
411 frame->pc += delta * cie->code_alignment_factor;
413 case DW_CFA_advance_loc2:
414 delta = get_unaligned((u16 *)current_insn);
416 frame->pc += delta * cie->code_alignment_factor;
418 case DW_CFA_advance_loc4:
419 delta = get_unaligned((u32 *)current_insn);
421 frame->pc += delta * cie->code_alignment_factor;
423 case DW_CFA_offset_extended:
/*
 * NOTE(review): the "®" below is a mojibake of "&reg" (the HTML
 * entity &reg; was rendered); restore "&reg" before compiling.
 * Same artifact appears in several cases below.
 */
424 count = dwarf_read_uleb128(current_insn, ®);
425 current_insn += count;
426 count = dwarf_read_uleb128(current_insn, &offset);
427 current_insn += count;
428 offset *= cie->data_alignment_factor;
430 case DW_CFA_restore_extended:
431 count = dwarf_read_uleb128(current_insn, ®);
432 current_insn += count;
434 case DW_CFA_undefined:
435 count = dwarf_read_uleb128(current_insn, ®);
436 current_insn += count;
/* DW_CFA_def_cfa: CFA = register + offset, both read as ULEB128. */
439 count = dwarf_read_uleb128(current_insn,
440 &frame->cfa_register);
441 current_insn += count;
442 count = dwarf_read_uleb128(current_insn,
444 current_insn += count;
446 frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
448 case DW_CFA_def_cfa_register:
449 count = dwarf_read_uleb128(current_insn,
450 &frame->cfa_register);
451 current_insn += count;
452 frame->cfa_offset = 0;
453 frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
455 case DW_CFA_def_cfa_offset:
456 count = dwarf_read_uleb128(current_insn, &offset);
457 current_insn += count;
458 frame->cfa_offset = offset;
460 case DW_CFA_def_cfa_expression:
461 count = dwarf_read_uleb128(current_insn, &expr_len);
462 current_insn += count;
/* Record the expression bytes in place; they are evaluated later. */
464 frame->cfa_expr = current_insn;
465 frame->cfa_expr_len = expr_len;
466 current_insn += expr_len;
468 frame->flags |= DWARF_FRAME_CFA_REG_EXP;
470 case DW_CFA_offset_extended_sf:
471 count = dwarf_read_uleb128(current_insn, ®);
472 current_insn += count;
/*
 * NOTE(review): dwarf_read_leb128() takes an "int *" but is
 * handed "&offset" (unsigned int) — pointer-type mismatch worth
 * confirming against the original file.
 */
473 count = dwarf_read_leb128(current_insn, &offset);
474 current_insn += count;
475 offset *= cie->data_alignment_factor;
476 dwarf_frame_alloc_regs(frame, reg);
477 frame->regs[reg].flags |= DWARF_REG_OFFSET;
478 frame->regs[reg].addr = offset;
480 case DW_CFA_val_offset:
481 count = dwarf_read_uleb128(current_insn, ®);
482 current_insn += count;
/*
 * NOTE(review): unlike DW_CFA_offset_extended_sf above, this case
 * shows no dwarf_frame_alloc_regs(frame, reg) call and no
 * current_insn advance after reading the offset — verify against
 * the lines missing from this extraction.
 */
483 count = dwarf_read_leb128(current_insn, &offset);
484 offset *= cie->data_alignment_factor;
485 frame->regs[reg].flags |= DWARF_REG_OFFSET;
486 frame->regs[reg].addr = offset;
489 pr_debug("unhandled DWARF instruction 0x%x\n", insn);
498 * dwarf_unwind_stack - recursively unwind the stack
499 * @pc: address of the function to unwind
500 * @prev: struct dwarf_frame of the previous stackframe on the callstack
502 * Return a struct dwarf_frame representing the most recent frame
503 * on the callstack. Each of the lower (older) stack frames are
504 * linked via the "prev" member.
506 struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
507 struct dwarf_frame *prev)
509 struct dwarf_frame *frame;
510 struct dwarf_cie *cie;
511 struct dwarf_fde *fde;
514 bool define_ra = false;
517 * If this is the first invocation of this recursive function we
518 * need get the contents of a physical register to get the CFA
519 * in order to begin the virtual unwinding of the stack.
521 * Setting "define_ra" to true indicates that we want
522 * dwarf_cfa_execute_insns() to continue executing instructions
523 * until we know how to calculate the value of DWARF_ARCH_RA_REG
524 * (which we need in order to kick off the whole unwinding
527 * NOTE: the return address is guaranteed to be setup by the
528 * time this function makes its first function call.
/* pc == 0 means "start unwinding from inside this very function". */
531 pc = (unsigned long)&dwarf_unwind_stack;
535 frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
541 fde = dwarf_lookup_fde(pc);
544 * This is our normal exit path - the one that stops the
545 * recursion. There's two reasons why we might exit
548 * a) pc has no associated DWARF frame info and so
549 * we don't know how to unwind this frame. This is
550 * usually the case when we're trying to unwind a
551 * frame that was called from some assembly code
552 * that has no DWARF info, e.g. syscalls.
554 * b) the DEBUG info for pc is bogus. There's
555 * really no way to distinguish this case from the
556 * case above, which sucks because we could print a
562 cie = dwarf_lookup_cie(fde->cie_pointer);
564 frame->pc = fde->initial_location;
566 /* CIE initial instructions */
567 dwarf_cfa_execute_insns(cie->initial_instructions,
568 cie->instructions_end, cie, fde,
571 /* FDE instructions */
572 dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
573 fde, frame, pc, define_ra);
575 /* Calculate the CFA */
576 switch (frame->flags) {
577 case DWARF_FRAME_CFA_REG_OFFSET:
/* Recursive case: read the CFA register's saved value via prev's CFA. */
579 BUG_ON(!prev->regs[frame->cfa_register].flags);
582 addr += prev->regs[frame->cfa_register].addr;
583 frame->cfa = __raw_readl(addr);
587 * Again, this is the first invocation of this
588 * recursive function. We need to physically
589 * read the contents of a register in order to
590 * get the Canonical Frame Address for this
593 frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
596 frame->cfa += frame->cfa_offset;
602 /* If we haven't seen the return address reg, we're screwed. */
603 BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
/*
 * NOTE(review): "<=" walks one entry past the regs array, which
 * dwarf_frame_alloc_regs() sizes to exactly num_regs entries —
 * looks like an off-by-one; confirm against the original file.
 */
605 for (i = 0; i <= frame->num_regs; i++) {
606 struct dwarf_reg *reg = &frame->regs[i];
612 offset += frame->cfa;
/* Fetch the saved return address, then recurse into the caller's frame. */
615 addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
616 frame->return_addr = __raw_readl(addr);
618 frame->next = dwarf_unwind_stack(frame->return_addr, frame);
/*
 * dwarf_parse_cie - parse a single CIE out of the .eh_frame section
 * and append it to dwarf_cie_list.
 */
622 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
625 struct dwarf_cie *cie;
629 cie = kzalloc(sizeof(*cie), GFP_KERNEL);
636 * Record the offset into the .eh_frame section
637 * for this CIE. It allows this CIE to be
638 * quickly and easily looked up from the
641 cie->cie_pointer = (unsigned long)entry;
/* Only CIE version 1 is supported. */
643 cie->version = *(char *)p++;
644 BUG_ON(cie->version != 1);
/* The augmentation string is NUL-terminated and drives parsing below. */
646 cie->augmentation = p;
647 p += strlen(cie->augmentation) + 1;
649 count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
652 count = dwarf_read_leb128(p, &cie->data_alignment_factor);
656 * Which column in the rule table contains the
/* Always true here given the BUG_ON above; kept for future versions. */
659 if (cie->version == 1) {
660 cie->return_address_reg = __raw_readb(p);
663 count = dwarf_read_uleb128(p, &cie->return_address_reg);
/* 'z' augmentation: an augmentation-data length ULEB128 follows. */
667 if (cie->augmentation[0] == 'z') {
668 unsigned int length, count;
669 cie->flags |= DWARF_CIE_Z_AUGMENTATION;
671 count = dwarf_read_uleb128(p, &length);
674 BUG_ON((unsigned char *)p > end);
676 cie->initial_instructions = p + length;
680 while (*cie->augmentation) {
682 * "L" indicates a byte showing how the
683 * LSDA pointer is encoded. Skip it.
685 if (*cie->augmentation == 'L') {
688 } else if (*cie->augmentation == 'R') {
690 * "R" indicates a byte showing
691 * how FDE addresses are
694 cie->encoding = *(char *)p++;
696 } else if (*cie->augmentation == 'P') {
698 * "P" indicates a personality
703 } else if (*cie->augmentation == 'S') {
707 * Unknown augmentation. Assume
710 p = cie->initial_instructions;
716 cie->initial_instructions = p;
717 cie->instructions_end = end;
720 spin_lock_irqsave(&dwarf_cie_lock, flags);
721 list_add_tail(&cie->link, &dwarf_cie_list);
722 spin_unlock_irqrestore(&dwarf_cie_lock, flags);
/*
 * dwarf_parse_fde - parse a single FDE out of the .eh_frame section
 * and append it to dwarf_fde_list.
 */
727 static int dwarf_parse_fde(void *entry, u32 entry_type,
728 void *start, unsigned long len)
730 struct dwarf_fde *fde;
731 struct dwarf_cie *cie;
736 fde = kzalloc(sizeof(*fde), GFP_KERNEL);
743 * In a .eh_frame section the CIE pointer is the
744 * delta between the address within the FDE
746 fde->cie_pointer = (unsigned long)(p - entry_type - 4);
748 cie = dwarf_lookup_cie(fde->cie_pointer);
/* Use the CIE's 'R' encoding when present, otherwise a native read. */
752 count = dwarf_read_encoded_value(p, &fde->initial_location,
755 count = dwarf_read_addr(p, &fde->initial_location);
/* address_range: size of the code covered, same encoding choice. */
760 count = dwarf_read_encoded_value(p, &fde->address_range,
761 cie->encoding & 0x0f);
763 count = dwarf_read_addr(p, &fde->address_range);
/* 'z' augmentation: skip the augmentation data block. */
767 if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
769 count = dwarf_read_uleb128(p, &length);
773 /* Call frame instructions. */
774 fde->instructions = p;
775 fde->end = start + len;
778 spin_lock_irqsave(&dwarf_fde_lock, flags);
779 list_add_tail(&fde->link, &dwarf_fde_list);
780 spin_unlock_irqrestore(&dwarf_fde_lock, flags);
/*
 * dwarf_unwinder_dump - unwinder callback: walk the frames produced
 * by dwarf_unwind_stack() and report each return address via @ops.
 */
785 static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
787 const struct stacktrace_ops *ops, void *data)
789 struct dwarf_frame *frame;
/* pc == 0 makes dwarf_unwind_stack() start from its own frame. */
791 frame = dwarf_unwind_stack(0, NULL);
793 while (frame && frame->return_addr) {
794 ops->address(data, frame->return_addr, 1);
/* Registration record handed to unwinder_register() in dwarf_unwinder_init(). */
799 static struct unwinder dwarf_unwinder = {
800 .name = "dwarf-unwinder",
801 .dump = dwarf_unwinder_dump,
805 static void dwarf_unwinder_cleanup(void)
807 struct dwarf_cie *cie, *m;
808 struct dwarf_fde *fde, *n;
812 * Deallocate all the memory allocated for the DWARF unwinder.
813 * Traverse all the FDE/CIE lists and remove and free all the
814 * memory associated with those data structures.
/* _safe iteration: entries are presumably freed inside the loop body. */
816 spin_lock_irqsave(&dwarf_cie_lock, flags);
817 list_for_each_entry_safe(cie, m, &dwarf_cie_list, link)
819 spin_unlock_irqrestore(&dwarf_cie_lock, flags);
821 spin_lock_irqsave(&dwarf_fde_lock, flags);
822 list_for_each_entry_safe(fde, n, &dwarf_fde_list, link)
824 spin_unlock_irqrestore(&dwarf_fde_lock, flags);
828 * dwarf_unwinder_init - initialise the dwarf unwinder
830 * Build the data structures describing the .dwarf_frame section to
831 * make it easier to lookup CIE and FDE entries. Because the
832 * .eh_frame section is packed as tightly as possible it is not
833 * easy to lookup the FDE for a given PC, so we build a list of FDE
834 * and CIE entries that make it easier.
836 void dwarf_unwinder_init(void)
842 unsigned int c_entries, f_entries;
844 INIT_LIST_HEAD(&dwarf_cie_list);
845 INIT_LIST_HEAD(&dwarf_fde_list);
/* Walk every entry in .eh_frame, parsing CIEs and FDEs as we go. */
849 entry = &__start_eh_frame;
851 while ((char *)entry < __stop_eh_frame) {
854 count = dwarf_entry_len(p, &len);
857 * We read a bogus length field value. There is
858 * nothing we can do here apart from disabling
859 * the DWARF unwinder. We can't even skip this
860 * entry and move to the next one because 'len'
861 * tells us where our next entry is.
867 /* initial length does not include itself */
/* Zero ID marks a CIE; any other value is the FDE's CIE back-pointer. */
870 entry_type = get_unaligned((u32 *)p);
873 if (entry_type == DW_EH_FRAME_CIE) {
874 err = dwarf_parse_cie(entry, p, len, end);
880 err = dwarf_parse_fde(entry, entry_type, p, len);
/* Advance past this entry: its length plus the 4-byte length field. */
887 entry = (char *)entry + len + 4;
890 printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
891 c_entries, f_entries);
893 err = unwinder_register(&dwarf_unwinder);
900 printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
901 dwarf_unwinder_cleanup();