/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { {NULL, NULL} };
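/*
 * Note (added): the blacklist above contains only the NULL sentinel entry,
 * i.e. no s390 functions are excluded from return probing. Other
 * architectures use this array to list functions that must never carry a
 * kretprobe.
 */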
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Make sure the probe isn't going on a difficult instruction */
	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
		return -EINVAL;

	/* s390 instructions are halfword aligned, reject odd addresses */
	if ((unsigned long)p->addr & 0x01)
		return -EINVAL;

	/* Use the get_insn_slot() facility for correctness */
	if (!(p->ainsn.insn = get_insn_slot()))
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	get_instruction_type(&p->ainsn);
	p->opcode = *p->addr;
	return 0;
}
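/*
 * Usage sketch (added note, not part of this file; handler and symbol names
 * are hypothetical): a module registers a probe through the generic
 * register_kprobe(), which calls arch_prepare_kprobe() above to vet the
 * target and copy the original opcode out of line.
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		return 0;	(let the single-step setup proceed)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_fork",	(example target)
 *		.pre_handler = my_pre,
 *	};
 *
 *	register_kprobe(&my_kp);
 */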
int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
{
	switch (*(__u8 *) instruction) {
	case 0x0c:	/* bassm */
	case 0x0b:	/* bsm	 */
	case 0x83:	/* diag	 */
	case 0x44:	/* ex	 */
	case 0xac:	/* stnsm */
	case 0xad:	/* stosm */
		return -EINVAL;
	}
	switch (*(__u16 *) instruction) {
	case 0x0101:	/* pr	 */
	case 0xb25a:	/* bsa	 */
	case 0xb240:	/* bakr	 */
	case 0xb258:	/* bsg	 */
	case 0xb218:	/* pc	 */
	case 0xb228:	/* pt	 */
	case 0xb98d:	/* epsw	 */
		return -EINVAL;
	}
	return 0;
}
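/*
 * Rationale (added note): these instructions either trap to the hypervisor
 * (diag), execute another instruction (ex), or switch addressing mode or
 * other PSW state (bassm, bsm, pr, pc, pt, lpsw-class). None of them can be
 * safely single-stepped from an out-of-line copy and then "fixed up", so
 * probes on them are rejected outright.
 */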
void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
{
	/* default fixup method */
	ainsn->fixup = FIXUP_PSW_NORMAL;

	/* save the r1 operand of the instruction */
	ainsn->reg = (*ainsn->insn & 0xf0) >> 4;

	/* save the instruction length (pop 5-5) in bytes */
	switch (*(__u8 *) (ainsn->insn) >> 6) {
	case 0:
		ainsn->ilen = 2;
		break;
	case 1:
	case 2:
		ainsn->ilen = 4;
		break;
	case 3:
		ainsn->ilen = 6;
		break;
	}

	switch (*(__u8 *) ainsn->insn) {
	case 0x05:	/* balr	*/
	case 0x0d:	/* basr	*/
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		/* if r2 = 0, no branch will be taken */
		if ((*ainsn->insn & 0x0f) == 0)
			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x06:	/* bctr	*/
	case 0x07:	/* bcr	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x45:	/* bal	*/
	case 0x4d:	/* bas	*/
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		break;
	case 0x47:	/* bc	*/
	case 0x46:	/* bct	*/
	case 0x86:	/* bxh	*/
	case 0x87:	/* bxle	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x82:	/* lpsw	*/
		ainsn->fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xb2:	/* lpswe */
		if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) {
			ainsn->fixup = FIXUP_NOT_REQUIRED;
		}
		break;
	case 0xa7:	/* bras	*/
		if ((*ainsn->insn & 0x0f) == 0x05) {
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		}
		break;
	case 0xc0:
		if ((*ainsn->insn & 0x0f) == 0x00	/* larl	 */
		    || (*ainsn->insn & 0x0f) == 0x05)	/* brasl */
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xeb:
		if (*(((__u8 *) ainsn->insn) + 5) == 0x44 ||	/* bxhg	 */
		    *(((__u8 *) ainsn->insn) + 5) == 0x45) {	/* bxleg */
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		}
		break;
	case 0xe3:	/* bctg	*/
		if (*(((__u8 *) ainsn->insn) + 5) == 0x46) {
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		}
		break;
	}
}
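/*
 * Worked example (added note): suppose "basr %r14,%r1" (0x0de1) at address A
 * is probed and single-stepped from its out-of-line copy at slot address S.
 * get_instruction_type() selects FIXUP_RETURN_REGISTER with reg = 14. The
 * branch target in %r1 is absolute, so the PSW address needs no correction
 * after the step, but the hardware loaded %r14 with S + 2, the "return
 * address" inside the slot. resume_execution() below rewrites it to
 * A + (S + 2 - S) = A + 2, the correct return address at the original site.
 */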
static int __kprobes swap_instruction(void *aref)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args *args = aref;
	int rc;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
	kcb->kprobe_status = status;
	return rc;
}
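/*
 * Note (added): swap_instruction() runs under stop_machine(), so all other
 * CPUs are quiesced while the breakpoint halfword is written; no CPU can be
 * part-way through the patched instruction. The temporary KPROBE_SWAP_INST
 * status lets kprobe_trap_handler() recognize a fault raised by
 * probe_kernel_write() itself and fail the swap cleanly.
 */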
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = p->opcode;
	args.new = BREAKPOINT_INSTRUCTION;
	stop_machine(swap_instruction, &args, NULL);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = BREAKPOINT_INSTRUCTION;
	args.new = p->opcode;
	stop_machine(swap_instruction, &args, NULL);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	per_cr_bits kprobe_per_regs[1];

	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;

	/* Set up the per control reg info, will pass to lctl */
	kprobe_per_regs[0].em_instruction_fetch = 1;
	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;

	/* Set the PER control regs, turns on single step for this address */
	__ctl_load(kprobe_per_regs, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
}
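/*
 * Note (added): s390 has no trace-flag style single-step bit; the step is
 * built from PER (Program Event Recording). Control registers 9-11 hold the
 * event mask and the start/end of the monitored address range. Restricting
 * the instruction-fetch event to [insn, insn + 1] makes the CPU raise a PER
 * event right after executing the out-of-line copy; that event is then
 * routed to post_kprobe_handler().
 */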
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
	       sizeof(kcb->kprobe_saved_ctl));
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
	       sizeof(kcb->kprobe_saved_ctl));
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	/* Save the interrupt and per flags */
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
	/* Save the control regs that govern PER */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
}
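/*
 * Usage sketch (added note, not part of this file; names are hypothetical):
 * return probes go through the generic API, which calls
 * arch_prepare_kretprobe() on function entry to hijack %r14, the s390
 * linkage register holding the return address.
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name = "dput",	(example target)
 *		.handler = my_ret,
 *	};
 *
 *	register_kretprobe(&my_rp);
 */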
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned long *addr = (unsigned long *)
		((regs->psw.addr & PSW_ADDR_INSN) - 2);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->psw.mask &= ~PSW_MASK_PER;
				regs->psw.mask |= kcb->kprobe_saved_imask;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		/*
		 * No kprobe at this address. The fault has not been
		 * caused by a kprobe breakpoint. The race of breakpoint
		 * vs. kprobe remove does not exist because on s390 we
		 * use stop_machine to arm/disarm the breakpoints.
		 */
		goto no_kprobe;

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
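/*
 * Note (added): the return value is the contract with
 * kprobe_exceptions_notify() below: 1 means "this breakpoint or step was
 * ours, stop the die-notifier chain", 0 hands the trap back to the normal
 * s390 illegal-opcode handling. Preemption stays disabled from here until
 * post_kprobe_handler() (or the no_kprobe path) re-enables it.
 */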
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}
/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the first instance's ret_addr will point to the
	 *	 real return address, and all the rest will point to
	 *	 kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	regs->psw.addr &= PSW_ADDR_INSN;

	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
		regs->psw.addr = (unsigned long)p->addr +
				((unsigned long)regs->psw.addr -
				 (unsigned long)p->ainsn.insn);

	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
		if ((unsigned long)regs->psw.addr -
		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;

	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
		regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
					    (regs->gprs[p->ainsn.reg] -
					     (unsigned long)p->ainsn.insn))
					    | PSW_ADDR_AMODE;

	regs->psw.addr |= PSW_ADDR_AMODE;
	/* turn off PER mode */
	regs->psw.mask &= ~PSW_MASK_PER;
	/* Restore the original per control regs */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask |= kcb->kprobe_saved_imask;
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, the psw
	 * mask will have PER set, in which case we continue the remaining
	 * processing of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER) {
		return 0;
	}

	return 1;
}
static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the psw address points back to the
		 * probe address, and allow the page fault handler
		 * to continue as a normal page fault.
		 */
		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
		regs->psw.mask &= ~PSW_MASK_PER;
		regs->psw.mask |= kcb->kprobe_saved_imask;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen
		 * if a handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r14 is the function return address */
	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
	/* r15 is the stack pointer */
	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
	addr = (unsigned long)kcb->jprobe_saved_r15;

	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));
	return 1;
}
void __kprobes jprobe_return(void)
{
	/* 0x0002 is the s390 kprobes breakpoint (BREAKPOINT_INSTRUCTION) */
	asm volatile(".word 0x0002");
}

void __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
	       MIN_STACK_SIZE(stack_addr));
	preempt_enable_no_resched();
	return 1;
}
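/*
 * Usage sketch (added note, not part of this file; names are hypothetical):
 * a jprobe diverts the probed function into an entry routine with the same
 * signature, which must finish with jprobe_return() so that
 * longjmp_break_handler() can restore the saved registers and stack.
 *
 *	static void my_entry(struct dentry *dentry)
 *	{
 *		jprobe_return();	(never returns normally)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.kp.symbol_name = "dput",	(example target)
 *		.entry = my_entry,
 *	};
 *
 *	register_jprobe(&my_jp);
 */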
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
		return 1;
	return 0;
}