/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
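/*
 * Worked example (the per-arch figures are assumptions for illustration,
 * not guaranteed by this file): on i386, where MAX_INSN_SIZE is 16 and
 * kprobe_opcode_t is one byte, a 4096-byte page yields
 * 4096 / (16 * 1) = 256 slots; on a port where kprobe_opcode_t is a
 * 4-byte word and MAX_INSN_SIZE is 1 (e.g. ppc64), the same page yields
 * 4096 / (1 * 4) = 1024 slots.
 */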
struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;			/* Count of slots in use */
};

static struct hlist_head kprobe_insn_pages;
/*
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}
void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						       &kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}
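/*
 * Usage sketch (an assumption-laden illustration loosely modeled on the
 * x86-64 port; it is not part of this file): an architecture grabs a slot
 * in arch_prepare_kprobe() to hold the copied instruction, and returns it
 * in arch_remove_kprobe().
 *
 *	int arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		return 0;
 *	}
 *
 *	void arch_remove_kprobe(struct kprobe *p)
 *	{
 *		free_insn_slot(p->ainsn.insn);
 *	}
 */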
/* Locks kprobe: irqs must be disabled */
void __kprobes lock_kprobes(void)
{
	unsigned long flags = 0;

	/* Keep local interrupts from arriving right after we take
	 * kprobe_lock and before we get a chance to update kprobe_cpu;
	 * otherwise a kprobe in an ISR routine can deadlock against a
	 * kprobe in a task routine.
	 */
	local_irq_save(flags);

	spin_lock(&kprobe_lock);
	kprobe_cpu = smp_processor_id();

	local_irq_restore(flags);
}
void __kprobes unlock_kprobes(void)
{
	unsigned long flags = 0;

	/* Keep local interrupts from arriving right after we update
	 * kprobe_cpu and before we get a chance to release kprobe_lock;
	 * otherwise a kprobe in an ISR routine can deadlock against a
	 * kprobe in a task routine.
	 */
	local_irq_save(flags);

	kprobe_cpu = NR_CPUS;
	spin_unlock(&kprobe_lock);

	local_irq_restore(flags);
}
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}
/* You have to be holding the kprobe_lock */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each(node, head) {
		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;
	list_for_each_entry(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;
	list_for_each_entry(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/* if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;
	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
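/*
 * Aggregation in practice (a hedged sketch; the handler names and address
 * are made up for illustration): when two independent kprobes are
 * registered at the same address, register_kprobe() installs a "manager"
 * kprobe whose handlers are the aggr_* routines above, and both user
 * handlers fire from p->list on every hit.
 *
 *	static struct kprobe kp1 = { .addr = some_text_addr, .pre_handler = h1 };
 *	static struct kprobe kp2 = { .addr = some_text_addr, .pre_handler = h2 };
 *	register_kprobe(&kp1);
 *	register_kprobe(&kp2);	second registration routes through register_aggr_kprobe()
 */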
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering: no kretprobe left to return this to */
		kfree(ri);
}
struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}
/*
 * This function is called from exit_thread or flush_thread when task tk's
 * stack is being recycled so that we can recycle any function-return probe
 * instances associated with this task. These left over instances represent
 * probed functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);

	/* TODO: consider to only swap the RA after the last pre_handler fired */
	arch_prepare_kretprobe(rp, regs);
	return 0;
}
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}
/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail(&p->list, &old_p->list);
	} else
		list_add(&p->list, &old_p->list);
	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add(&p->list, &ap->list);

	INIT_HLIST_NODE(&ap->hlist);
	hlist_del(&p->hlist);
	hlist_add_head(&ap->hlist,
		       &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spinlock
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
	arch_disarm_kprobe(p);
	hlist_del(&p->hlist);
	spin_unlock_irqrestore(&kprobe_lock, flags);
	arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
		struct kprobe *p, unsigned long flags)
{
	list_del(&p->list);
	if (list_empty(&old_p->list)) {
		cleanup_kprobe(old_p, flags);
		kfree(old_p);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
		&& addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;
	struct kprobe *old_p;

	if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
		return ret;
	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto rm_kprobe;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	p->nmissed = 0;
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	arch_copy_kprobe(p);
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	arch_arm_kprobe(p);

out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	return ret;
}
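/*
 * Usage sketch (not part of this file; the names and the probed address
 * are assumptions for illustration): a client module registers a kprobe
 * on a kernel text address and logs each hit from its pre-handler.
 *
 *	static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", kp->addr);
 *		return 0;	returning 0 lets the probed insn run
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.pre_handler = my_pre_handler,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		my_kp.addr = (kprobe_opcode_t *) address_of_probed_function;
 *		return register_kprobe(&my_kp);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_kprobe(&my_kp);
 *	}
 */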
void __kprobes unregister_kprobe(struct kprobe *p)
{
	unsigned long flags;
	struct kprobe *old_p;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		if (old_p->pre_handler == aggr_pre_handler)
			cleanup_aggr_kprobe(old_p, p, flags);
		else
			cleanup_kprobe(p, flags);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
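/*
 * Usage sketch (not part of this file; the handler name, the probed
 * function's prototype, and the JPROBE_ENTRY usage are assumptions that
 * follow the contemporary jprobe convention): a jprobe handler mirrors
 * the probed function's signature so it can read the arguments, and must
 * end with jprobe_return() rather than returning normally.
 *
 *	static int my_jhandler(int arg0, long arg1)
 *	{
 *		printk(KERN_INFO "called with %d, %ld\n", arg0, arg1);
 *		jprobe_return();	mandatory; control never falls through
 *		return 0;		never reached
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = JPROBE_ENTRY(my_jhandler),
 *	};
 *
 *	my_jp.kp.addr = (kprobe_opcode_t *) address_of_probed_function;
 *	register_jprobe(&my_jp);
 */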
#ifdef ARCH_SUPPORTS_KRETPROBES

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = register_kprobe(&rp->kp)) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */
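/*
 * Usage sketch (not part of this file; the names are assumptions, and
 * reading the probed function's return value is architecture specific):
 * the handler fires when the probed function returns, with ri->ret_addr
 * holding the original return address.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "returning to %p\n", ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.maxactive = 20,	cap on concurrent activations
 *	};
 *
 *	my_rp.kp.addr = (kprobe_opcode_t *) address_of_probed_function;
 *	register_kretprobe(&my_rp);
 */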
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kprobe_lock, flags);
	free_rp_inst(rp);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);
EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);