 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>

#include <asm/cacheflush.h>
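/* Kernel virtual address of the 64KB region holding our copies of the
 * exception handler stubs; set up in kvmppc_booke_init() below and released
 * in kvmppc_booke_exit(). */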
unsigned long kvmppc_booke_handlers;
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
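/* Each macro expands to an (offset, stat type) pair, so every entry in the
 * table below names a counter embedded in struct kvm or struct kvm_vcpu and
 * tells the generic KVM debugfs code where to find it. */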
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "dcr", VCPU_STAT(dcr_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                            ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
}
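/* Note: pending_exceptions is a bitmap indexed by the BOOKE_IRQPRIO_* values,
 * so "queueing" an interrupt above is nothing more than setting its bit.
 * Actual delivery happens in kvmppc_core_deliver_interrupts() below, which
 * scans the bitmap from the lowest-numbered set bit upward and delivers each
 * interrupt that the guest's current MSR allows. */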
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
	ulong uninitialized_var(msr_mask);
	bool update_esr = false, update_dear = false;

	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:

	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:

	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		msr_mask = MSR_CE|MSR_ME|MSR_DE;

	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.shared->msr & MSR_CE;

	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;

	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;

	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;

		vcpu->arch.shared->srr0 = vcpu->arch.pc;
		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			vcpu->arch.esr = vcpu->arch.queued_esr;
		if (update_dear == true)
			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		clear_bit(priority, &vcpu->arch.pending_exceptions);
/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
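 *
 * For example, the signal-pending path below returns
 * (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV): the low two bits carry
 * the RESUME flags and the remaining bits carry the error code handed back to
 * the caller.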
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
	enum emulation_result er;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			kvmppc_account_exit(vcpu, USR_PR_INST);

		er = kvmppc_emulate_instruction(run, vcpu);
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */

			run->exit_reason = KVM_EXIT_DCR;

			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
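			/* That is: the upper 32 bits of hardware_exit_reason
			 * are set to all ones and the lower 32 bits hold the
			 * instruction word that could not be emulated. */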
	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);

			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
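			/* Hand the access to the MMIO path: kvmppc_emulate_mmio()
			 * decodes the faulting load/store and, for accesses it
			 * cannot complete in the kernel, fills in run->mmio and
			 * exits to userspace (KVM_EXIT_MMIO) so the device can
			 * be emulated there. */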
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);

			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);

	case BOOKE_INTERRUPT_DEBUG: {
		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);

		printk(KERN_EMERG "exit_nr %d\n", exit_nr);

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	vcpu->arch.shared->msr = 0;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching number so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR. */
	vcpu->arch.ivpr = 0x55550000;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
	r = kvmppc_core_vcpu_translate(vcpu, tr);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
int __init kvmppc_booke_init(void)
	unsigned long ivor[16];
	unsigned long max_ivor = 0;

	/* We install our own exception handlers by hijacking IVPR. IVPR only
	 * keeps the upper 16 bits of each handler address, so the handler
	 * copies must live in a single 64KB-aligned block; hence the 64KB
	 * allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	if (!kvmppc_booke_handlers)

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,

	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
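	/* Note the flush range ends at max_ivor + kvmppc_handler_len, i.e.
	 * just past the highest-placed handler stub: the stubs are executed
	 * straight out of this buffer, so they must be visible to instruction
	 * fetch before any guest can vector into them. */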
void __exit kvmppc_booke_exit(void)
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);