/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

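/*
 * VCPU_STAT() expands to the offset/kind pair that struct
 * kvm_stats_debugfs_item expects, so each entry in the debugfs table
 * below can point at a per-vcpu counter.
 */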
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}

#if defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
	u64 jd = mftb() - vcpu->arch.dec_jiffies;
	return vcpu->arch.dec - jd;
}
#endif

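/*
 * Recompute the MSR the guest really runs with: keep the guest-visible
 * bits we can pass through (FE0/FE1/SF/SE/BE/DE), force on the bits the
 * host needs (translation, problem state, EE, ME), and pass through any
 * FP/VEC/VSX facility the guest currently owns.
 */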
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & (MSR_WE|MSR_POW)) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
	    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
	vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
	kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

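/*
 * Map an exception vector to the bit position in pending_exceptions that
 * encodes its delivery priority.
 */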
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;   break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;  break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;   break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;   break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;   break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;   break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;       break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;      break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;        break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;     break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;    break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;        break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;          break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;        break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;            break;
	default:    prio = BOOK3S_IRQPRIO_MAX;            break;
	}

	return prio;
}

static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	to_book3s(vcpu)->prog_flags = flags;
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	ulong flags = 0ULL;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		flags = to_book3s(vcpu)->prog_flags;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, flags);

	return deliver;
}

void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
			/* DEC interrupts get cleared by mtdec */
			clear_bit(priority, &vcpu->arch.pending_exceptions);
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

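/*
 * Pick the MMU backend and feature flags matching the PVR we are asked
 * to virtualize, and apply host quirks: dcbz32 on hypervisor-mode 970,
 * FP exception bits on Cell, and native paired singles on
 * Gekko/Broadway-class cores.
 */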
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
	}

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
		break;
	}
}

/* Book3s_32 CPUs always have 32-byte cache lines, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage, KM_USER0);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page, KM_USER0);
	put_page(hpage);
}

static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			struct kvmppc_pte *pte)
{
	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & 0xffffffff;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}

static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;

	vcpu->stat.st++;

	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
		return -ENOENT;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = *eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
		goto nopte;

	*eaddr = pte.raddr;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto mmio;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto mmio;
	}

	return EMULATE_DONE;

nopte:
	return -ENOENT;
mmio:
	return EMULATE_DO_MMIO;
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

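/*
 * Resolve a guest page fault: translate the effective address through
 * the guest MMU (or fake a real-mode mapping), reflect missing-page,
 * protection and segment faults back into the guest, map valid pages on
 * the host, and hand everything else to the MMIO emulator.
 */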
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & 0xffffffff;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
		vcpu->arch.shared->msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr =
			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}

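/*
 * Fetch the instruction at the guest's PC from guest memory. If the
 * backing page is gone, synthesize an instruction storage interrupt and
 * report EMULATE_AGAIN so the caller retries after the guest handled
 * the fault.
 */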
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;

		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;

		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

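/*
 * Central exit dispatcher: for every guest exit decide whether we can fix
 * things up ourselves (map a page or segment, hand over an FP/VEC/VSX
 * facility, emulate an instruction) or whether we have to reflect an
 * interrupt into the guest or return to userspace.
 */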
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
		exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
		kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
#elif defined (EXIT_DEBUG_SIMPLE)
	if ((exit_nr != 0x900) && (exit_nr != 0x500))
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
			vcpu->arch.shared->msr);
#endif

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
		    == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			break;
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers
			 *     that no guest that needs the dcbz hack uses NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |=
				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			break;
		}
#endif

		/* The only case we need to handle is missing shadow PTEs */
		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, dar, ~0xFFFUL);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

#ifdef EXIT_DEBUG
	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
#endif

	return r;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++) {
			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
		}
		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;

	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!vcpu->arch.shared)
		goto uninit_vcpu;

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu_book3s->slb_nr = 64;

	/* remember where some real-mode handlers are */
	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
#else
	vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
#endif

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}

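/*
 * Enter the guest: the host FPU/Altivec/VSX state is parked on the stack
 * around the guest run, and whatever facility state the guest used is
 * saved back into the vcpu before the host state is restored.
 */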
extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* XXX we get called with irq disabled - change that! */
	local_irq_enable();

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);

	local_irq_disable();

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

	return ret;
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);