/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
/*****************************************************************************
 *        Real Mode handlers that need to be in the linear mapping           *
 ****************************************************************************/
	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
/******************************************************************************
 *****************************************************************************/

#define XICS_IPI	2	/* interrupt source # for IPIs */
/*
 * We come in here when woken from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)

	/* Check the wake reason in SRR1 to see why we got here */
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
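	/*
	 * In C terms, the wake-reason decode above is roughly the
	 * following sketch (the 3-bit reason field lives at SRR1[42:44]
	 * on these processors, and 0b100 means external interrupt; the
	 * helper name is illustrative only):
	 *
	 *	int reason = (srr1 >> 19) & 0x7;
	 *	if (reason == 4)
	 *		handle_external_ipi();
	 */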
	/*
	 * External interrupt - for now assume it is an IPI, since we
	 * should never get any other interrupts sent to offline threads.
	 * Only do this for secondary threads.
	 */
25:	ld	r5,HSTATE_XICS_PHYS(r13)
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	clrldi.	r9,r8,40		/* get interrupt source ID */
	beq	27f			/* none there? */
	stbcix	r0,r5,r6		/* clear IPI */
26:	stwcix	r8,r5,r7		/* EOI the interrupt */
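	/*
	 * The XICS accesses above follow the usual interrupt
	 * presentation controller protocol.  A hedged C sketch,
	 * assuming the standard ICP register offsets (XICS_XIRR = 4,
	 * XICS_MFRR = 0xc):
	 *
	 *	u32 xirr = in_be32(xics + XICS_XIRR);	// read acks it
	 *	if (xirr & 0xffffff) {			// source ID present?
	 *		out_8(xics + XICS_MFRR, 0xff);	// retract the IPI
	 *		out_be32(xics + XICS_XIRR, xirr); // EOI
	 *	}
	 */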
27:	/* XXX should handle hypervisor maintenance interrupts etc. here */

	/* reload vcpu pointer after clearing the IPI */
	ld	r4,HSTATE_KVM_VCPU(r13)

	/* if we have no vcpu to run, go back to sleep */

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	.global	kvmppc_hv_entry
kvmppc_hv_entry:
	/* all other volatile GPRS = free */
	std	r0, HSTATE_VMHANDLER(r13)
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	stw	r5, LPPACA_YIELDCOUNT(r3)
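	/*
	 * Restated in C (structure and field names are approximate;
	 * the VPA is the guest-registered lppaca):
	 *
	 *	if (vcpu->arch.vpa)
	 *		vcpu->arch.vpa->yield_count++;
	 */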
	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
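	/*
	 * vcore->entry_exit_count packs two counts: entries in the low
	 * byte, exiting threads in the 0xff00 byte.  The lwarx/stwcx.
	 * loop (partly elided here) behaves roughly like this sketch,
	 * where load_reserved/store_conditional stand in for
	 * lwarx/stwcx.:
	 *
	 *	do {
	 *		count = load_reserved(&vcore->entry_exit_count);
	 *		if (count >= 0x100)	// someone already exiting
	 *			goto secondary_too_late;
	 *	} while (!store_conditional(&vcore->entry_exit_count,
	 *				    count + 1));
	 */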
	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	li	r6,128			/* and flush the TLB */
	li	r7,0x800		/* IS field = 0b10 */
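	/*
	 * The flush loop itself is elided above; it issues one tlbiel
	 * per congruence class.  Approximately, assuming the set index
	 * goes in bits 12 and up of RB (0x800 selects IS = 0b10):
	 *
	 *	for (set = 0; set < 128; set++)
	 *		asm volatile("tlbiel %0" : : "r"(0x800 | (set << 12)));
	 *	asm volatile("ptesync");
	 */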
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)

	/* Check if HDEC expires soon */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	ld	r3,toc_tlbie_lock@toc(2)
	lwz	r8,PACA_LOCK_TOKEN(r13)
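	/*
	 * The lock is taken with the per-cpu lock token, via an
	 * lwarx/stwcx. loop that is mostly elided here.  A rough C
	 * equivalent:
	 *
	 *	u32 token = local_paca->lock_token;	// unique per cpu
	 *	while (cmpxchg(&native_tlbie_lock, 0, token) != 0)
	 *		;				// spin until free
	 */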
	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */

	/* drop the guest's tlbie_lock */

	/* Check if HDEC expires soon */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Enable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

	/* Restore state of CTRL run bit; assume 1 on entry */
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
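	/*
	 * The rldicl/rotldi pair above clears only MSR_HV: the first
	 * rotate brings bit MSR_HV_LG to the top and its clear-left
	 * count of 1 drops it, the second rotate restores the original
	 * alignment.  Equivalent C:
	 *
	 *	msr &= ~MSR_HV;		// MSR_HV is 1ul << MSR_HV_LG
	 */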
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	li	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	li	r0,BOOK3S_INTERRUPT_DECREMENTER

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
/******************************************************************************
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * R12		= interrupt vector
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
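	/*
	 * Bit 1 of the trap number (EXC_HV) marks interrupts that were
	 * delivered through HSRR0/1 rather than SRR0/1, so the test
	 * above amounts to:
	 *
	 *	if (trap & 2) {
	 *		pc  = mfspr(SPRN_HSRR0);
	 *		msr = mfspr(SPRN_HSRR1);
	 *	}
	 */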
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)
	std	r3, VCPU_GPR(R13)(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)
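	/*
	 * Restated in C, the last_inst capture is approximately:
	 *
	 *	u32 last = KVM_INST_FETCH_FAILED;
	 *	if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST)
	 *		last = mfspr(SPRN_HEIR);	// POWER7 only
	 *	vcpu->arch.last_inst = last;
	 */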
	/* these are volatile across C function calls */

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Check for mediated interrupts (could be done earlier really ...) */
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne	bounce_ext_interrupt
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save more register state */
	stw	r7, VCPU_DSISR(r9)

	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2 ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
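	/*
	 * A sketch of that gathering logic, with illustrative helper
	 * names (the real tests are on entry_exit_count, below):
	 *
	 *	if (first_thread_to_exit() && other_threads_in_guest() &&
	 *	    trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
	 *		mtspr(SPRN_HDEC, 0);	// shared HDEC hits them all
	 */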
	cmpwi	r3,0x100		/* Are we the first here? */
	cmpwi	r3,1			/* Are any other threads in the guest? */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	stbcix	r0,r7,r8		/* trigger the IPI */
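	/*
	 * The fan-out above walks the napping_threads bitmap and pokes
	 * each thread's MFRR; roughly, in C (the IPI_PRIORITY value and
	 * paca indexing are assumptions based on the surrounding code):
	 *
	 *	u32 mask = vcore->napping_threads & ~(1 << my_ptid);
	 *	for (thr = 0; mask; thr++, mask >>= 1)
	 *		if (mask & 1)
	 *			out_8(paca[thr].kvm_hstate.xics_phys +
	 *			      XICS_MFRR, IPI_PRIORITY);
	 */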
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
16:	ld	r8,KVM_HOST_LPCR(r4)
	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz	r8,PACA_LOCK_TOKEN(r13)
	addi	r3,r4,KVM_TLBIE_LOCK
	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	stw	r0,0(r3)		/* drop native_tlbie_lock */
	lis	r8,0x7fff		/* MAX_INT@h */

	/* Disable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h

	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)	/* save guest DSCR into the vcpu */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	stw	r3, LPPACA_YIELDCOUNT(r8)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA

	/* On P7, clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
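	/*
	 * The save policy, restated in C (helper names illustrative;
	 * pmcregs_in_use is the lppaca field tested above):
	 *
	 *	freeze_counters();		// MMCR0_FC, MMCRA cleared
	 *	if (vpa && !vpa->pmcregs_in_use)
	 *		vcpu->arch.mmcr[0] = MMCR0_FC;	// guest PMU unused
	 *	else
	 *		save_guest_pmcs_and_mmcrs(vcpu);
	 */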
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/* Secondary threads go off to take a nap on POWER7 */
	lwz	r0,VCPU_PTID(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	ld	r3,PACA_SPRG3(r13)

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)
	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7

13:	b	machine_check_fwnmi
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
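/*
 * A compact restatement of the triage below, with illustrative helper
 * names; the 0/-1/-2 return codes follow kvmppc_hpte_hv_fault:
 *
 *	if (!(hdsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)))
 *		return reflect_as_dsi(vcpu);		// not ours
 *	switch (kvmppc_hpte_hv_fault(vcpu, ...)) {	// search the HPT
 *	case 0:		return retry_instruction();
 *	case -1:	return exit_to_kernel_mode();
 *	case -2:	return emulate_mmio();		// need instr word
 *	default:	return reflect_as_dsi(vcpu);
 *	}
 */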
	mfspr	r6, SPRN_HDSISR

	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
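	/*
	 * The sequence above, approximately in C: mark the thread as
	 * "skip" so a fault on the load just resumes past it, then read
	 * the guest instruction with data relocation on:
	 *
	 *	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_SKIP;
	 *	mtmsrd(msr | MSR_DR);		// relocation on
	 *	inst = *(u32 *)guest_pc;	// may fault harmlessly
	 *	mtmsrd(msr);			// relocation back off
	 *	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
	 *	vcpu->arch.last_inst = inst;	// FETCH_FAILED if faulted
	 */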
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	/* Synthesize an ISI for the guest */
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
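/*
 * Since hcall numbers are multiples of 4 and the table entries are
 * 4-byte offsets, the hcall number indexes hcall_real_table directly.
 * A rough C restatement of the dispatch below (handler_at() is
 * illustrative):
 *
 *	if (req > hcall_real_table_end - hcall_real_table || (req & 3))
 *		goto hcall_real_fallback;
 *	s32 off = *(s32 *)((char *)hcall_real_table + req);
 *	if (!off)
 *		goto hcall_real_fallback;	// no real-mode handler
 *	ret = handler_at(off)(vcpu, vcpu->arch.gpr[4], ...);
 *	if (ret == H_TOO_HARD)
 *		goto hcall_real_fallback;	// punt to the kernel
 *	vcpu->arch.gpr[3] = ret;
 */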
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9			/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	/* We've attempted a real mode hcall, but the handler punted it
	 * back to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	.kvmppc_h_cede - hcall_real_table
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:
bounce_ext_interrupt:
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
_GLOBAL(kvmppc_h_cede)
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
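	/*
	 * The sync above is the ceded/prodded handshake: a full barrier
	 * between our store and our load so it pairs with the prodding
	 * side doing the mirror-image store and load.  In C:
	 *
	 *	vcpu->arch.ceded = 1;
	 *	smp_mb();			// store before load
	 *	if (vcpu->arch.prodded)
	 *		goto kvm_cede_prodded;	// don't sleep after all
	 */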
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
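	/*
	 * Nap entry, roughly in C (the PECE bits gate which events may
	 * wake the thread; nap() stands for the nap instruction):
	 *
	 *	u64 lpcr = mfspr(SPRN_LPCR);
	 *	lpcr |= LPCR_PECE0 | LPCR_PECE1; // wake on extint or decr
	 *	mtspr(SPRN_LPCR, lpcr);
	 *	isync();
	 *	nap();
	 */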
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	b	fast_interrupt_c_return
	ld	r5,HSTATE_KVM_VCORE(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	ld	r11,PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h

	/* Clear our vcpu pointer so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)

	/* Clear any pending IPI - assume we're a secondary thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */

	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
/*
 * Save away FP, VMX and VSX registers.
 */
_GLOBAL(kvmppc_save_fp)
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	li	r6,reg*16+VCPU_VSRS
	stfd	reg,reg*8+VCPU_FPRS(r3)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
	stfd	fr0,VCPU_FPSCR(r3)
#ifdef CONFIG_ALTIVEC
	li	r6,reg*16+VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
/*
 * Load up FP, VMX and VSX registers
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	lfd	fr0,VCPU_FPSCR(r4)
	li	r7,reg*16+VCPU_VSRS
	lfd	reg,reg*8+VCPU_FPRS(r4)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#ifdef CONFIG_ALTIVEC
	li	r7,reg*16+VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7