/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>

#include "timing.h"
#include "booke.h"
#include "trace.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "dcr",        VCPU_STAT(dcr_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "doorbell",   VCPU_STAT(dbell_exits) },
        { "guest doorbell", VCPU_STAT(gdbell_exits) },
        { NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
        printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
                                            vcpu->arch.shared->srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

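/*
 * Lazy SPE switching: the guest's SPE unit state is only loaded onto
 * the hardware (and MSR_SPE set in the shadow MSR) once the guest
 * actually turns MSR[SPE] on, and is saved back out when it turns it
 * off again.
 */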
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
}

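/* Bring the shadow MSR's SPE bit in line with the guest's MSR[SPE]. */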
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.shared->msr & MSR_SPE) {
                if (!(vcpu->arch.shadow_msr & MSR_SPE))
                        kvmppc_vcpu_enable_spe(vcpu);
        } else if (vcpu->arch.shadow_msr & MSR_SPE) {
                kvmppc_vcpu_disable_spe(vcpu);
        }
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
        u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
        new_msr |= MSR_GS;
#endif

        vcpu->arch.shared->msr = new_msr;

        kvmppc_mmu_msr_notify(vcpu, old_msr);
        kvmppc_vcpu_sync_spe(vcpu);
}

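/*
 * Interrupts are queued by setting a bit, indexed by BOOKE_IRQPRIO_*,
 * in the vcpu's pending_exceptions bitmap; actual delivery happens
 * later, in kvmppc_core_check_exceptions().
 */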
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

        kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

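/*
 * On HV (GS-mode) hosts the guest-visible SRR0/SRR1, DEAR and ESR live
 * in dedicated guest SPRs; on PR hosts they live in the page shared
 * with the guest.  Some of the accessors below hide that difference.
 */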
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GSRR0, srr0);
        mtspr(SPRN_GSRR1, srr1);
#else
        vcpu->arch.shared->srr0 = srr0;
        vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.csrr0 = srr0;
        vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
                vcpu->arch.dsrr0 = srr0;
                vcpu->arch.dsrr1 = srr1;
        } else {
                set_guest_csrr(vcpu, srr0, srr1);
        }
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.mcsrr0 = srr0;
        vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GDEAR);
#else
        return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GDEAR, dear);
#else
        vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GESR);
#else
        return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GESR, esr);
#else
        vcpu->arch.shared->esr = esr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong msr_mask = 0;
        bool update_esr = false, update_dear = false;
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;
        bool keep_irq = false;
        enum int_class int_class;

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

        if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
                priority = BOOKE_IRQPRIO_EXTERNAL;
                keep_irq = true;
        }

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
                allowed = allowed && !crit;
                int_class = INT_CLASS_MC;
                break;
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                keep_irq = true;
                /* fall through */
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DBELL:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                allowed = allowed && !crit;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        }

        if (allowed) {
                switch (int_class) {
                case INT_CLASS_NONCRIT:
                        set_guest_srr(vcpu, vcpu->arch.pc,
                                      vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_CRIT:
                        set_guest_csrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_DBG:
                        set_guest_dsrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_MC:
                        set_guest_mcsrr(vcpu, vcpu->arch.pc,
                                        vcpu->arch.shared->msr);
                        break;
                }

                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr)
                        set_guest_esr(vcpu, vcpu->arch.queued_esr);
                if (update_dear)
                        set_guest_dear(vcpu, vcpu->arch.queued_dear);
                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

                if (!keep_irq)
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

#ifdef CONFIG_KVM_BOOKE_HV
        /*
         * If an interrupt is pending but masked, raise a guest doorbell
         * so that we are notified when the guest enables the relevant
         * MSR bit.
         */
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

        return allowed;
}

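/*
 * Raise or lower the guest's decrementer interrupt according to the
 * current TCR[DIE] enable bit and TSR[DIS] status bit.
 */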
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                kvmppc_core_queue_dec(vcpu);
        else
                kvmppc_core_dequeue_dec(vcpu);
}

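/*
 * Walk the pending_exceptions bitmap in priority order (lowest bit
 * first) and deliver the first interrupt the guest can currently take.
 */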
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

        if (vcpu->requests) {
                if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
                        smp_mb();
                        update_timer_ints(vcpu);
                }
        }

        priority = __ffs(*pending);
        while (priority < BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;
        WARN_ON_ONCE(!irqs_disabled());

        kvmppc_core_check_exceptions(vcpu);

        if (vcpu->arch.shared->msr & MSR_WE) {
                local_irq_enable();
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                local_irq_disable();

                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
                r = 1;
        }

        return r;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * Returns non-zero if a signal is pending.
 */
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;

        WARN_ON_ONCE(!irqs_disabled());
        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        local_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        r = 1;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /* interrupts got enabled in between, so we
                           are back at square 1 */
                        continue;
                }

                break;
        }

        return r;
}

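/*
 * Outer entry point for running the vcpu: delivers pending interrupts,
 * swaps the host FPU state for the guest's around __kvmppc_vcpu_run(),
 * and restores it on the way out.
 */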
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
#ifdef CONFIG_PPC_FPU
        unsigned int fpscr;
        int fpexc_mode;
        u64 fpr[32];
#endif

        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }

        local_irq_disable();
        if (kvmppc_prepare_to_enter(vcpu)) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                ret = -EINTR;
                goto out;
        }

        kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();
        memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
        fpscr = current->thread.fpscr.val;
        fpexc_mode = current->thread.fpexc_mode;

        /* Restore guest FPU state to thread */
        memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
        current->thread.fpscr.val = vcpu->arch.fpscr;

        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
         * as always using the FPU.  Kernel usage of FP (via
         * enable_kernel_fp()) in this thread must not occur while
         * vcpu->fpu_active is set.
         */
        vcpu->fpu_active = 1;

        kvmppc_load_guest_fp(vcpu);
#endif

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
        kvmppc_save_guest_fp(vcpu);

        vcpu->fpu_active = 0;

        /* Save guest FPU state from thread */
        memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
        vcpu->arch.fpscr = current->thread.fpscr.val;

        /* Restore userspace FPU state from stack */
        memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
        current->thread.fpscr.val = fpscr;
        current->thread.fpexc_mode = fpexc_mode;
#endif

        kvm_guest_exit();

out:
        local_irq_enable();
        return ret;
}

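/*
 * Common tail for exits that require instruction emulation; maps the
 * emulation result onto a RESUME_* action for the caller.
 */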
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* don't overwrite subtypes, just account kvm_stats */
                kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                /* Future optimization: only reload non-volatiles if
                 * they were actually modified by emulation. */
                return RESUME_GUEST_NV;

        case EMULATE_DO_DCR:
                run->exit_reason = KVM_EXIT_DCR;
                return RESUME_HOST;

        case EMULATE_FAIL:
                printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                /* For debugging, encode the failing instruction and
                 * report it to userspace. */
                run->hw.hardware_exit_reason = ~0ULL << 32;
                run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                kvmppc_core_queue_program(vcpu, ESR_PIL);
                return RESUME_HOST;

        default:
                BUG();
        }
}

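/*
 * Synthesize a minimal pt_regs describing the current host context, so
 * that host exception handlers (which expect one) can be called
 * directly from the exit path.
 */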
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
        ulong r1, ip, msr, lr;

        asm("mr %0, 1" : "=r"(r1));             /* current stack pointer */
        asm("mflr %0" : "=r"(lr));
        asm("mfmsr %0" : "=r"(msr));
        asm("bl 1f; 1: mflr %0" : "=r"(ip));    /* branch-and-link to the next
                                                   instruction to read the
                                                   current IP */

        memset(regs, 0, sizeof(*regs));
        regs->gpr[1] = r1;
        regs->nip = ip;
        regs->msr = msr;
        regs->link = lr;
}

/*
 * For interrupts that need to be handled by a host interrupt handler,
 * the corresponding handler is called from here, in much the same way
 * (though not identically) as it would be called from the low-level
 * handlers (such as those in arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
        struct pt_regs regs;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_fill_pt_regs(&regs);
                do_IRQ(&regs);
                break;
        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_fill_pt_regs(&regs);
                timer_interrupt(&regs);
                break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_fill_pt_regs(&regs);
                doorbell_exception(&regs);
                break;
#endif
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                /* FIXME */
                break;
        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                kvmppc_fill_pt_regs(&regs);
                performance_monitor_exception(&regs);
                break;
        case BOOKE_INTERRUPT_WATCHDOG:
                kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
                WatchdogException(&regs);
#else
                unknown_exception(&regs);
#endif
                break;
        case BOOKE_INTERRUPT_CRITICAL:
                /* fill regs before use, like the other cases above */
                kvmppc_fill_pt_regs(&regs);
                unknown_exception(&regs);
                break;
        }
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        int r = RESUME_HOST;

        /* update timing stats before last_exit_type is overwritten */
        kvmppc_update_timing_stats(vcpu);

        /* restart interrupts if they were meant for the host */
        kvmppc_restart_interrupt(vcpu, exit_nr);

        local_irq_enable();

        trace_kvm_exit(exit_nr, vcpu);

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                /* For debugging, send invalid exit reason to user space */
                run->hw.hardware_exit_reason = ~1ULL << 32;
                run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_account_exit(vcpu, DEC_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_WATCHDOG:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_account_exit(vcpu, DBELL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_CE or MSR_ME was not
                 * set.  Once we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_EE was not set.  Once
                 * we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_HV_PRIV:
                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
                        /*
                         * Program traps generated by user-level software must
                         * be handled by the guest kernel.
                         *
                         * In GS mode, hypervisor privileged instructions trap
                         * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
                         * actual program interrupts, handled by the guest.
                         */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_SPE
        case BOOKE_INTERRUPT_SPE_UNAVAIL: {
                if (vcpu->arch.shared->msr & MSR_SPE)
                        kvmppc_vcpu_enable_spe(vcpu);
                else
                        kvmppc_booke_queue_irqprio(vcpu,
                                                   BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;
        }

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                /*
                 * Guest wants SPE, but host kernel doesn't support it.  Send
                 * an "unimplemented operation" program check to the guest.
                 */
                kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
                r = RESUME_GUEST;
                break;

        /*
         * These really should never happen without CONFIG_SPE,
         * as we should never enable the real MSR[SPE] in the guest.
         */
        case BOOKE_INTERRUPT_SPE_FP_DATA:
        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
                       __func__, exit_nr, vcpu->arch.pc);
                run->hw.hardware_exit_reason = exit_nr;
                r = RESUME_HOST;
                break;
#endif

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_KVM_BOOKE_HV
        case BOOKE_INTERRUPT_HV_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR)) {
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                } else {
                        /*
                         * hcall from guest userspace -- send privileged
                         * instruction program check.
                         */
                        kvmppc_core_queue_program(vcpu, ESR_PPR);
                }

                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                }
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;
#endif

        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
                        kvmppc_map_magic(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;

                        break;
                }
#endif

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        vcpu->arch.vaddr_accessed = eaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                break;
        }

        case BOOKE_INTERRUPT_DEBUG: {
                u32 dbsr;

                vcpu->arch.pc = mfspr(SPRN_CSRR0);

                /* clear IAC events in DBSR register */
                dbsr = mfspr(SPRN_DBSR);
                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
                mtspr(SPRN_DBSR, dbsr);

                run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

        /*
         * To avoid clobbering exit_reason, only check for signals if we
         * aren't already exiting to userspace for some other reason.
         */
        if (!(r & RESUME_HOST)) {
                local_irq_disable();
                if (kvmppc_prepare_to_enter(vcpu)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                }
        }

        return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int i;
        int r;

        vcpu->arch.pc = 0;
        vcpu->arch.shared->pir = vcpu->vcpu_id;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
        kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
        vcpu->arch.shadow_pid = 1;
        vcpu->arch.shared->msr = 0;
#endif

        /* Eye-catching numbers so we know if the guest takes an interrupt
         * before it's programmed its own IVPR/IVORs. */
        vcpu->arch.ivpr = 0x55550000;
        for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
                vcpu->arch.ivor[i] = 0x7700 | i * 4;

        kvmppc_init_timing_stats(vcpu);

        r = kvmppc_core_vcpu_setup(vcpu);
        kvmppc_sanity_check(vcpu);
        return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.shared->srr0;
        regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.shared->sprg0;
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
        regs->sprg4 = vcpu->arch.shared->sprg4;
        regs->sprg5 = vcpu->arch.shared->sprg5;
        regs->sprg6 = vcpu->arch.shared->sprg6;
        regs->sprg7 = vcpu->arch.shared->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.shared->srr0 = regs->srr0;
        vcpu->arch.shared->srr1 = regs->srr1;
        kvmppc_set_pid(vcpu, regs->pid);
        vcpu->arch.shared->sprg0 = regs->sprg0;
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
        vcpu->arch.shared->sprg4 = regs->sprg4;
        vcpu->arch.shared->sprg5 = regs->sprg5;
        vcpu->arch.shared->sprg6 = regs->sprg6;
        vcpu->arch.shared->sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

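/* KVM_SREGS_E_BASE: the register set common to all Book E cores. */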
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
        u64 tb = get_tb();

        sregs->u.e.features |= KVM_SREGS_E_BASE;

        sregs->u.e.csrr0 = vcpu->arch.csrr0;
        sregs->u.e.csrr1 = vcpu->arch.csrr1;
        sregs->u.e.mcsr = vcpu->arch.mcsr;
        sregs->u.e.esr = get_guest_esr(vcpu);
        sregs->u.e.dear = get_guest_dear(vcpu);
        sregs->u.e.tsr = vcpu->arch.tsr;
        sregs->u.e.tcr = vcpu->arch.tcr;
        sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
        sregs->u.e.tb = tb;
        sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
                return 0;

        vcpu->arch.csrr0 = sregs->u.e.csrr0;
        vcpu->arch.csrr1 = sregs->u.e.csrr1;
        vcpu->arch.mcsr = sregs->u.e.mcsr;
        set_guest_esr(vcpu, sregs->u.e.esr);
        set_guest_dear(vcpu, sregs->u.e.dear);
        vcpu->arch.vrsave = sregs->u.e.vrsave;
        kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
                vcpu->arch.dec = sregs->u.e.dec;
                kvmppc_emulate_dec(vcpu);
        }

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
                vcpu->arch.tsr = sregs->u.e.tsr;
                update_timer_ints(vcpu);
        }

        return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_ARCH206;

        sregs->u.e.pir = vcpu->vcpu_id;
        sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
        sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
        sregs->u.e.decar = vcpu->arch.decar;
        sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
                return 0;

        if (sregs->u.e.pir != vcpu->vcpu_id)
                return -EINVAL;

        vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
        vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
        vcpu->arch.decar = sregs->u.e.decar;
        vcpu->arch.ivpr = sregs->u.e.ivpr;

        return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_IVOR;

        sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
        sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
        sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
        sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
        sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
        sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
        sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
        sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
        sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
        sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
        sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
        sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
        sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
        sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
        sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
        sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
        vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
        vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
        vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
        vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
        vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
        vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
        vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        sregs->pvr = vcpu->arch.pvr;

        get_sregs_base(vcpu, sregs);
        get_sregs_arch206(vcpu, sregs);
        kvmppc_core_get_sregs(vcpu, sregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        if (vcpu->arch.pvr != sregs->pvr)
                return -EINVAL;

        ret = set_sregs_base(vcpu, sregs);
        if (ret < 0)
                return ret;

        ret = set_sregs_arch206(vcpu, sregs);
        if (ret < 0)
                return ret;

        return kvmppc_core_set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        r = kvmppc_core_vcpu_translate(vcpu, tr);
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem)
{
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
        vcpu->arch.tcr = new_tcr;
        update_timer_ints(vcpu);
}

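/*
 * TSR set/clear helpers.  Setting bits also kicks the vcpu, so that a
 * newly raised timer interrupt is noticed even if the vcpu is sleeping.
 */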
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        set_bits(tsr_bits, &vcpu->arch.tsr);
        smp_wmb();
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        clear_bits(tsr_bits, &vcpu->arch.tsr);
        update_timer_ints(vcpu);
}

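/*
 * Timer callback for the emulated decrementer: mark TSR[DIS] and, if
 * auto-reload (TCR[ARE]) is enabled, restart the decrementer from DECAR.
 */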
void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        if (vcpu->arch.tcr & TCR_ARE) {
                vcpu->arch.dec = vcpu->arch.decar;
                kvmppc_emulate_dec(vcpu);
        }

        kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
        current->thread.kvm_vcpu = NULL;
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR
         * holds only the top 16 address bits, so the handlers must sit in
         * a 64KB-aligned region; a 64KB allocation is naturally aligned. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}