arch/powerpc/kvm/booke.c (as of "KVM: PPC: Convert SPRG[0-4] to shared page")
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "dcr",        VCPU_STAT(dcr_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
        printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
                                            vcpu->arch.shared->srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

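/*
 * Pending interrupts are tracked as a bitmap in vcpu->arch.pending_exceptions,
 * indexed by BOOKE_IRQPRIO_* number.  Queueing only sets the corresponding
 * bit; the actual injection into the guest happens later, in
 * kvmppc_core_deliver_interrupts().
 */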
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong uninitialized_var(msr_mask);
        bool update_esr = false, update_dear = false;

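        /*
         * The switch below decides two things: whether this interrupt may be
         * delivered right now (some classes are gated on MSR bits such as
         * MSR_EE, MSR_CE, MSR_ME or MSR_DE in the guest's shared MSR), and
         * which MSR bits survive delivery.  Bits not in msr_mask are cleared
         * by the kvmppc_set_msr() call further down.
         */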
        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_WATCHDOG:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                msr_mask = MSR_ME;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
                msr_mask = 0;
                break;
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
                msr_mask = MSR_ME;
                break;
        }

        if (allowed) {
                vcpu->arch.shared->srr0 = vcpu->arch.pc;
                vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr == true)
                        vcpu->arch.esr = vcpu->arch.queued_esr;
                if (update_dear == true)
                        vcpu->arch.shared->dar = vcpu->arch.queued_dear;
                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

                clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

        return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

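        /*
         * Bit positions are scanned from lowest to highest, so a numerically
         * lower BOOKE_IRQPRIO_* value is tried first.  At most one interrupt
         * is delivered per call; the rest stay pending for the next pass.
         */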
        priority = __ffs(*pending);
        while (priority <= BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        enum emulation_result er;
        int r = RESUME_HOST;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

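        /*
         * exit_nr is the BOOKE_INTERRUPT_* vector that brought us out of the
         * guest.  Cases that set RESUME_GUEST re-enter the guest directly;
         * RESUME_HOST returns to userspace with run->exit_reason filled in.
         */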
        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                /* Since we switched IVPR back to the host's value, the host
                 * handled this interrupt the moment we enabled interrupts.
                 * Now we just offer it a chance to reschedule the guest. */
                kvmppc_account_exit(vcpu, DEC_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.shared->msr & MSR_PR) {
                        /* Program traps generated by user-level software must be handled
                         * by the guest kernel. */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        /* don't overwrite subtypes, just account kvm_stats */
                        kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                        /* Future optimization: only reload non-volatiles if
                         * they were actually modified by emulation. */
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_DO_DCR:
                        run->exit_reason = KVM_EXIT_DCR;
                        r = RESUME_HOST;
                        break;
                case EMULATE_FAIL:
                        /* XXX Deliver Program interrupt to guest. */
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                        /* For debugging, encode the failing instruction and
                         * report it to userspace. */
                        run->hw.hardware_exit_reason = ~0ULL << 32;
                        run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                        r = RESUME_HOST;
                        break;
                default:
                        BUG();
                }
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SYSCALL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                break;
        }

        case BOOKE_INTERRUPT_DEBUG: {
                u32 dbsr;

                vcpu->arch.pc = mfspr(SPRN_CSRR0);

                /* clear IAC events in DBSR register */
                dbsr = mfspr(SPRN_DBSR);
                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
                mtspr(SPRN_DBSR, dbsr);

                run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

        local_irq_disable();

        kvmppc_core_deliver_interrupts(vcpu);

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                }
        }

        return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pc = 0;
        vcpu->arch.shared->msr = 0;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

        vcpu->arch.shadow_pid = 1;

        /* Eye-catching number so we know if the guest takes an interrupt
         * before it's programmed its own IVPR. */
        vcpu->arch.ivpr = 0x55550000;

        kvmppc_init_timing_stats(vcpu);

        return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.shared->srr0;
        regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
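        /* SPRG0-3 live in the shared page; SPRG4-7 remain in vcpu->arch. */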
        regs->sprg0 = vcpu->arch.shared->sprg0;
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
        regs->sprg4 = vcpu->arch.sprg4;
        regs->sprg5 = vcpu->arch.sprg5;
        regs->sprg6 = vcpu->arch.sprg6;
        regs->sprg7 = vcpu->arch.sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.shared->srr0 = regs->srr0;
        vcpu->arch.shared->srr1 = regs->srr1;
        vcpu->arch.shared->sprg0 = regs->sprg0;
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
        vcpu->arch.sprg4 = regs->sprg4;
        vcpu->arch.sprg5 = regs->sprg5;
        vcpu->arch.sprg6 = regs->sprg6;
        vcpu->arch.sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        r = kvmppc_core_vcpu_translate(vcpu, tr);
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

int __init kvmppc_booke_init(void)
{
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR only
         * holds the upper 16 address bits, so the handlers must live in a
         * 64KB-aligned block; allocate a full 64KB for them. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
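        /* Make the freshly copied handlers visible to instruction fetch. */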
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}