/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}
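/* Returning 1 here tells the generic code to always send a kick (IPI). */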
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}

		trace_hardirqs_on();
#endif

		kvm_guest_enter();
		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
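/*
 * Handle the KVM/ePAPR paravirtual hypercalls: the hypercall number
 * arrives in r11 and up to four parameters in r3-r6; the status code is
 * returned to the caller and a second return value is placed in r4.
 */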
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
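/*
 * Check that the vcpu configuration is something this KVM flavour can
 * actually virtualize; the verdict is cached in vcpu->arch.sane.
 */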
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
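/*
 * Emulate the instruction that faulted on MMIO and map the emulation
 * result onto a resume code for the exit handler.
 */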
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
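/*
 * Report which optional capabilities this KVM build supports; scalar
 * capabilities (e.g. KVM_CAP_NR_VCPUS) return the value itself rather
 * than a boolean.
 */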
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_HTAB_FD:
		r = 1;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}

	return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(free, dont);
}
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return kvmppc_core_create_memslot(slot, npages);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}
/*
 * Low-level hrtimer wake routine. Because this runs in hardirq context,
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
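/*
 * Fold the data from a completed MMIO load back into the destination
 * register, honouring endianness and optional sign extension.
 */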
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
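/*
 * Start an MMIO load: describe the access in vcpu->run for userspace,
 * or complete it immediately if an in-kernel device claims the address.
 */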
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	if (!kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			     bytes, &run->mmio.data)) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);

	return r;
}
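/*
 * Start an MMIO store, the counterpart of kvmppc_handle_load(); the
 * value is placed in run->mmio.data with the requested endianness.
 */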
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data)) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
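/*
 * Main vcpu-run entry point: finish whatever exit userspace just
 * serviced (MMIO, DCR, OSI, PAPR hcall, EPR) before re-entering the
 * guest.
 */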
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}
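/*
 * Enable a per-vcpu capability requested via KVM_ENABLE_CAP, then
 * re-run the sanity check since the new setting changes what is valid.
 */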
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		vcpu->arch.epr_enabled = cap->args[0];
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
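/*
 * Fill in the hypercall instruction sequence the guest should use, as
 * exposed to userspace through KVM_PPC_GET_PVINFO.
 */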
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		struct kvm *kvm = filp->private_data;
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}

	case KVM_PPC_GET_HTAB_FD: {
		struct kvm *kvm = filp->private_data;
		struct kvm_get_htab_fd ghf;

		r = -EFAULT;
		if (copy_from_user(&ghf, argp, sizeof(ghf)))
			break;
		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}
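/* A simple bitmap allocator for logical partition IDs (LPIDs). */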
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}