/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

/*
 * For now, limit memory to 64GB and require it to be large pages.
 * This value is chosen because it makes the ram_pginfo array be
 * 64kB in size, which is about as large as we want to be trying
 * to allocate with kmalloc.
 */
#define MAX_MEM_ORDER		36

#define LARGE_PAGE_ORDER	24	/* 16MB pages */
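
/*
 * A worked check of the sizing comment above, assuming a 16-byte
 * struct kvmppc_pginfo (an assumption; the struct is defined in a
 * header outside this file): 1ul << (36 - 24) = 4096 large pages
 * cover the 64GB limit, and 4096 entries * 16 bytes gives the 64kB
 * ram_pginfo array, which kmalloc can still satisfy comfortably.
 */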

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	local_paca->kvm_hstate.kvm_vcpu = vcpu;
	local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long pg_index, ra, len;
	unsigned long pg_offset;
	void *va;
	struct kvm_vcpu *tvcpu;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	flags >>= 63 - 18;
	flags &= 7;
	if (flags == 0 || flags == 4)
		return H_PARAMETER;
	if (flags < 4) {
		/* registering new area; convert logical addr to real */
		pg_index = vpa >> kvm->arch.ram_porder;
		pg_offset = vpa & (kvm->arch.ram_psize - 1);
		if (pg_index >= kvm->arch.ram_npages)
			return H_PARAMETER;
		if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
			return H_PARAMETER;
		ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
		ra |= pg_offset;
		va = __va(ra);
		if (flags <= 1)
			len = *(unsigned short *)(va + 4);
		else
			len = *(unsigned int *)(va + 4);
		if (pg_offset + len > kvm->arch.ram_psize)
			return H_PARAMETER;
		switch (flags) {
		case 1:		/* register VPA */
			tvcpu->arch.vpa = va;
			init_vpa(vcpu, va);
			break;
		case 2:		/* register DTL */
			if (!tvcpu->arch.vpa)
				return H_RESOURCE;
			tvcpu->arch.dtl = va;
			tvcpu->arch.dtl_end = va + len;
			break;
		case 3:		/* register SLB shadow buffer */
			if (!tvcpu->arch.vpa)
				return H_RESOURCE;
			len = (len - 16) / 16;
			tvcpu->arch.slb_shadow = va;
			break;
		}
	} else {
		switch (flags) {
		case 5:		/* unregister VPA */
			if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
				return H_RESOURCE;
			tvcpu->arch.vpa = NULL;
			break;
		case 6:		/* unregister DTL */
			tvcpu->arch.dtl = NULL;
			break;
		case 7:		/* unregister SLB shadow buffer */
			tvcpu->arch.slb_shadow = NULL;
			break;
		}
	}
	return H_SUCCESS;
}
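
/*
 * Worked example for the flags decoding in do_h_register_vpa() above
 * (the encoding follows from the "flags >>= 63 - 18; flags &= 7"
 * sequence itself; nothing else is assumed): a guest registering its
 * VPA passes subfunction 1 in bits 16-18 (IBM numbering) of the flags
 * argument, i.e. flags = 1ul << 45, which decodes to flags == 1 and
 * selects "case 1: register VPA". Subfunctions 5-7 take the
 * unregister path in the else branch.
 */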

int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;

	switch (req) {
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest does a bad real-mode access,
	 * as we have enabled VRMA (virtualized real mode area) mode in the
	 * LPCR. We just generate an appropriate DSI/ISI to the guest.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
					0x08000000);
		r = RESUME_GUEST;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	/* zero the whole structure first, then fill it in */
	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.last_cpu = -1;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);

	kvmppc_mmu_book3s_hv_init(vcpu);

	/*
	 * We consider the vcpu stopped until we see the first run ioctl for it.
	 */
	vcpu->arch.state = KVMPPC_VCPU_STOPPED;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
		}
		kvm->arch.vcores[core] = vcore;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kfree(vcpu);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_deliver_interrupts(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}
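
/*
 * Worked example for the conversion in kvmppc_set_timer() above
 * (512MHz is the usual POWER7 timebase frequency, not a value this
 * file defines): with tb_ticks_per_sec = 512000000, a decrementer due
 * 1024 timebase ticks from now arms the hrtimer for
 * 1024 * 1000000000 / 512000000 = 2000ns.
 */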

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_runnable;
	++vc->n_busy;
	/* decrement the physical thread id of each following vcpu */
	v = vcpu;
	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
		--v->arch.ptid;
	list_del(&vcpu->arch.run_list);
}

static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#ifdef CONFIG_PPC_ICP_NATIVE
	if (vcpu->arch.ptid) {
		tpaca->cpu_start = 0x80;
		wmb();
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;
	return 1;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid;

	/* don't start if any threads have a signal pending */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (signal_pending(vcpu->arch.run_task))
			return 0;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		return 0;		/* nothing to run */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_RUNNING;
	vc->in_guest = 0;
	vc->pcpu = smp_processor_id();
	vc->napping_threads = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		kvmppc_start_thread(vcpu);

	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();
	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();

	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

	spin_lock(&vc->lock);
 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}

	return 1;
}

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus. vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);
	struct kvm_vcpu *v;
	int all_idle = 1;

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
		if (!v->arch.ceded || v->arch.pending_exceptions) {
			all_idle = 0;
			break;
		}
	}
	if (all_idle)
		schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}

static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	int prev_state;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	prev_state = vcpu->arch.state;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (prev_state == KVMPPC_VCPU_STOPPED) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_start_thread(vcpu);
		}
	} else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)
		--vc->n_busy;

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);

		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_deliver_interrupts(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
	}

	if (signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING ||
		    vc->vcore_state == VCORE_EXITING) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
			spin_lock(&vc->lock);
		}
		if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
			kvmppc_remove_runnable(vc, vcpu);
			vcpu->stat.signal_exits++;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			vcpu->arch.ret = -EINTR;
		}
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* On PPC970, check that we have an RMA region */
	if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
		return -EPERM;

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_deliver_interrupts(vcpu);
		}
	} while (r == RESUME_GUEST);

	return r;
}

static long kvmppc_stt_npages(unsigned long window_size)
{
	return ALIGN((window_size >> SPAPR_TCE_SHIFT)
		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
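
/*
 * Worked example for the sizing above (illustrative figures; only
 * SPAPR_TCE_SHIFT = 12, i.e. 4kB TCE pages, is assumed): a 1GB DMA
 * window has (1ul << 30) >> 12 = 262144 TCEs of 8 bytes each, so the
 * table occupies 2MB, i.e. 512 backing pages with a 4kB PAGE_SIZE.
 */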

static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
{
	struct kvm *kvm = stt->kvm;
	int i;

	mutex_lock(&kvm->lock);
	list_del(&stt->list);
	for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
		__free_page(stt->pages[i]);
	kfree(stt);
	mutex_unlock(&kvm->lock);

	kvm_put_kvm(kvm);
}

static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;

	release_spapr_tce_table(stt);
	return 0;
}

static struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	long npages;
	int ret = -ENOMEM;
	int i;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	npages = kvmppc_stt_npages(args->window_size);

	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;

	stt->liobn = args->liobn;
	stt->window_size = args->window_size;
	stt->kvm = kvm;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add(&stt->list, &kvm->arch.spapr_tce_tables);
	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR);

fail:
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);
		kfree(stt);
	}
	return ret;
}
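
/*
 * Sketch of the userspace side (illustrative only; the KVM ioctl
 * plumbing that reaches kvm_vm_ioctl_create_spapr_tce() lives outside
 * this file): the fd returned above can be mmap()ed, via the
 * kvm_spapr_tce_mmap()/fault handlers, to give the VMM direct access
 * to the TCE table, along the lines of
 *
 *	len = stt_pages * page_size;
 *	tces = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */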

/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
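
/*
 * For illustration, using the values filled in above:
 * lpcr_rmls(1ul << 30) returns 2, so a VM whose RMA is 1GB gets
 * RMLS = 2 inserted into the LPCR (POWER7) or HID4 (PPC970) by
 * kvmppc_core_prepare_memory_region() below; a size with no
 * encoding, e.g. 512MB, returns -1 and is rejected.
 */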

static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_rma_info *ri = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_rma_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static struct file_operations kvm_rma_fops = {
	.mmap		= kvm_rma_mmap,
	.release	= kvm_rma_release,
};

long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_rma_info *ri;
	long fd;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
	if (fd < 0)
		kvm_release_rma(ri);

	ret->rma_size = ri->npages << PAGE_SHIFT;
	return fd;
}

static struct page *hva_to_page(unsigned long addr)
{
	struct page *page[1];
	int npages;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1))
		return NULL;

	return page[0];
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem)
{
	unsigned long psize, porder;
	unsigned long i, npages, totalpages;
	unsigned long pg_ix;
	struct kvmppc_pginfo *pginfo;
	unsigned long hva;
	struct kvmppc_rma_info *ri = NULL;
	struct page *page;

	/* For now, only allow 16MB pages */
	porder = LARGE_PAGE_ORDER;
	psize = 1ul << porder;
	if ((mem->memory_size & (psize - 1)) ||
	    (mem->guest_phys_addr & (psize - 1))) {
		pr_err("bad memory_size=%llx @ %llx\n",
		       mem->memory_size, mem->guest_phys_addr);
		return -EINVAL;
	}

	npages = mem->memory_size >> porder;
	totalpages = (mem->guest_phys_addr + mem->memory_size) >> porder;

	/* More memory than we have space to track? */
	if (totalpages > (1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER)))
		return -EINVAL;

	/* Do we already have an RMA registered? */
	if (mem->guest_phys_addr == 0 && kvm->arch.rma)
		return -EINVAL;

	if (totalpages > kvm->arch.ram_npages)
		kvm->arch.ram_npages = totalpages;

	/* Is this one of our preallocated RMAs? */
	if (mem->guest_phys_addr == 0) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, mem->userspace_addr);
		if (vma && vma->vm_file &&
		    vma->vm_file->f_op == &kvm_rma_fops &&
		    mem->userspace_addr == vma->vm_start)
			ri = vma->vm_file->private_data;
		up_read(&current->mm->mmap_sem);
		if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("CPU requires an RMO\n");
			return -EINVAL;
		}
	}

	if (ri) {
		unsigned long rma_size;
		unsigned long lpcr;
		long rmls;

		rma_size = ri->npages << PAGE_SHIFT;
		if (rma_size > mem->memory_size)
			rma_size = mem->memory_size;
		rmls = lpcr_rmls(rma_size);
		if (rmls < 0) {
			pr_err("Can't use RMA of 0x%lx bytes\n", rma_size);
			return -EINVAL;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;
		kvm->arch.n_rma_pages = rma_size >> porder;

		/* Update LPCR and RMOR */
		lpcr = kvm->arch.lpcr;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr &= ~((1ul << HID4_RMLS0_SH) |
				  (3ul << HID4_RMLS2_SH));
			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
	}

	pg_ix = mem->guest_phys_addr >> porder;
	pginfo = kvm->arch.ram_pginfo + pg_ix;
	for (i = 0; i < npages; ++i, ++pg_ix) {
		if (ri && pg_ix < kvm->arch.n_rma_pages) {
			pginfo[i].pfn = ri->base_pfn +
				(pg_ix << (porder - PAGE_SHIFT));
			continue;
		}
		hva = mem->userspace_addr + (i << porder);
		page = hva_to_page(hva);
		if (!page) {
			pr_err("oops, no pfn for hva %lx\n", hva);
			goto err;
		}
		/* Check it's a 16MB page */
		if (!PageHead(page) ||
		    compound_order(page) != (LARGE_PAGE_ORDER - PAGE_SHIFT)) {
			pr_err("page at %lx isn't 16MB (o=%d)\n",
			       hva, compound_order(page));
			goto err;
		}
		pginfo[i].pfn = page_to_pfn(page);
	}

	return 0;

 err:
	return -EINVAL;
}
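
/*
 * A quick check of the compound_order() test above (assuming a 4kB
 * base PAGE_SIZE, so PAGE_SHIFT = 12): a 16MB huge page spans
 * 1ul << (24 - 12) = 4096 base pages, so PageHead() must see a
 * compound page of order 12 for the slot to be accepted.
 */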

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem)
{
	if (mem->guest_phys_addr == 0 && mem->memory_size != 0 &&
	    !kvm->arch.rma)
		kvmppc_map_vrma(kvm, mem);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	long r;
	unsigned long npages = 1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER);
	long err = -ENOMEM;
	unsigned long lpcr;

	/* Allocate hashed page table */
	r = kvmppc_alloc_hpt(kvm);
	if (r)
		return r;

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.ram_pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo),
				       GFP_KERNEL);
	if (!kvm->arch.ram_pginfo) {
		pr_err("kvmppc_core_init_vm: couldn't alloc %lu bytes\n",
		       npages * sizeof(struct kvmppc_pginfo));
		goto out_unmap;
	}

	kvm->arch.ram_npages = 0;
	kvm->arch.ram_psize = 1ul << LARGE_PAGE_ORDER;
	kvm->arch.ram_porder = LARGE_PAGE_ORDER;
	kvm->arch.rma = NULL;
	kvm->arch.n_rma_pages = 0;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		unsigned long lpid = kvm->arch.lpid;
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VRMA_L;
	}
	kvm->arch.lpcr = lpcr;

	return 0;

 out_unmap:
	kvmppc_free_hpt(kvm);
	return err;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	struct kvmppc_pginfo *pginfo;
	unsigned long i;

	if (kvm->arch.ram_pginfo) {
		pginfo = kvm->arch.ram_pginfo;
		kvm->arch.ram_pginfo = NULL;
		for (i = kvm->arch.n_rma_pages; i < kvm->arch.ram_npages; ++i)
			if (pginfo[i].pfn)
				put_page(pfn_to_page(pginfo[i].pfn));
		kfree(pginfo);
	}
	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	return EMULATE_FAIL;
}

static int kvmppc_book3s_hv_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hv_init();

	return r;
}

static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);