Merge branch 'kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm-queue

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7e1813ceabc1ce2f17ddf0722e42053e181f067d..17fc9496b6aca0d088bf6d6b13d38dca19121a13 100644 (file)
@@ -135,8 +135,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       unsigned long flags;
 
-       spin_lock(&vcpu->arch.tbacct_lock);
+       spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
        if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
            vc->preempt_tb != TB_NIL) {
                vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -147,19 +148,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
                vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
                vcpu->arch.busy_preempt = TB_NIL;
        }
-       spin_unlock(&vcpu->arch.tbacct_lock);
+       spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       unsigned long flags;
 
-       spin_lock(&vcpu->arch.tbacct_lock);
+       spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
        if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
                vc->preempt_tb = mftb();
        if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
                vcpu->arch.busy_preempt = mftb();
-       spin_unlock(&vcpu->arch.tbacct_lock);
+       spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
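
The two hunks above switch arch.tbacct_lock from plain spin_lock() to the IRQ-saving variants, so the lock is safe to take from contexts where interrupts may already be disabled and cannot deadlock against an interrupt arriving on the same CPU. A minimal sketch of the idiom, using a hypothetical lock and counter rather than the real tbacct fields:

        #include <linux/spinlock.h>
        #include <linux/types.h>

        static DEFINE_SPINLOCK(acct_lock);      /* stand-in for arch.tbacct_lock */
        static u64 stolen_total;                /* stand-in for the accounting state */

        static void account_stolen(u64 delta)
        {
                unsigned long flags;

                /* Save the caller's interrupt state, disable IRQs, take the
                 * lock; correct whether IRQs were on or off on entry. */
                spin_lock_irqsave(&acct_lock, flags);
                stolen_total += delta;
                spin_unlock_irqrestore(&acct_lock, flags);
        }

Later hunks in this patch use spin_lock_irq()/spin_unlock_irq() instead; that cheaper variant is only correct where the caller is known to run with interrupts enabled.
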
@@ -184,14 +186,28 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 
                switch (arch_compat) {
                case PVR_ARCH_205:
-                       pcr = PCR_ARCH_205;
+                       /*
+                        * If an arch bit is set in PCR, all the defined
+                        * higher-order arch bits also have to be set.
+                        */
+                       pcr = PCR_ARCH_206 | PCR_ARCH_205;
                        break;
                case PVR_ARCH_206:
                case PVR_ARCH_206p:
+                       pcr = PCR_ARCH_206;
+                       break;
+               case PVR_ARCH_207:
                        break;
                default:
                        return -EINVAL;
                }
+
+               if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
+                       /* POWER7 can't emulate POWER8 */
+                       if (!(pcr & PCR_ARCH_206))
+                               return -EINVAL;
+                       pcr &= ~PCR_ARCH_206;
+               }
        }
 
        spin_lock(&vc->lock);
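
To make the two steps above concrete, this is how pcr ends up for each requested compatibility level, on a POWER8 host (CPU_FTR_ARCH_207S set, no fixup) versus a POWER7 host (fixup applied):

        arch_compat        POWER8 host                     POWER7 host
        PVR_ARCH_205       PCR_ARCH_206 | PCR_ARCH_205     PCR_ARCH_205
        PVR_ARCH_206(p)    PCR_ARCH_206                    0 (native 2.06)
        PVR_ARCH_207       0 (native 2.07)                 -EINVAL (can't emulate POWER8)
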
@@ -490,11 +506,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
         */
        if (vc->vcore_state != VCORE_INACTIVE &&
            vc->runner->arch.run_task != current) {
-               spin_lock(&vc->runner->arch.tbacct_lock);
+               spin_lock_irq(&vc->runner->arch.tbacct_lock);
                p = vc->stolen_tb;
                if (vc->preempt_tb != TB_NIL)
                        p += now - vc->preempt_tb;
-               spin_unlock(&vc->runner->arch.tbacct_lock);
+               spin_unlock_irq(&vc->runner->arch.tbacct_lock);
        } else {
                p = vc->stolen_tb;
        }
@@ -516,10 +532,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
        core_stolen = vcore_stolen_time(vc, now);
        stolen = core_stolen - vcpu->arch.stolen_logged;
        vcpu->arch.stolen_logged = core_stolen;
-       spin_lock(&vcpu->arch.tbacct_lock);
+       spin_lock_irq(&vcpu->arch.tbacct_lock);
        stolen += vcpu->arch.busy_stolen;
        vcpu->arch.busy_stolen = 0;
-       spin_unlock(&vcpu->arch.tbacct_lock);
+       spin_unlock_irq(&vcpu->arch.tbacct_lock);
        if (!dt || !vpa)
                return;
        memset(dt, 0, sizeof(struct dtl_entry));
@@ -593,7 +609,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                if (list_empty(&vcpu->kvm->arch.rtas_tokens))
                        return RESUME_HOST;
 
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
                rc = kvmppc_rtas_hcall(vcpu);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
                if (rc == -ENOENT)
                        return RESUME_HOST;
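
The new srcu_read_lock()/srcu_read_unlock() pair is needed because the RTAS hypercall handler copies its argument block to and from guest memory, and the memslots used for that translation are protected by kvm->srcu. The read-side pattern on its own:

        int idx;

        idx = srcu_read_lock(&vcpu->kvm->srcu);   /* enter SRCU read section */
        /* ... dereference memslots / access guest memory safely ... */
        srcu_read_unlock(&vcpu->kvm->srcu, idx);  /* must pass back the same idx */
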
@@ -637,6 +655,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
+       case BOOK3S_INTERRUPT_H_DOORBELL:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
@@ -706,7 +725,16 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
         * we don't emulate any guest instructions at this stage.
         */
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
-               kvmppc_core_queue_program(vcpu, 0x80000);
+               kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+               r = RESUME_GUEST;
+               break;
+       /*
+        * This occurs if the guest (kernel or userspace) does something that
+        * is prohibited by HFSCR.  We just generate a program interrupt to
+        * the guest.
+        */
+       case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+               kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                r = RESUME_GUEST;
                break;
        default:
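
For reference, SRR1_PROGILL is the named form of the literal 0x80000 that the first of these two hunks replaces: the SRR1 bit marking an illegal-instruction program interrupt. Both the emulation-assist exit and the new facility-unavailable exit therefore reach the guest as the same kind of program interrupt.
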
@@ -763,11 +791,35 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
        u64 mask;
 
        spin_lock(&vc->lock);
+       /*
+        * If ILE (interrupt little-endian) has changed, update the
+        * MSR_LE bit in the intr_msr for each vcpu in this vcore.
+        */
+       if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
+               struct kvm *kvm = vcpu->kvm;
+               struct kvm_vcpu *vcpu;
+               int i;
+
+               mutex_lock(&kvm->lock);
+               kvm_for_each_vcpu(i, vcpu, kvm) {
+                       if (vcpu->arch.vcore != vc)
+                               continue;
+                       if (new_lpcr & LPCR_ILE)
+                               vcpu->arch.intr_msr |= MSR_LE;
+                       else
+                               vcpu->arch.intr_msr &= ~MSR_LE;
+               }
+               mutex_unlock(&kvm->lock);
+       }
+
        /*
         * Userspace can only modify DPFD (default prefetch depth),
         * ILE (interrupt little-endian) and TC (translation control).
+        * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
         */
        mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+       if (cpu_has_feature(CPU_FTR_ARCH_207S))
+               mask |= LPCR_AIL;
        vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
        spin_unlock(&vc->lock);
 }
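
Userspace reaches kvmppc_set_lpcr() through KVM's one-reg interface. A hedged sketch of how a VMM might turn on ILE for a little-endian guest, assuming a 64-bit build, an open vcpu file descriptor, and a kernel whose uapi headers define KVM_REG_PPC_LPCR (the LPCR_* bit masks are kernel-internal, so the ILE value below is spelled out by hand):

        #include <linux/kvm.h>
        #include <sys/ioctl.h>

        static int set_lpcr_ile(int vcpu_fd, __u64 cur_lpcr)
        {
                __u64 lpcr = cur_lpcr | 0x0000000002000000ULL;  /* LPCR_ILE */
                struct kvm_one_reg reg = {
                        .id   = KVM_REG_PPC_LPCR,
                        .addr = (__u64)&lpcr,
                };

                return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
        }
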
@@ -785,6 +837,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_DABR:
                *val = get_reg_val(id, vcpu->arch.dabr);
                break;
+       case KVM_REG_PPC_DABRX:
+               *val = get_reg_val(id, vcpu->arch.dabrx);
+               break;
        case KVM_REG_PPC_DSCR:
                *val = get_reg_val(id, vcpu->arch.dscr);
                break;
@@ -800,7 +855,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_UAMOR:
                *val = get_reg_val(id, vcpu->arch.uamor);
                break;
-       case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+       case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
                i = id - KVM_REG_PPC_MMCR0;
                *val = get_reg_val(id, vcpu->arch.mmcr[i]);
                break;
@@ -808,12 +863,87 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                i = id - KVM_REG_PPC_PMC1;
                *val = get_reg_val(id, vcpu->arch.pmc[i]);
                break;
+       case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+               i = id - KVM_REG_PPC_SPMC1;
+               *val = get_reg_val(id, vcpu->arch.spmc[i]);
+               break;
        case KVM_REG_PPC_SIAR:
                *val = get_reg_val(id, vcpu->arch.siar);
                break;
        case KVM_REG_PPC_SDAR:
                *val = get_reg_val(id, vcpu->arch.sdar);
                break;
+       case KVM_REG_PPC_SIER:
+               *val = get_reg_val(id, vcpu->arch.sier);
+               break;
+       case KVM_REG_PPC_IAMR:
+               *val = get_reg_val(id, vcpu->arch.iamr);
+               break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       case KVM_REG_PPC_TFHAR:
+               *val = get_reg_val(id, vcpu->arch.tfhar);
+               break;
+       case KVM_REG_PPC_TFIAR:
+               *val = get_reg_val(id, vcpu->arch.tfiar);
+               break;
+       case KVM_REG_PPC_TEXASR:
+               *val = get_reg_val(id, vcpu->arch.texasr);
+               break;
+#endif
+       case KVM_REG_PPC_FSCR:
+               *val = get_reg_val(id, vcpu->arch.fscr);
+               break;
+       case KVM_REG_PPC_PSPB:
+               *val = get_reg_val(id, vcpu->arch.pspb);
+               break;
+       case KVM_REG_PPC_EBBHR:
+               *val = get_reg_val(id, vcpu->arch.ebbhr);
+               break;
+       case KVM_REG_PPC_EBBRR:
+               *val = get_reg_val(id, vcpu->arch.ebbrr);
+               break;
+       case KVM_REG_PPC_BESCR:
+               *val = get_reg_val(id, vcpu->arch.bescr);
+               break;
+       case KVM_REG_PPC_TAR:
+               *val = get_reg_val(id, vcpu->arch.tar);
+               break;
+       case KVM_REG_PPC_DPDES:
+               *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
+               break;
+       case KVM_REG_PPC_DAWR:
+               *val = get_reg_val(id, vcpu->arch.dawr);
+               break;
+       case KVM_REG_PPC_DAWRX:
+               *val = get_reg_val(id, vcpu->arch.dawrx);
+               break;
+       case KVM_REG_PPC_CIABR:
+               *val = get_reg_val(id, vcpu->arch.ciabr);
+               break;
+       case KVM_REG_PPC_IC:
+               *val = get_reg_val(id, vcpu->arch.ic);
+               break;
+       case KVM_REG_PPC_VTB:
+               *val = get_reg_val(id, vcpu->arch.vtb);
+               break;
+       case KVM_REG_PPC_CSIGR:
+               *val = get_reg_val(id, vcpu->arch.csigr);
+               break;
+       case KVM_REG_PPC_TACR:
+               *val = get_reg_val(id, vcpu->arch.tacr);
+               break;
+       case KVM_REG_PPC_TCSCR:
+               *val = get_reg_val(id, vcpu->arch.tcscr);
+               break;
+       case KVM_REG_PPC_PID:
+               *val = get_reg_val(id, vcpu->arch.pid);
+               break;
+       case KVM_REG_PPC_ACOP:
+               *val = get_reg_val(id, vcpu->arch.acop);
+               break;
+       case KVM_REG_PPC_WORT:
+               *val = get_reg_val(id, vcpu->arch.wort);
+               break;
        case KVM_REG_PPC_VPA_ADDR:
                spin_lock(&vcpu->arch.vpa_update_lock);
                *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
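
Reading the new registers back is symmetric. A minimal userspace sketch for one of them (DAWR), under the same assumptions as the LPCR example above (plus <stdio.h> for perror):

        __u64 dawr;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_DAWR,
                .addr = (__u64)&dawr,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                perror("KVM_GET_ONE_REG");
        /* on success, dawr holds the guest's DAWR watchpoint address */
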
@@ -867,6 +997,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_DABR:
                vcpu->arch.dabr = set_reg_val(id, *val);
                break;
+       case KVM_REG_PPC_DABRX:
+               vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
+               break;
        case KVM_REG_PPC_DSCR:
                vcpu->arch.dscr = set_reg_val(id, *val);
                break;
@@ -882,7 +1015,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_UAMOR:
                vcpu->arch.uamor = set_reg_val(id, *val);
                break;
-       case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+       case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
                i = id - KVM_REG_PPC_MMCR0;
                vcpu->arch.mmcr[i] = set_reg_val(id, *val);
                break;
@@ -890,12 +1023,90 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                i = id - KVM_REG_PPC_PMC1;
                vcpu->arch.pmc[i] = set_reg_val(id, *val);
                break;
+       case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+               i = id - KVM_REG_PPC_SPMC1;
+               vcpu->arch.spmc[i] = set_reg_val(id, *val);
+               break;
        case KVM_REG_PPC_SIAR:
                vcpu->arch.siar = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_SDAR:
                vcpu->arch.sdar = set_reg_val(id, *val);
                break;
+       case KVM_REG_PPC_SIER:
+               vcpu->arch.sier = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_IAMR:
+               vcpu->arch.iamr = set_reg_val(id, *val);
+               break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       case KVM_REG_PPC_TFHAR:
+               vcpu->arch.tfhar = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_TFIAR:
+               vcpu->arch.tfiar = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_TEXASR:
+               vcpu->arch.texasr = set_reg_val(id, *val);
+               break;
+#endif
+       case KVM_REG_PPC_FSCR:
+               vcpu->arch.fscr = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_PSPB:
+               vcpu->arch.pspb = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_EBBHR:
+               vcpu->arch.ebbhr = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_EBBRR:
+               vcpu->arch.ebbrr = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_BESCR:
+               vcpu->arch.bescr = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_TAR:
+               vcpu->arch.tar = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_DPDES:
+               vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_DAWR:
+               vcpu->arch.dawr = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_DAWRX:
+               vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
+               break;
+       case KVM_REG_PPC_CIABR:
+               vcpu->arch.ciabr = set_reg_val(id, *val);
+               /* Don't allow setting breakpoints in hypervisor code */
+               if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+                       vcpu->arch.ciabr &= ~CIABR_PRIV;        /* disable */
+               break;
+       case KVM_REG_PPC_IC:
+               vcpu->arch.ic = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_VTB:
+               vcpu->arch.vtb = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_CSIGR:
+               vcpu->arch.csigr = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_TACR:
+               vcpu->arch.tacr = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_TCSCR:
+               vcpu->arch.tcscr = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_PID:
+               vcpu->arch.pid = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_ACOP:
+               vcpu->arch.acop = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_WORT:
+               vcpu->arch.wort = set_reg_val(id, *val);
+               break;
        case KVM_REG_PPC_VPA_ADDR:
                addr = set_reg_val(id, *val);
                r = -EINVAL;
@@ -973,6 +1184,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
        spin_lock_init(&vcpu->arch.vpa_update_lock);
        spin_lock_init(&vcpu->arch.tbacct_lock);
        vcpu->arch.busy_preempt = TB_NIL;
+       vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
        kvmppc_mmu_book3s_hv_init(vcpu);
 
@@ -990,6 +1202,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
                        init_waitqueue_head(&vcore->wq);
                        vcore->preempt_tb = TB_NIL;
                        vcore->lpcr = kvm->arch.lpcr;
+                       vcore->first_vcpuid = core * threads_per_core;
+                       vcore->kvm = kvm;
                }
                kvm->arch.vcores[core] = vcore;
                kvm->arch.online_vcores++;
@@ -1003,6 +1217,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
        ++vcore->num_threads;
        spin_unlock(&vcore->lock);
        vcpu->arch.vcore = vcore;
+       vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
 
        vcpu->arch.cpu_type = KVM_CPU_3S_64;
        kvmppc_sanity_check(vcpu);
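
The new first_vcpuid and ptid fields pin each vcpu to a fixed hardware thread of its virtual core, replacing the dynamic assignment that a later hunk removes from kvmppc_run_core(). A worked example of the arithmetic, assuming threads_per_core == 8 and core = vcpu_id / threads_per_core as the surrounding code implies:

        vcpu_id      = 11
        core         = 11 / 8   = 1
        first_vcpuid = 1 * 8    = 8
        ptid         = 11 - 8   = 3     (hardware thread 3 of vcore 1)
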
@@ -1066,7 +1281,7 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
        }
 }
 
-extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern void __kvmppc_vcore_entry(void);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
                                   struct kvm_vcpu *vcpu)
@@ -1075,13 +1290,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
        if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
                return;
-       spin_lock(&vcpu->arch.tbacct_lock);
+       spin_lock_irq(&vcpu->arch.tbacct_lock);
        now = mftb();
        vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
                vcpu->arch.stolen_logged;
        vcpu->arch.busy_preempt = now;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-       spin_unlock(&vcpu->arch.tbacct_lock);
+       spin_unlock_irq(&vcpu->arch.tbacct_lock);
        --vc->n_runnable;
        list_del(&vcpu->arch.run_list);
 }
@@ -1140,15 +1355,16 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
        tpaca = &paca[cpu];
        tpaca->kvm_hstate.kvm_vcpu = vcpu;
        tpaca->kvm_hstate.kvm_vcore = vc;
-       tpaca->kvm_hstate.napping = 0;
+       tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
        vcpu->cpu = vc->pcpu;
        smp_wmb();
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
-       if (vcpu->arch.ptid) {
+       if (cpu != smp_processor_id()) {
 #ifdef CONFIG_KVM_XICS
                xics_wake_cpu(cpu);
 #endif
-               ++vc->n_woken;
+               if (vcpu->arch.ptid)
+                       ++vc->n_woken;
        }
 #endif
 }
@@ -1205,10 +1421,10 @@ static int on_primary_thread(void)
  */
 static void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-       struct kvm_vcpu *vcpu, *vcpu0, *vnext;
+       struct kvm_vcpu *vcpu, *vnext;
        long ret;
        u64 now;
-       int ptid, i, need_vpa_update;
+       int i, need_vpa_update;
        int srcu_idx;
        struct kvm_vcpu *vcpus_to_update[threads_per_core];
 
@@ -1245,25 +1461,6 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
                spin_lock(&vc->lock);
        }
 
-       /*
-        * Assign physical thread IDs, first to non-ceded vcpus
-        * and then to ceded ones.
-        */
-       ptid = 0;
-       vcpu0 = NULL;
-       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
-               if (!vcpu->arch.ceded) {
-                       if (!ptid)
-                               vcpu0 = vcpu;
-                       vcpu->arch.ptid = ptid++;
-               }
-       }
-       if (!vcpu0)
-               goto out;       /* nothing to run; should never happen */
-       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
-               if (vcpu->arch.ceded)
-                       vcpu->arch.ptid = ptid++;
-
        /*
         * Make sure we are running on thread 0, and that
         * secondary threads are offline.
@@ -1280,15 +1477,19 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
                kvmppc_create_dtl_entry(vcpu, vc);
        }
 
+       /* Set this explicitly in case thread 0 doesn't have a vcpu */
+       get_paca()->kvm_hstate.kvm_vcore = vc;
+       get_paca()->kvm_hstate.ptid = 0;
+
        vc->vcore_state = VCORE_RUNNING;
        preempt_disable();
        spin_unlock(&vc->lock);
 
        kvm_guest_enter();
 
-       srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);
+       srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
-       __kvmppc_vcore_entry(NULL, vcpu0);
+       __kvmppc_vcore_entry();
 
        spin_lock(&vc->lock);
        /* disable sending of IPIs on virtual external irqs */
@@ -1303,14 +1504,14 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
        vc->vcore_state = VCORE_EXITING;
        spin_unlock(&vc->lock);
 
-       srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);
+       srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
        /* make sure updates to secondary vcpu structs are visible now */
        smp_mb();
        kvm_guest_exit();
 
        preempt_enable();
-       kvm_resched(vcpu);
+       cond_resched();
 
        spin_lock(&vc->lock);
        now = get_tb();
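
On the kvm_resched() to cond_resched() change above: kvm_resched() had no per-vcpu behaviour, so calling cond_resched() directly is equivalent and drops the need for a vcpu pointer at this point. From memory of the contemporaneous virt/kvm/kvm_main.c, the removed helper was roughly the following (a hedged reconstruction, not part of this patch):

        void kvm_resched(struct kvm_vcpu *vcpu)
        {
                if (!need_resched())
                        return;
                cond_resched();
        }
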
@@ -1411,7 +1612,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        if (!signal_pending(current)) {
                if (vc->vcore_state == VCORE_RUNNING &&
                    VCORE_EXIT_COUNT(vc) == 0) {
-                       vcpu->arch.ptid = vc->n_runnable - 1;
                        kvmppc_create_dtl_entry(vcpu, vc);
                        kvmppc_start_thread(vcpu);
                } else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -2006,6 +2206,9 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
                        LPCR_VPM0 | LPCR_VPM1;
                kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
                        (VRMA_VSID << SLB_VSID_SHIFT_1T);
+               /* On POWER8 turn on online bit to enable PURR/SPURR */
+               if (cpu_has_feature(CPU_FTR_ARCH_207S))
+                       lpcr |= LPCR_ONL;
        }
        kvm->arch.lpcr = lpcr;