KVM: PPC: booke: Check for MSR[WE] in prepare_to_enter
author     Scott Wood <scottwood@freescale.com>
           Wed, 9 Nov 2011 00:23:25 +0000 (18:23 -0600)
committer  Avi Kivity <avi@redhat.com>
           Mon, 5 Mar 2012 12:52:26 +0000 (14:52 +0200)
This prevents us from inappropriately blocking in a KVM_SET_REGS
ioctl -- the new MSR[WE] value will take effect when the guest is next
entered.

It also causes SRR1[WE] to be set when we enter the guest's interrupt
handler, which is what e500 hardware is documented to do.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
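
The second point above is about what the guest's interrupt handler observes. As a rough mental model (a simplified standalone sketch, not kernel code -- the struct and helper below are invented for illustration; only MSR_WE/MSR_EE and the "SRR1 gets a copy of MSR" step mirror the real Book E behaviour): because the vcpu now sleeps in kvmppc_core_prepare_to_enter() instead of having its wait handled inside kvmppc_set_msr(), MSR[WE] is still set at the moment an interrupt is delivered, so the copy of MSR into SRR1 carries WE into the handler, which is what e500 hardware does.

#include <stdint.h>
#include <stdio.h>

#define MSR_WE 0x00040000u  /* Book E wait-state enable (value assumed from reg_booke.h) */
#define MSR_EE 0x00008000u  /* external interrupt enable */

/* Toy stand-in for the vcpu state touched by exception delivery. */
struct toy_vcpu {
	uint32_t msr;
	uint32_t srr1;
};

/* Roughly what booke interrupt delivery does: save the guest's current
 * MSR (including WE) into SRR1, then enter the handler with WE/EE clear. */
static void deliver_interrupt(struct toy_vcpu *vcpu)
{
	vcpu->srr1 = vcpu->msr;
	vcpu->msr &= ~(MSR_WE | MSR_EE);
}

int main(void)
{
	struct toy_vcpu vcpu = { .msr = MSR_WE | MSR_EE };

	/* With blocking moved to prepare_to_enter, WE is still set here. */
	deliver_interrupt(&vcpu);
	printf("SRR1[WE] = %d\n", !!(vcpu.srr1 & MSR_WE));
	return 0;
}
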
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index feaefc433276cb78863604622084fd83818708d7..557f028a9ad4a81d237f4b1fa6c3b2ae9f686d45 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,12 +124,6 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
        vcpu->arch.shared->msr = new_msr;
 
        kvmppc_mmu_msr_notify(vcpu, old_msr);
-
-       if (vcpu->arch.shared->msr & MSR_WE) {
-               kvm_vcpu_block(vcpu);
-               kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
-       };
-
        kvmppc_vcpu_sync_spe(vcpu);
 }
 
@@ -288,15 +282,12 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
        return allowed;
 }
 
-/* Check pending exceptions and deliver one, if possible. */
-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
 {
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
        unsigned int priority;
 
-       WARN_ON_ONCE(!irqs_disabled());
-
        priority = __ffs(*pending);
        while (priority <= BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -314,6 +305,23 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
                vcpu->arch.shared->int_pending = 0;
 }
 
+/* Check pending exceptions and deliver one, if possible. */
+void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+{
+       WARN_ON_ONCE(!irqs_disabled());
+
+       kvmppc_core_check_exceptions(vcpu);
+
+       if (vcpu->arch.shared->msr & MSR_WE) {
+               local_irq_enable();
+               kvm_vcpu_block(vcpu);
+               local_irq_disable();
+
+               kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+               kvmppc_core_check_exceptions(vcpu);
+       };
+}
+
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int ret;
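
For the first point -- no longer blocking inside KVM_SET_REGS -- the observable effect from userspace looks roughly like the sketch below. This is a hedged illustration, not code from the patch: vcpu_fd is assumed to be an already-created KVM vcpu file descriptor, and the MSR_WE value is copied from the kernel's Book E register definitions. On booke, the KVM_SET_REGS path feeds regs->msr through kvmppc_set_msr(), which before this patch could call kvm_vcpu_block() and leave the ioctl stuck until the vcpu was woken; afterwards the ioctl returns immediately and the vcpu only sleeps on its next KVM_RUN, in kvmppc_core_prepare_to_enter().

#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_WE 0x00040000UL	/* Book E wait-state enable */

/* Mark the guest as waiting by setting MSR[WE] via KVM_SET_REGS. */
static int set_guest_waiting(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;

	regs.msr |= MSR_WE;

	/* Returns immediately; the wait state takes effect on the next KVM_RUN. */
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}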