KVM: make processes waiting on vcpu mutex killable

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 317241619e2d97bcd716adae48a6749d8cd09788..19047eafa38d74430adecaf6b5a9614f18e046ce 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -817,7 +817,7 @@ static u32 msrs_to_save[] = {
 
 static unsigned num_msrs_to_save;
 
-static u32 emulated_msrs[] = {
+static const u32 emulated_msrs[] = {
        MSR_IA32_TSCDEADLINE,
        MSR_IA32_MISC_ENABLE,
        MSR_IA32_MCG_STATUS,
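
Adding the const qualifier is a small hardening change: the table is never modified at runtime, so the compiler can place it in the read-only .rodata section instead of writable .data. The same pattern, as a minimal sketch:

/* read-only after build time: lands in .rodata rather than .data */
static const u32 example_table[] = { 0x01, 0x02, 0x03 };
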
@@ -2366,7 +2366,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
 {
-       if (irq->irq < 0 || irq->irq >= 256)
+       if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
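
Replacing the bare 256 with KVM_NR_INTERRUPTS ties this bounds check to the constant that also sizes the sregs interrupt bitmap in the x86 uapi header (arch/x86/include/asm/kvm.h, paraphrased):

#define KVM_NR_INTERRUPTS 256

/* in struct kvm_sregs: one bit per injectable vector */
__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];

The same constant replaces the open-coded sizeof arithmetic in kvm_arch_vcpu_ioctl_set_sregs() further down in this diff.
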
@@ -4544,6 +4544,9 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
        return true;
 }
 
+static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
+static int complete_emulated_pio(struct kvm_vcpu *vcpu);
+
 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                            unsigned long cr2,
                            int emulation_type,
@@ -4614,13 +4617,16 @@ restart:
        } else if (vcpu->arch.pio.count) {
                if (!vcpu->arch.pio.in)
                        vcpu->arch.pio.count = 0;
-               else
+               else {
                        writeback = false;
+                       vcpu->arch.complete_userspace_io = complete_emulated_pio;
+               }
                r = EMULATE_DO_MMIO;
        } else if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        writeback = false;
                r = EMULATE_DO_MMIO;
+               vcpu->arch.complete_userspace_io = complete_emulated_mmio;
        } else if (r == EMULATION_RESTART)
                goto restart;
        else
@@ -5476,6 +5482,24 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        return r;
 }
 
+static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
+{
+       int r;
+       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       if (r != EMULATE_DONE)
+               return 0;
+       return 1;
+}
+
+static int complete_emulated_pio(struct kvm_vcpu *vcpu)
+{
+       BUG_ON(!vcpu->arch.pio.count);
+
+       return complete_emulated_io(vcpu);
+}
+
 /*
  * Implements the following, as a state machine:
  *
@@ -5492,47 +5516,37 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
  *      copy data
  *      exit
  */
-static int complete_mmio(struct kvm_vcpu *vcpu)
+static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *run = vcpu->run;
        struct kvm_mmio_fragment *frag;
-       int r;
 
-       if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
-               return 1;
+       BUG_ON(!vcpu->mmio_needed);
 
-       if (vcpu->mmio_needed) {
-               /* Complete previous fragment */
-               frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
-               if (!vcpu->mmio_is_write)
-                       memcpy(frag->data, run->mmio.data, frag->len);
-               if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
-                       vcpu->mmio_needed = 0;
-                       if (vcpu->mmio_is_write)
-                               return 1;
-                       vcpu->mmio_read_completed = 1;
-                       goto done;
-               }
-               /* Initiate next fragment */
-               ++frag;
-               run->exit_reason = KVM_EXIT_MMIO;
-               run->mmio.phys_addr = frag->gpa;
+       /* Complete previous fragment */
+       frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
+       if (!vcpu->mmio_is_write)
+               memcpy(frag->data, run->mmio.data, frag->len);
+       if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+               vcpu->mmio_needed = 0;
                if (vcpu->mmio_is_write)
-                       memcpy(run->mmio.data, frag->data, frag->len);
-               run->mmio.len = frag->len;
-               run->mmio.is_write = vcpu->mmio_is_write;
-               return 0;
-
-       }
-done:
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-       r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-       if (r != EMULATE_DONE)
-               return 0;
-       return 1;
+                       return 1;
+               vcpu->mmio_read_completed = 1;
+               return complete_emulated_io(vcpu);
+       }
+       /* Initiate next fragment */
+       ++frag;
+       run->exit_reason = KVM_EXIT_MMIO;
+       run->mmio.phys_addr = frag->gpa;
+       if (vcpu->mmio_is_write)
+               memcpy(run->mmio.data, frag->data, frag->len);
+       run->mmio.len = frag->len;
+       run->mmio.is_write = vcpu->mmio_is_write;
+       vcpu->arch.complete_userspace_io = complete_emulated_mmio;
+       return 0;
 }
 
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
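
complete_emulated_mmio() keeps the fragment state machine of the old complete_mmio(), but instead of being polled on every KVM_RUN it now re-arms itself through complete_userspace_io (a function pointer of type int (*)(struct kvm_vcpu *) added to struct kvm_vcpu_arch alongside this change) until the last fragment is drained. A runnable user-space sketch of the same drain loop, with hypothetical stand-in types:

#include <stdio.h>

/* hypothetical stand-in for the kernel's struct kvm_mmio_fragment */
struct frag {
        unsigned long gpa;
        unsigned int len;
};

int main(void)
{
        /* a 16-byte access that straddles a page boundary: two fragments */
        struct frag frags[] = {
                { 0xfff8,  8 },   /* tail of the first page */
                { 0x10000, 8 },   /* head of the second page */
        };
        unsigned int cur = 0, nr = 2;

        /* each iteration models one KVM_EXIT_MMIO round trip to userspace */
        while (cur < nr) {
                struct frag *f = &frags[cur++];
                printf("exit KVM_EXIT_MMIO: gpa=%#lx len=%u\n", f->gpa, f->len);
                /* userspace services the access, then calls KVM_RUN again;
                 * the re-armed callback brings the vcpu back to this point */
        }
        printf("all fragments complete, resume instruction emulation\n");
        return 0;
}
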
@@ -5559,9 +5573,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                }
        }
 
-       r = complete_mmio(vcpu);
-       if (r <= 0)
-               goto out;
+       if (unlikely(vcpu->arch.complete_userspace_io)) {
+               int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
+               vcpu->arch.complete_userspace_io = NULL;
+               r = cui(vcpu);
+               if (r <= 0)
+                       goto out;
+       } else
+               WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
 
        r = __vcpu_run(vcpu);
 
@@ -5774,7 +5793,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);
 
-       max_bits = (sizeof sregs->interrupt_bitmap) << 3;
+       max_bits = KVM_NR_INTERRUPTS;
        pending_vec = find_first_bit(
                (const unsigned long *)sregs->interrupt_bitmap, max_bits);
        if (pending_vec < max_bits) {
@@ -5997,7 +6016,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        int r;
 
        vcpu->arch.mtrr_state.have_fixed = 1;
-       vcpu_load(vcpu);
+       r = vcpu_load(vcpu);
+       if (r)
+               return r;
        r = kvm_arch_vcpu_reset(vcpu);
        if (r == 0)
                r = kvm_mmu_setup(vcpu);
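
vcpu_load() now returns an int because the headline commit makes it take the vcpu mutex with mutex_lock_killable(): a task that receives SIGKILL while queued behind a long-running vcpu ioctl no longer blocks unkillably. Roughly, the matching generic-code change in virt/kvm/kvm_main.c (outside this file, shown here as a sketch):

int vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        if (mutex_lock_killable(&vcpu->mutex))
                return -EINTR;
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
        return 0;
}

Callers that cannot reasonably handle failure, such as the teardown paths below, assert success with BUG_ON(r) instead of propagating the error.
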
@@ -6008,9 +6029,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+       int r;
        vcpu->arch.apf.msr_val = 0;
 
-       vcpu_load(vcpu);
+       r = vcpu_load(vcpu);
+       BUG_ON(r);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
 
@@ -6256,7 +6279,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-       vcpu_load(vcpu);
+       int r;
+       r = vcpu_load(vcpu);
+       BUG_ON(r);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
 }
@@ -6445,14 +6470,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        spin_unlock(&kvm->mmu_lock);
+       /*
+        * If memory slot is created, or moved, we need to clear all
+        * mmio sptes.
+        */
+       if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) {
+               kvm_mmu_zap_all(kvm);
+               kvm_reload_remote_mmus(kvm);
+       }
 }
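
The new comment is terse about the why: an mmio spte caches the fact that a guest physical address has no memslot behind it, so faults on it are routed straight to userspace as MMIO. If a slot is then created over (or moved into) that range, those cached sptes are stale and would keep steering what is now RAM through MMIO emulation, hence the full zap and remote MMU reload. A hypothetical helper restating the trigger condition:

/* hypothetical restatement of the check above, not kernel code */
static bool slot_created_or_moved(unsigned long npages,
                                  unsigned long old_base_gfn,
                                  unsigned long new_base_gfn)
{
        /* npages != 0: a slot exists after the update (not a deletion);
         * base_gfn changed: the slot is new or was moved, so mmio sptes
         * cached for its range may now be stale */
        return npages && old_base_gfn != new_base_gfn;
}
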
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
        kvm_mmu_zap_all(kvm);
        kvm_reload_remote_mmus(kvm);
 }
 
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+       kvm_arch_flush_shadow_all(kvm);
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&