Merge tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e1721324c271e18430f43ee226a367c4495d18ab..05a8b1a2300df0997d116e753506bf6e6ffa1fe2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -162,8 +162,6 @@ u64 __read_mostly host_xcr0;
 
 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
 
-static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);
-
 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
 {
        int i;
@@ -263,6 +261,13 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
+asmlinkage void kvm_spurious_fault(void)
+{
+       /* Fault while not rebooting.  We want the trace. */
+       BUG();
+}
+EXPORT_SYMBOL_GPL(kvm_spurious_fault);
+
 #define EXCPT_BENIGN           0
 #define EXCPT_CONTRIBUTORY     1
 #define EXCPT_PF               2
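kvm_spurious_fault() is the out-of-line landing pad for the fault-on-reboot
fixup wrapped around VMX/SVM instructions. A minimal sketch of the decision
it backs (the real logic is the __kvm_handle_fault_on_reboot asm fixup in
kvm_host.h; this paraphrase is not verbatim):

	/* exception fixup path of a guarded vmx/svm instruction */
	if (!kvm_rebooting)
		kvm_spurious_fault();	/* unexpected: BUG() keeps the backtrace */
	/* otherwise we are rebooting anyway; silently skip the instruction */
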
@@ -840,23 +845,17 @@ static const u32 emulated_msrs[] = {
        MSR_IA32_MCG_CTL,
 };
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-       u64 old_efer = vcpu->arch.efer;
-
        if (efer & efer_reserved_bits)
-               return 1;
-
-       if (is_paging(vcpu)
-           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-               return 1;
+               return false;
 
        if (efer & EFER_FFXSR) {
                struct kvm_cpuid_entry2 *feat;
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
-                       return 1;
+                       return false;
        }
 
        if (efer & EFER_SVME) {
@@ -864,9 +863,24 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
-                       return 1;
+                       return false;
        }
 
+       return true;
+}
+EXPORT_SYMBOL_GPL(kvm_valid_efer);
+
+static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+       u64 old_efer = vcpu->arch.efer;
+
+       if (!kvm_valid_efer(vcpu, efer))
+               return 1;
+
+       if (is_paging(vcpu)
+           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+               return 1;
+
        efer &= ~EFER_LMA;
        efer |= vcpu->arch.efer & EFER_LMA;
 
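Factoring the reserved/feature-bit checks into kvm_valid_efer() lets other
code vet a guest-supplied EFER without committing it, while set_efer() keeps
the LME-toggle-while-paging check that only makes sense on an actual write.
A hypothetical caller (sketch only, not part of this patch):

	if (!kvm_valid_efer(vcpu, new_efer))
		return -EINVAL;	/* reserved bit set, or feature absent from guest CPUID */
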
@@ -1079,6 +1093,10 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
        u32 thresh_lo, thresh_hi;
        int use_scaling = 0;
 
+       /* tsc_khz can be zero if TSC calibration fails */
+       if (this_tsc_khz == 0)
+               return;
+
        /* Compute a scale to convert nanoseconds in TSC cycles */
        kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
                           &vcpu->arch.virtual_tsc_shift,
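The new guard matters because kvm_get_time_scale() derives a mult/shift pair
by dividing by this_tsc_khz; a host whose TSC calibration failed reports
tsc_khz == 0 and would otherwise hit a divide-by-zero here.
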
@@ -1156,20 +1174,23 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
-       /* n.b - signed multiplication and division required */
-       usdiff = data - kvm->arch.last_tsc_write;
+       if (vcpu->arch.virtual_tsc_khz) {
+               /* n.b - signed multiplication and division required */
+               usdiff = data - kvm->arch.last_tsc_write;
 #ifdef CONFIG_X86_64
-       usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
+               usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
 #else
-       /* do_div() only does unsigned */
-       asm("idivl %2; xor %%edx, %%edx"
-           : "=A"(usdiff)
-           : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+               /* do_div() only does unsigned */
+               asm("idivl %2; xor %%edx, %%edx"
+               : "=A"(usdiff)
+               : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
 #endif
-       do_div(elapsed, 1000);
-       usdiff -= elapsed;
-       if (usdiff < 0)
-               usdiff = -usdiff;
+               do_div(elapsed, 1000);
+               usdiff -= elapsed;
+               if (usdiff < 0)
+                       usdiff = -usdiff;
+       } else
+               usdiff = USEC_PER_SEC; /* disable TSC match window below */
 
        /*
         * Special case: TSC write with a small delta (1 second) of virtual
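With virtual_tsc_khz == 0 the cycles-to-microseconds conversion would fault,
so usdiff is pinned to USEC_PER_SEC, disabling the "matched write" special
case. In outline (hedged sketch, types simplified):

	s64 usdiff = data - last_tsc_write;	  /* delta in guest TSC cycles */
	usdiff = usdiff * 1000 / virtual_tsc_khz; /* cycles -> microseconds */
	usdiff -= elapsed_ns / 1000;		  /* vs. elapsed wall-clock usecs */
	/* abs(usdiff) < USEC_PER_SEC => treat as a synchronized TSC write */
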
@@ -2034,7 +2055,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
                if (kvm_pmu_msr(vcpu, msr))
-                       return kvm_pmu_set_msr(vcpu, msr, data);
+                       return kvm_pmu_set_msr(vcpu, msr_info);
 
                if (pr || data != 0)
                        vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
@@ -2080,7 +2101,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
                if (kvm_pmu_msr(vcpu, msr))
-                       return kvm_pmu_set_msr(vcpu, msr, data);
+                       return kvm_pmu_set_msr(vcpu, msr_info);
                if (!ignore_msrs) {
                        vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                    msr, data);
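kvm_pmu_set_msr() now receives the whole struct msr_data, so the PMU code can
see the host_initiated flag as well as the value. For reference, the struct
as this series defines it (see asm/kvm_host.h):

	struct msr_data {
		bool host_initiated;	/* write came from a userspace ioctl */
		u32 index;		/* MSR number */
		u64 data;		/* value being written */
	};
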
@@ -2479,7 +2500,6 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_USER_NMI:
        case KVM_CAP_REINJECT_CONTROL:
        case KVM_CAP_IRQ_INJECT_STATUS:
-       case KVM_CAP_ASSIGN_DEV_IRQ:
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_PIT2:
@@ -2497,10 +2517,12 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_XSAVE:
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_GET_TSC_KHZ:
-       case KVM_CAP_PCI_2_3:
        case KVM_CAP_KVMCLOCK_CTRL:
        case KVM_CAP_READONLY_MEM:
-       case KVM_CAP_IRQFD_RESAMPLE:
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
+       case KVM_CAP_ASSIGN_DEV_IRQ:
+       case KVM_CAP_PCI_2_3:
+#endif
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -2521,9 +2543,11 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_PV_MMU:    /* obsolete */
                r = 0;
                break;
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
        case KVM_CAP_IOMMU:
                r = iommu_present(&pci_bus_type);
                break;
+#endif
        case KVM_CAP_MCE:
                r = KVM_MAX_MCE_BANKS;
                break;
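With CONFIG_KVM_DEVICE_ASSIGNMENT=n these capabilities now report 0, nudging
userspace toward VFIO. An illustrative probe (not from this patch):

	/* KVM_CHECK_EXTENSION returns 1 if the capability is present */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ) <= 0)
		use_vfio = true;	/* legacy assignment not built in */
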
@@ -2679,6 +2703,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
 {
+       kvm_x86_ops->sync_pir_to_irr(vcpu);
        memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
 
        return 0;
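The sync_pir_to_irr() call matters with APICv posted interrupts: pending
vectors may still sit in the Posted Interrupt Request descriptor rather than
the IRR, and syncing first gives KVM_GET_LAPIC userspace a complete register
image.
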
@@ -2696,7 +2721,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
 {
-       if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
+       if (irq->irq >= KVM_NR_INTERRUPTS)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
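The dropped "irq->irq < 0" half of the range check was dead code, since the
field is unsigned in the userspace ABI:

	/* include/uapi/linux/kvm.h */
	struct kvm_interrupt {
		__u32 irq;	/* unsigned: can never be negative */
	};
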
@@ -2819,10 +2844,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
        events->nmi.pad = 0;
 
-       events->sipi_vector = vcpu->arch.sipi_vector;
+       events->sipi_vector = 0; /* never valid when reporting to user space */
 
        events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR
                         | KVM_VCPUEVENT_VALID_SHADOW);
        memset(&events->reserved, 0, sizeof(events->reserved));
 }
@@ -2853,8 +2877,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.nmi_pending = events->nmi.pending;
        kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-       if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
-               vcpu->arch.sipi_vector = events->sipi_vector;
+       if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
+           kvm_vcpu_has_lapic(vcpu))
+               vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 
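The SIPI vector now lives in the kernel LAPIC (vcpu->arch.apic->sipi_vector)
and may only be set when one exists; reporting it back to userspace is no
longer meaningful, hence the constant 0 in the get path above.
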
@@ -3478,13 +3503,15 @@ out:
        return r;
 }
 
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
+                       bool line_status)
 {
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;
 
        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
-                                       irq_event->irq, irq_event->level);
+                                       irq_event->irq, irq_event->level,
+                                       line_status);
        return 0;
 }
 
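The new line_status argument is threaded from the KVM_IRQ_LINE ioctl down to
kvm_set_irq() so that level-triggered, resample-capable interrupt paths can
report the resulting line state back to the caller.
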
@@ -4752,11 +4779,15 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 }
 
 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
-                                 bool write_fault_to_shadow_pgtable)
+                                 bool write_fault_to_shadow_pgtable,
+                                 int emulation_type)
 {
        gpa_t gpa = cr2;
        pfn_t pfn;
 
+       if (emulation_type & EMULTYPE_NO_REEXECUTE)
+               return false;
+
        if (!vcpu->arch.mmu.direct_map) {
                /*
                 * Write permission should be allowed since only
@@ -4899,8 +4930,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                if (r != EMULATION_OK)  {
                        if (emulation_type & EMULTYPE_TRAP_UD)
                                return EMULATE_FAIL;
-                       if (reexecute_instruction(vcpu, cr2,
-                                                 write_fault_to_spt))
+                       if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+                                               emulation_type))
                                return EMULATE_DONE;
                        if (emulation_type & EMULTYPE_SKIP)
                                return EMULATE_FAIL;
@@ -4930,7 +4961,8 @@ restart:
                return EMULATE_DONE;
 
        if (r == EMULATION_FAILED) {
-               if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
+               if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+                                       emulation_type))
                        return EMULATE_DONE;
 
                return handle_emulation_failure(vcpu);
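EMULTYPE_NO_REEXECUTE lets a caller forbid the "unprotect the page and
re-enter the guest" fallback when retrying would be unsafe. A hypothetical
call site (flag from this series, arguments invented for illustration):

	/* fail the emulation outright rather than retrying in the guest */
	er = x86_emulate_instruction(vcpu, cr2, EMULTYPE_NO_REEXECUTE, NULL, 0);
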
@@ -5641,14 +5673,20 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 #endif
 }
 
-static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
+static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
        u64 eoi_exit_bitmap[4];
+       u32 tmr[8];
+
+       if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+               return;
 
        memset(eoi_exit_bitmap, 0, 32);
+       memset(tmr, 0, 32);
 
-       kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
+       kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr);
        kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+       kvm_apic_update_tmr(vcpu, tmr);
 }
 
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
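Both arrays in vcpu_scan_ioapic() cover the full 256 interrupt vectors,
which is why each memset clears 32 bytes:

	u64 eoi_exit_bitmap[4];	/* 4 * 64 = 256 bits */
	u32 tmr[8];		/* 8 * 32 = 256 bits */

The TMR (trigger-mode register) image is recomputed together with the EOI
exit bitmap so level-triggered IOAPIC entries show up in the virtual APIC.
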
@@ -5656,7 +5694,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        int r;
        bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
                vcpu->run->request_interrupt_window;
-       bool req_immediate_exit = 0;
+       bool req_immediate_exit = false;
 
        if (vcpu->requests) {
                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5698,24 +5736,30 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        record_steal_time(vcpu);
                if (kvm_check_request(KVM_REQ_NMI, vcpu))
                        process_nmi(vcpu);
-               req_immediate_exit =
-                       kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
                if (kvm_check_request(KVM_REQ_PMU, vcpu))
                        kvm_handle_pmu_event(vcpu);
                if (kvm_check_request(KVM_REQ_PMI, vcpu))
                        kvm_deliver_pmi(vcpu);
-               if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
-                       update_eoi_exitmap(vcpu);
+               if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
+                       vcpu_scan_ioapic(vcpu);
        }
 
        if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+               kvm_apic_accept_events(vcpu);
+               if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
+                       r = 1;
+                       goto out;
+               }
+
                inject_pending_event(vcpu);
 
                /* enable NMI/IRQ window open exits if needed */
                if (vcpu->arch.nmi_pending)
-                       kvm_x86_ops->enable_nmi_window(vcpu);
+                       req_immediate_exit =
+                               kvm_x86_ops->enable_nmi_window(vcpu) != 0;
                else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-                       kvm_x86_ops->enable_irq_window(vcpu);
+                       req_immediate_exit =
+                               kvm_x86_ops->enable_irq_window(vcpu) != 0;
 
                if (kvm_lapic_enabled(vcpu)) {
                        /*
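Two things happen in this hunk: a vcpu that has accepted INIT but not yet a
SIPI must not inject events or enter the guest (hence the early return), and
enable_nmi_window()/enable_irq_window() now return nonzero when the window
exit cannot be armed. In the latter case KVM forces an exit right after VM
entry; the flag is consumed further down in this function, roughly:

	if (req_immediate_exit)
		smp_send_reschedule(vcpu->cpu);	/* self-IPI forces an immediate exit */
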
@@ -5794,7 +5838,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
-       local_irq_enable();
+
+       /* Interrupts are enabled by handle_external_intr() */
+       kvm_x86_ops->handle_external_intr(vcpu);
 
        ++vcpu->stat.exits;
 
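Rather than a bare local_irq_enable(), the vendor hook may acknowledge the
interrupt that caused the VM exit and invoke its handler directly (VMX with
"ack interrupt on exit"), avoiding an IDT round trip; either way the hook is
responsible for re-enabling interrupts, as the comment notes.
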
@@ -5843,16 +5889,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        int r;
        struct kvm *kvm = vcpu->kvm;
 
-       if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
-               pr_debug("vcpu %d received sipi with vector # %x\n",
-                        vcpu->vcpu_id, vcpu->arch.sipi_vector);
-               kvm_lapic_reset(vcpu);
-               r = kvm_vcpu_reset(vcpu);
-               if (r)
-                       return r;
-               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-       }
-
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
        r = vapic_enter(vcpu);
        if (r) {
@@ -5869,8 +5905,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                        kvm_vcpu_block(vcpu);
                        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-                       if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
-                       {
+                       if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+                               kvm_apic_accept_events(vcpu);
                                switch(vcpu->arch.mp_state) {
                                case KVM_MP_STATE_HALTED:
                                        vcpu->arch.mp_state =
@@ -5878,7 +5914,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                                case KVM_MP_STATE_RUNNABLE:
                                        vcpu->arch.apf.halted = false;
                                        break;
-                               case KVM_MP_STATE_SIPI_RECEIVED:
+                               case KVM_MP_STATE_INIT_RECEIVED:
+                                       break;
                                default:
                                        r = -EINTR;
                                        break;
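The SIPI bookkeeping deleted above moves into kvm_apic_accept_events(), now
called on every unhalt. A condensed sketch of that helper (the full version
in lapic.c also handles the BSP case and memory ordering):

	void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
	{
		struct kvm_lapic *apic = vcpu->arch.apic;

		if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
			kvm_lapic_reset(vcpu);
			kvm_vcpu_reset(vcpu);
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		}
		if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) &&
		    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			kvm_vcpu_deliver_sipi_vector(vcpu, apic->sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}
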
@@ -6013,6 +6050,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
+               kvm_apic_accept_events(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                r = -EAGAIN;
                goto out;
@@ -6169,6 +6207,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
+       kvm_apic_accept_events(vcpu);
        mp_state->mp_state = vcpu->arch.mp_state;
        return 0;
 }
@@ -6176,7 +6215,15 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
-       vcpu->arch.mp_state = mp_state->mp_state;
+       if (!kvm_vcpu_has_lapic(vcpu) &&
+           mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
+               return -EINVAL;
+
+       if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
+               vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+               set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
+       } else
+               vcpu->arch.mp_state = mp_state->mp_state;
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 0;
 }
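KVM_MP_STATE_SIPI_RECEIVED stays in the userspace ABI for migration
compatibility but is translated on load into INIT_RECEIVED plus a pending
SIPI event, so the APIC event machinery replays it through the same path.
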
@@ -6475,9 +6522,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        r = vcpu_load(vcpu);
        if (r)
                return r;
-       r = kvm_vcpu_reset(vcpu);
-       if (r == 0)
-               r = kvm_mmu_setup(vcpu);
+       kvm_vcpu_reset(vcpu);
+       r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
 
        return r;
@@ -6514,7 +6560,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        kvm_x86_ops->vcpu_free(vcpu);
 }
 
-static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
+void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
        atomic_set(&vcpu->arch.nmi_queued, 0);
        vcpu->arch.nmi_pending = 0;
@@ -6541,7 +6587,18 @@ static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.regs_avail = ~0;
        vcpu->arch.regs_dirty = ~0;
 
-       return kvm_x86_ops->vcpu_reset(vcpu);
+       kvm_x86_ops->vcpu_reset(vcpu);
+}
+
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
+{
+       struct kvm_segment cs;
+
+       kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
+       cs.selector = vector << 8;
+       cs.base = vector << 12;
+       kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
+       kvm_rip_write(vcpu, 0);
 }
 
 int kvm_arch_hardware_enable(void *garbage)
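kvm_vcpu_deliver_sipi_vector() implements the architectural real-mode start
address CS:IP = (vector << 8):0000. Worked example:

	/* SIPI vector 0x12 */
	cs.selector = 0x12 << 8;	/* 0x1200 */
	cs.base     = 0x12 << 12;	/* 0x12000 */
	kvm_rip_write(vcpu, 0);
	/* first instruction is fetched from physical 0x12000 */
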
@@ -6706,8 +6763,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        }
        vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
 
-       if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
+               r = -ENOMEM;
                goto fail_free_mce_banks;
+       }
 
        r = fx_init(vcpu);
        if (r)
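Without the explicit -ENOMEM, this failure path jumped to
fail_free_mce_banks with r still 0 from the last successful step, so a
failed cpumask allocation was reported to the caller as success.
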
@@ -6811,6 +6870,23 @@ void kvm_arch_sync_events(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+       if (current->mm == kvm->mm) {
+               /*
+                * Free memory regions allocated on behalf of userspace,
+                * unless the memory map has changed due to process exit
+                * or fd copying.
+                */
+               struct kvm_userspace_memory_region mem;
+               memset(&mem, 0, sizeof(mem));
+               mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
+               kvm_set_memory_region(kvm, &mem);
+
+               mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
+               kvm_set_memory_region(kvm, &mem);
+
+               mem.slot = TSS_PRIVATE_MEMSLOT;
+               kvm_set_memory_region(kvm, &mem);
+       }
        kvm_iommu_unmap_guest(kvm);
        kfree(kvm->arch.vpic);
        kfree(kvm->arch.vioapic);
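A kvm_userspace_memory_region with memory_size == 0 (as the memset leaves
it) is the canonical "delete this slot" request, so each call unmaps the
vm_mmap() area that kvm_arch_prepare_memory_region() created for the private
slot. The current->mm test skips this when the VM fd outlived the creating
process and those mappings are already gone.
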
@@ -6903,24 +6979,21 @@ out_free:
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
-                               struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
-                               bool user_alloc)
+                               enum kvm_mr_change change)
 {
-       int npages = memslot->npages;
-
        /*
         * Only private memory slots need to be mapped here since
         * KVM_SET_MEMORY_REGION ioctl is no longer supported.
         */
-       if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
+       if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
                unsigned long userspace_addr;
 
                /*
                 * MAP_SHARED to prevent internal slot pages from being moved
                 * by fork()/COW.
                 */
-               userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
+               userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
                                         PROT_READ | PROT_WRITE,
                                         MAP_SHARED | MAP_ANONYMOUS, 0);
 
@@ -6935,17 +7008,17 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
-                               struct kvm_memory_slot old,
-                               bool user_alloc)
+                               const struct kvm_memory_slot *old,
+                               enum kvm_mr_change change)
 {
 
-       int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
+       int nr_mmu_pages = 0;
 
-       if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) {
+       if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
                int ret;
 
-               ret = vm_munmap(old.userspace_addr,
-                               old.npages * PAGE_SIZE);
+               ret = vm_munmap(old->userspace_addr,
+                               old->npages * PAGE_SIZE);
                if (ret < 0)
                        printk(KERN_WARNING
                               "kvm_vm_ioctl_set_memory_region: "
@@ -6962,14 +7035,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
         * Existing largepage mappings are destroyed here and new ones will
         * not be created until the end of the logging.
         */
-       if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+       if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
                kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        /*
         * If memory slot is created, or moved, we need to clear all
         * mmio sptes.
         */
-       if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) {
-               kvm_mmu_zap_all(kvm);
+       if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+               kvm_mmu_zap_mmio_sptes(kvm);
                kvm_reload_remote_mmus(kvm);
        }
 }
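The explicit change type replaces guessing intent from page counts. The enum
as introduced in this series (comments are a gloss, not from the source):

	enum kvm_mr_change {
		KVM_MR_CREATE,		/* old.npages == 0, new.npages != 0 */
		KVM_MR_DELETE,		/* old.npages != 0, new.npages == 0 */
		KVM_MR_MOVE,		/* base_gfn changed */
		KVM_MR_FLAGS_ONLY,	/* only flags changed */
	};
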
@@ -6991,7 +7064,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
                !vcpu->arch.apf.halted)
                || !list_empty_careful(&vcpu->async_pf.done)
-               || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
+               || kvm_apic_has_events(vcpu)
                || atomic_read(&vcpu->arch.nmi_queued) ||
                (kvm_arch_interrupt_allowed(vcpu) &&
                 kvm_cpu_has_interrupt(vcpu));
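kvm_apic_has_events() keeps a vcpu with a pending INIT/SIPI runnable so the
event machinery gets a chance to process it; it is essentially (sketch, from
lapic.h in this series):

	static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.apic->pending_events;
	}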