/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include "kvm_cache_regs.h"

#include <asm/virtext.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
static int __read_mostly bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, S_IRUGO);

static int __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static int __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static int __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static int __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static int __read_mostly vmm_exclusive = 1;
module_param(vmm_exclusive, bool, S_IRUGO);

static int __read_mostly yield_on_hlt = 1;
module_param(yield_on_hlt, bool, S_IRUGO);
#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
#define KVM_GUEST_CR0_MASK						\
	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST				\
	(X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON						\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS				\
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. According to test, this time is usually smaller than
 *             128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held
 *             for less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer to SDM volume 3b section 21.6.13 & 22.1.3.
 */
#define KVM_VMX_DEFAULT_PLE_GAP    128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
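
/*
 * Both parameters can be tuned at module load time; as an illustrative
 * invocation (not part of this file), "modprobe kvm-intel ple_gap=0"
 * disables Pause-Loop Exiting entirely, matching what hardware_setup()
 * does when the CPU lacks the feature.
 */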
#define NR_AUTOLOAD_MSRS 1

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	struct list_head local_vcpus_link;
	unsigned long host_rsp;
	int launched;
	u8 cpl;
	bool nmi_known_unmasked;
	u32 idt_vectoring_info;
	ulong rflags;
	struct shared_msr_entry *guest_msrs;
	int nmsrs;
	int save_nmsrs;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif
	int vpid;
	struct vmcs *vmcs;
	struct msr_autoload {
		unsigned nr;
		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
	} msr_autoload;
	struct {
		int loaded;
		u16 fs_sel, gs_sel, ldt_sel;
		int gs_ldt_reload_needed;
		int fs_reload_needed;
	} host_state;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment seg[8];
	} segment_cache;
	bool emulation_required;
	bool rdtscp_enabled;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	s64 vnmi_blocked_time;
};
enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;

static bool cpu_has_load_ia32_efer;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;
#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
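
/*
 * The is_*() predicates below decode the VM-exit interruption-information
 * field: bits 7:0 hold the vector, bits 10:8 the event type and bit 31 the
 * valid bit (see the SDM's VM-exit information fields). Each check masks
 * vector, type and valid bit at once and compares against the fully
 * qualified pattern.
 */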
static inline bool is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}
static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline bool cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline bool vm_need_tpr_shadow(struct kvm *kvm)
{
	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}

static inline bool cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}

static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
	return vmx_capability.ept & VMX_EPTP_UC_BIT;
}

static inline bool cpu_has_vmx_eptp_writeback(void)
{
	return vmx_capability.ept & VMX_EPTP_WB_BIT;
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_1g_page(void)
{
	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_4levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}

static inline bool cpu_has_vmx_invept_individual_addr(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
}

static inline bool cpu_has_vmx_invept_context(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invept_global(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}

static inline bool cpu_has_vmx_invvpid_single(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid_global(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline bool cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline bool cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return flexpriority_enabled && irqchip_in_kernel(kvm);
}

static inline bool cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDTSCP;
}

static inline bool cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool cpu_has_vmx_wbinvd_exit(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_WBINVD_EXITING;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}
static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}
static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
			: "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
		       vmcs, phys_addr);
}
static void __vcpu_clear(void *arg)
{
	struct vcpu_vmx *vmx = arg;
	int cpu = raw_smp_processor_id();

	if (vmx->vcpu.cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	list_del(&vmx->local_vcpus_link);
	vmx->vcpu.cpu = -1;
	vmx->launched = 0;
}

static void vcpu_clear(struct vcpu_vmx *vmx)
{
	if (vmx->vcpu.cpu == -1)
		return;
	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
}
static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(struct vcpu_vmx *vmx)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vmx);
	else
		vpid_sync_vcpu_global();
}
static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}

static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_individual_addr())
			__invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
					eptp, gpa);
		else
			ept_sync_context(eptp);
	}
}
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value = 0;

	asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
		      : "+a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}
static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}
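
/*
 * vmcs_clear_bits()/vmcs_set_bits() are simple read-modify-write wrappers;
 * e.g. clear_atomic_switch_msr() below uses
 * vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER) to drop a
 * single control bit without touching the rest of the field.
 */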
static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
		vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}
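
/*
 * The segment cache avoids redundant VMREADs: the first access to a
 * (segment, field) pair after a VM exit goes to the VMCS and is cached;
 * later accesses are served from vmx->segment_cache until someone calls
 * vmx_segment_cache_clear() or VCPU_EXREG_SEGMENTS is dropped from
 * regs_avail.
 */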
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << NM_VECTOR) | (1u << DB_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
	if (vcpu->fpu_active)
		eb &= ~(1u << NM_VECTOR);
	vmcs_write32(EXCEPTION_BITMAP, eb);
}
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
		return;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == m->nr)
		return;
	--m->nr;
	m->guest[i] = m->guest[m->nr];
	m->host[i] = m->host[m->nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
		vmcs_write64(GUEST_IA32_EFER, guest_val);
		vmcs_write64(HOST_IA32_EFER, host_val);
		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
		return;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == m->nr) {
		++m->nr;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
	}

	m->guest[i].index = msr;
	m->guest[i].value = guest_val;
	m->host[i].index = msr;
	m->host[i].value = host_val;
}
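
/*
 * add_atomic_switch_msr()/clear_atomic_switch_msr() manage the VM-entry
 * and VM-exit MSR-load areas as a pair: slot i of m->guest is applied on
 * entry and slot i of m->host on exit. With NR_AUTOLOAD_MSRS == 1 the only
 * user here is MSR_EFER, and when the CPU supports the dedicated "load
 * IA32_EFER" controls the MSR area is bypassed entirely.
 */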
static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
	struct desc_struct *descs;

	descs = (void *)gdt->address;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
	u64 guest_efer;
	u64 ignore_bits;

	guest_efer = vmx->vcpu.arch.efer;

	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
	 * outside long mode
	 */
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	vmx->guest_msrs[efer_offset].data = guest_efer;
	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

	clear_atomic_switch_msr(vmx, MSR_EFER);
	/* On ept, can't emulate nx, and must switch nx atomically */
	if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
		guest_efer = vmx->vcpu.arch.efer;
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
		return false;
	}

	return true;
}
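
/*
 * Summary of the EFER strategy above: if guest and host disagree only in
 * bits that hardware or emulation already handles (NX, LMA/LME, SCE), EFER
 * can be switched lazily via the shared-MSR machinery (return true). Under
 * EPT a real NX mismatch must be switched atomically on every entry/exit,
 * so the function falls back to the autoload area and returns false.
 */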
static unsigned long segment_base(u16 selector)
{
	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (!(selector & ~3))
		return 0;

	table_base = gdt->address;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~3))
			return 0;

		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = get_desc_base(d);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	savesegment(fs, vmx->host_state.fs_sel);
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	savesegment(gs, vmx->host_state.gs_sel);
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	for (i = 0; i < vmx->save_nmsrs; ++i)
		kvm_set_shared_msr(vmx->guest_msrs[i].index,
				   vmx->guest_msrs[i].data,
				   vmx->guest_msrs[i].mask);
}
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(vmx->host_state.gs_sel);
#else
		loadsegment(gs, vmx->host_state.gs_sel);
#endif
	}
	if (vmx->host_state.fs_reload_needed)
		loadsegment(fs, vmx->host_state.fs_sel);
	reload_tss();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	if (current_thread_info()->status & TS_USEDFPU)
		clts();
	load_gdt(&__get_cpu_var(host_gdt));
}
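
/*
 * Host state is saved/restored lazily: vmx_save_host_state() runs before
 * entering the guest, but __vmx_load_host_state() is only forced when the
 * host actually needs its segments/MSRs back (vcpu_put, MSR accesses,
 * setup_msrs), which keeps the common exit path cheap.
 */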
static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}
/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));

	if (!vmm_exclusive)
		kvm_cpu_vmxon(phys_addr);
	else if (vcpu->cpu != cpu)
		vcpu_clear(vmx);

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		vmcs_load(vmx->vmcs);
	}

	if (vcpu->cpu != cpu) {
		struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
		unsigned long sysenter_esp;

		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		local_irq_disable();
		list_add(&vmx->local_vcpus_link,
			 &per_cpu(vcpus_on_cpu, cpu));
		local_irq_enable();

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
		vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
	}
}
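
/*
 * When a vcpu migrates to a new physical CPU, the TLB flush request and
 * the per-cpu bookkeeping above keep two invariants: vcpus_on_cpu lists
 * every VMCS that may be resident on this CPU (so hardware_disable() can
 * VMCLEAR them), and the host TR/GDT/SYSENTER values in the VMCS always
 * match the current CPU.
 */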
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	__vmx_load_host_state(to_vmx(vcpu));
	if (!vmm_exclusive) {
		__vcpu_clear(to_vmx(vcpu));
		kvm_cpu_vmxoff();
	}
}
static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	ulong cr0;

	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	cr0 = vmcs_readl(GUEST_CR0);
	cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
	cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
	vmcs_writel(GUEST_CR0, cr0);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	vmx_decache_cr0_guest_bits(vcpu);
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = 0;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
	vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags, save_rflags;

	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (to_vmx(vcpu)->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		to_vmx(vcpu)->rflags = rflags;
	}
	return to_vmx(vcpu)->rflags;
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
	to_vmx(vcpu)->rflags = rflags;
	if (to_vmx(vcpu)->rmode.vm86_active) {
		to_vmx(vcpu)->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);
}
static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= KVM_X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= KVM_X86_SHADOW_INT_MOV_SS;

	return ret & mask;
}

static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	else if (mask & KVM_X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if ((interruptibility != interruptibility_old))
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}
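
/*
 * STI and MOV SS both block interrupt delivery for one instruction; the
 * two helpers above translate between that hardware interruptibility state
 * and KVM's arch-neutral KVM_X86_SHADOW_INT_* encoding. Clearing the
 * shadow in skip_emulated_instruction() below mirrors what the CPU would
 * do after executing the real instruction.
 */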
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;

	rip = kvm_rip_read(vcpu);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	kvm_rip_write(vcpu, rip);

	/* skipping an emulated instruction also counts */
	vmx_set_interrupt_shadow(vcpu, 0);
}

static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
{
	/* Ensure that we clear the HLT state in the VMCS.  We don't need to
	 * explicitly skip the instruction because if the HLT state is set,
	 * then the instruction is already executing and RIP has already been
	 * advanced. */
	if (!yield_on_hlt &&
	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
}
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (kvm_exception_is_soft(nr))
			inc_eip = vcpu->arch.event_exit_inst_len;
		if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
	vmx_clear_hlt(vcpu);
}
static bool vmx_rdtscp_supported(void)
{
	return cpu_has_vmx_rdtscp();
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct shared_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
}
/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs, index;
	unsigned long *msr_bitmap;

	vmx_load_host_state(vmx);
	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_TSC_AUX);
		if (index >= 0 && vmx->rdtscp_enabled)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	index = __find_msr_index(vmx, MSR_EFER);
	if (index >= 0 && update_transition_efer(vmx, index))
		move_msr_up(vmx, index, save_nmsrs++);

	vmx->save_nmsrs = save_nmsrs;

	if (cpu_has_vmx_msr_bitmap()) {
		if (is_long_mode(&vmx->vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode;
		else
			msr_bitmap = vmx_msr_bitmap_legacy;

		vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
	}
}
/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * Empty call-back. Needs to be implemented when VMX enables the SET_TSC_KHZ
 * ioctl. In this case the call-back should update internal vmx state to make
 * the changes effective.
 */
static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
	/* Nothing to do here */
}

/*
 * writes 'offset' into guest's timestamp counter offset register
 */
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	vmcs_write64(TSC_OFFSET, offset);
}

static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
	u64 offset = vmcs_read64(TSC_OFFSET);
	vmcs_write64(TSC_OFFSET, offset + adjustment);
}

static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	return target_tsc - native_read_tsc();
}
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct shared_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(to_vmx(vcpu));
		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	case MSR_IA32_TSC:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_TSC_AUX:
		if (!to_vmx(vcpu)->rdtscp_enabled)
			return 1;
		/* Otherwise falls through */
	default:
		vmx_load_host_state(to_vmx(vcpu));
		msr = find_msr_entry(to_vmx(vcpu), msr_index);
		if (msr) {
			vmx_load_host_state(to_vmx(vcpu));
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;
	int ret = 0;

	switch (msr_index) {
	case MSR_EFER:
		vmx_load_host_state(vmx);
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(vmx);
		vmx->msr_guest_kernel_gs_base = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, data);
		break;
	case MSR_IA32_CR_PAT:
		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		break;
	case MSR_TSC_AUX:
		if (!vmx->rdtscp_enabled)
			return 1;
		/* Check reserved bit, higher 32 bits should be zero */
		if ((data >> 32) != 0)
			return 1;
		/* Otherwise falls through */
	default:
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			vmx_load_host_state(vmx);
			msr->data = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	case VCPU_EXREG_PDPTR:
		if (enable_ept)
			ept_save_pdptrs(vcpu);
		break;
	default:
		break;
	}
}

static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
	else
		vmcs_writel(GUEST_DR7, vcpu->arch.dr7);

	update_exception_bitmap(vcpu);
}
static __init int cpu_has_kvm_support(void)
{
	return cpu_has_vmx();
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	if (msr & FEATURE_CONTROL_LOCKED) {
		/* launched w/ TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& tboot_enabled())
			return 1;
		/* launched w/o TXT and VMX only enabled w/ TXT */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& !tboot_enabled()) {
			printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
				"activate TXT before enabling KVM\n");
			return 1;
		}
		/* launched w/o TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& !tboot_enabled())
			return 1;
	}

	return 0;
}
static void kvm_cpu_vmxon(u64 addr)
{
	asm volatile (ASM_VMX_VMXON_RAX
			: : "a"(&addr), "m"(addr)
			: "memory", "cc");
}

static int hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old, test_bits;

	if (read_cr4() & X86_CR4_VMXE)
		return -EBUSY;

	INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);

	test_bits = FEATURE_CONTROL_LOCKED;
	test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	if (tboot_enabled())
		test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;

	if ((old & test_bits) != test_bits) {
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
	}
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */

	if (vmm_exclusive) {
		kvm_cpu_vmxon(phys_addr);
		ept_sync_global();
	}

	store_gdt(&__get_cpu_var(host_gdt));

	return 0;
}
static void vmclear_local_vcpus(void)
{
	int cpu = raw_smp_processor_id();
	struct vcpu_vmx *vmx, *n;

	list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
				 local_vcpus_link)
		__vcpu_clear(vmx);
}


/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
 * tricks.
 */
static void kvm_cpu_vmxoff(void)
{
	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
}

static void hardware_disable(void *garbage)
{
	if (vmm_exclusive) {
		vmclear_local_vcpus();
		kvm_cpu_vmxoff();
	}
	write_cr4(read_cr4() & ~X86_CR4_VMXE);
}
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

static __init bool allow_1_setting(u32 msr, u32 ctl)
{
	u32 vmx_msr_low, vmx_msr_high;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);
	return vmx_msr_high & ctl;
}
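
/*
 * Each VMX capability MSR reports "allowed 0-settings" in the low 32 bits
 * (a 1 there means the control must be 1) and "allowed 1-settings" in the
 * high 32 bits (a 0 there means the control must be 0).
 * adjust_vmx_controls() folds the desired min/opt sets through both words
 * and fails only if a required bit cannot be set; allow_1_setting() just
 * probes the high word.
 */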
static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt, min2, opt2;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = PIN_BASED_VIRTUAL_NMIS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	min =
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_CR3_LOAD_EXITING |
	      CPU_BASED_CR3_STORE_EXITING |
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING |
	      CPU_BASED_MWAIT_EXITING |
	      CPU_BASED_MONITOR_EXITING |
	      CPU_BASED_INVLPG_EXITING;

	if (yield_on_hlt)
		min |= CPU_BASED_HLT_EXITING;

	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_USE_MSR_BITMAPS |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min2 = 0;
		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID |
			SECONDARY_EXEC_ENABLE_EPT |
			SECONDARY_EXEC_UNRESTRICTED_GUEST |
			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
			SECONDARY_EXEC_RDTSCP;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif
	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when
		   EPT enabled */
		_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
					     CPU_BASED_CR3_STORE_EXITING |
					     CPU_BASED_INVLPG_EXITING);
		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
		      vmx_capability.ept, vmx_capability.vpid);
	}

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = 0;
	opt = VM_ENTRY_LOAD_IA32_PAT;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_config.size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;

	cpu_has_load_ia32_efer =
		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
				VM_ENTRY_LOAD_IA32_EFER)
		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
				   VM_EXIT_LOAD_IA32_EFER);

	return 0;
}
static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		free_vmcs(per_cpu(vmxarea, cpu));
		per_cpu(vmxarea, cpu) = NULL;
	}
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}
static __init int hardware_setup(void)
{
	if (setup_vmcs_config(&vmcs_config) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (!cpu_has_vmx_vpid())
		enable_vpid = 0;

	if (!cpu_has_vmx_ept() ||
	    !cpu_has_vmx_ept_4levels()) {
		enable_ept = 0;
		enable_unrestricted_guest = 0;
	}

	if (!cpu_has_vmx_unrestricted_guest())
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	if (!cpu_has_vmx_tpr_shadow())
		kvm_x86_ops->update_cr8_intercept = NULL;

	if (enable_ept && !cpu_has_vmx_ept_2m_page())
		kvm_disable_largepages();

	if (!cpu_has_vmx_ple())
		ple_gap = 0;

	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
	} else {
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
	}
}
static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->emulation_required = 1;
	vmx->rmode.vm86_active = 0;

	vmx_segment_cache_clear(vmx);

	vmcs_write16(GUEST_TR_SELECTOR, vmx->rmode.tr.selector);
	vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	if (emulate_invalid_guest_state)
		return;

	fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);

	vmx_segment_cache_clear(vmx);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}
static gva_t rmode_tss_base(struct kvm *kvm)
{
	if (!kvm->arch.tss_addr) {
		struct kvm_memslots *slots;
		gfn_t base_gfn;

		slots = kvm_memslots(kvm);
		base_gfn = slots->memslots[0].base_gfn +
			 kvm->memslots->memslots[0].npages - 3;
		return base_gfn << PAGE_SHIFT;
	}
	return kvm->arch.tss_addr;
}
static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	save->selector = vmcs_read16(sf->selector);
	save->base = vmcs_readl(sf->base);
	save->limit = vmcs_read32(sf->limit);
	save->ar = vmcs_read32(sf->ar_bytes);
	vmcs_write16(sf->selector, save->base >> 4);
	vmcs_write32(sf->base, save->base & 0xffff0);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);
	if (save->base & 0xf)
		printk_once(KERN_WARNING "kvm: segment base is not paragraph"
			    " aligned when entering protected mode (seg=%d)",
			    seg);
}
static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (enable_unrestricted_guest)
		return;

	vmx->emulation_required = 1;
	vmx->rmode.vm86_active = 1;

	/*
	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
	 * vcpu. Call it here with phys address pointing 16M below 4G.
	 */
	if (!vcpu->kvm->arch.tss_addr) {
		printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be "
			     "called before entering vcpu\n");
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		vmx_set_tss_addr(vcpu->kvm, 0xfeffd000);
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	}

	vmx_segment_cache_clear(vmx);

	vmx->rmode.tr.selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vmx->rmode.save_rflags = flags;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	if (emulate_invalid_guest_state)
		goto continue_rmode;

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);

continue_rmode:
	kvm_mmu_reset_context(vcpu);
}
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

	if (!msr)
		return;

	/*
	 * Force kernel_gs_base reloading before EFER changes, as control
	 * of this msr depends on is_long_mode().
	 */
	vmx_load_host_state(to_vmx(vcpu));
	vcpu->arch.efer = efer;
	if (efer & EFER_LMA) {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) |
			     VM_ENTRY_IA32E_MODE);
		msr->data = efer;
	} else {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) &
			     ~VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;
	}
	setup_msrs(vmx);
}
#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	vmx_segment_cache_clear(to_vmx(vcpu));

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
		       __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}
	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_IA32E_MODE);
	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}

#endif
static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	vpid_sync_context(to_vmx(vcpu));
	if (enable_ept) {
		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
			return;
		ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
	}
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;

	vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
}

static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
{
	if (enable_ept && is_paging(vcpu))
		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
}

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;

	vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
}
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_dirty))
		return;

	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
		vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
		vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
		vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
	}
}

static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
		vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
		vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
		vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
		vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
	}

	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
}
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);

static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
					unsigned long cr0,
					struct kvm_vcpu *vcpu)
{
	vmx_decache_cr3(vcpu);
	if (!(cr0 & X86_CR0_PG)) {
		/* From paging/starting to nonpaging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
			     (CPU_BASED_CR3_LOAD_EXITING |
			      CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
	} else if (!is_paging(vcpu)) {
		/* From nonpaging to paging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
			     ~(CPU_BASED_CR3_LOAD_EXITING |
			       CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
	}

	if (!(cr0 & X86_CR0_WP))
		*hw_cr0 &= ~X86_CR0_WP;
}
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long hw_cr0;

	if (enable_unrestricted_guest)
		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
			| KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
	else
		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;

	if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
		enter_pmode(vcpu);

	if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	if (enable_ept)
		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);

	if (!vcpu->fpu_active)
		hw_cr0 |= X86_CR0_TS | X86_CR0_MP;

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0, hw_cr0);
	vcpu->arch.cr0 = cr0;
	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
}
static u64 construct_eptp(unsigned long root_hpa)
{
	u64 eptp;

	/* TODO write the value reading from MSR */
	eptp = VMX_EPT_DEFAULT_MT |
		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
	eptp |= (root_hpa & PAGE_MASK);

	return eptp;
}
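
/*
 * The EPTP value built above packs the memory type into bits 2:0 and the
 * page-walk length (levels - 1, via VMX_EPT_DEFAULT_GAW) into bits 5:3,
 * with the physical address of the top-level EPT table in the upper bits.
 */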
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	unsigned long guest_cr3;
	u64 eptp;

	guest_cr3 = cr3;
	if (enable_ept) {
		eptp = construct_eptp(cr3);
		vmcs_write64(EPT_POINTER, eptp);
		guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
			vcpu->kvm->arch.ept_identity_map_addr;
		ept_load_pdptrs(vcpu);
	}

	vmx_flush_tlb(vcpu);
	vmcs_writel(GUEST_CR3, guest_cr3);
}
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);

	vcpu->arch.cr4 = cr4;
	if (enable_ept) {
		if (!is_paging(vcpu)) {
			hw_cr4 &= ~X86_CR4_PAE;
			hw_cr4 |= X86_CR4_PSE;
		} else if (!(cr4 & X86_CR4_PAE)) {
			hw_cr4 &= ~X86_CR4_PAE;
		}
	}

	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, hw_cr4);
}
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_save_segment *save;
	u32 ar;

	if (vmx->rmode.vm86_active
	    && (seg == VCPU_SREG_TR || seg == VCPU_SREG_ES
		|| seg == VCPU_SREG_DS || seg == VCPU_SREG_FS
		|| seg == VCPU_SREG_GS)
	    && !emulate_invalid_guest_state) {
		switch (seg) {
		case VCPU_SREG_TR: save = &vmx->rmode.tr; break;
		case VCPU_SREG_ES: save = &vmx->rmode.es; break;
		case VCPU_SREG_DS: save = &vmx->rmode.ds; break;
		case VCPU_SREG_FS: save = &vmx->rmode.fs; break;
		case VCPU_SREG_GS: save = &vmx->rmode.gs; break;
		default: BUG();
		}
		var->selector = save->selector;
		var->base = save->base;
		var->limit = save->limit;
		ar = save->ar;
		if (seg == VCPU_SREG_TR
		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
			goto use_saved_rmode_seg;
	}
	var->base = vmx_read_guest_seg_base(vmx, seg);
	var->limit = vmx_read_guest_seg_limit(vmx, seg);
	var->selector = vmx_read_guest_seg_selector(vmx, seg);
	ar = vmx_read_guest_seg_ar(vmx, seg);
use_saved_rmode_seg:
	if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
		ar = 0;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	var->present = (ar >> 7) & 1;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
	var->unusable = (ar >> 16) & 1;
}
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment s;

	if (to_vmx(vcpu)->rmode.vm86_active) {
		vmx_get_segment(vcpu, &s, seg);
		return s.base;
	}
	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
}

static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	if (!is_protmode(vcpu))
		return 0;

	if (!is_long_mode(vcpu)
	    && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
		return 3;

	return vmx_read_guest_seg_selector(to_vmx(vcpu), VCPU_SREG_CS) & 3;
}

static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
		to_vmx(vcpu)->cpl = __vmx_get_cpl(vcpu);
	}
	return to_vmx(vcpu)->cpl;
}
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
	u32 ar;

	if (var->unusable)
		ar = 1 << 16;
	else {
		ar = var->type & 15;
		ar |= (var->s & 1) << 4;
		ar |= (var->dpl & 3) << 5;
		ar |= (var->present & 1) << 7;
		ar |= (var->avl & 1) << 12;
		ar |= (var->l & 1) << 13;
		ar |= (var->db & 1) << 14;
		ar |= (var->g & 1) << 15;
	}
	if (ar == 0) /* a 0 value means unusable */
		ar = AR_UNUSABLE_MASK;

	return ar;
}
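
/*
 * The 32-bit access-rights layout used above matches the VMCS AR format:
 * type (3:0), S (4), DPL (6:5), P (7), AVL (12), L (13), D/B (14), G (15)
 * and the "unusable" bit at 16, which has no counterpart in a classic GDT
 * descriptor.
 */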
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	vmx_segment_cache_clear(vmx);

	if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
		vmcs_write16(sf->selector, var->selector);
		vmx->rmode.tr.selector = var->selector;
		vmx->rmode.tr.base = var->base;
		vmx->rmode.tr.limit = var->limit;
		vmx->rmode.tr.ar = vmx_segment_access_rights(var);
		return;
	}
	vmcs_writel(sf->base, var->base);
	vmcs_write32(sf->limit, var->limit);
	vmcs_write16(sf->selector, var->selector);
	if (vmx->rmode.vm86_active && var->s) {
		/*
		 * Hack real-mode segments into vm86 compatibility.
		 */
		if (var->base == 0xffff0000 && var->selector == 0xf000)
			vmcs_writel(sf->base, 0xf0000);
		ar = 0xf3;
	} else
		ar = vmx_segment_access_rights(var);

	/*
	 * Fix the "Accessed" bit in AR field of segment registers for older
	 * qemu binaries.
	 * IA32 arch specifies that at the time of processor reset the
	 * "Accessed" bit in the AR field of segment registers is 1. And qemu
	 * is setting it to 0 in the userland code. This causes invalid guest
	 * state vmexit when "unrestricted guest" mode is turned on.
	 * Fix for this setup issue in cpu_reset is being pushed in the qemu
	 * tree. Newer qemu binaries with that qemu fix would not need this
	 * kvm hack.
	 */
	if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
		ar |= 0x1; /* Accessed */

	vmcs_write32(sf->ar_bytes, ar);
	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
}
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_IDTR_BASE, dt->address);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_GDTR_BASE, dt->address);
}
2341 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
2343 struct kvm_segment var;
2346 vmx_get_segment(vcpu, &var, seg);
2347 ar = vmx_segment_access_rights(&var);
2349 if (var.base != (var.selector << 4))
2351 if (var.limit != 0xffff)
2359 static bool code_segment_valid(struct kvm_vcpu *vcpu)
2361 struct kvm_segment cs;
2362 unsigned int cs_rpl;
2364 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
2365 cs_rpl = cs.selector & SELECTOR_RPL_MASK;
2369 if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
2373 if (cs.type & AR_TYPE_WRITEABLE_MASK) {
2374 if (cs.dpl > cs_rpl)
2377 if (cs.dpl != cs_rpl)
2383 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
2387 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
2389 struct kvm_segment ss;
2390 unsigned int ss_rpl;
2392 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
2393 ss_rpl = ss.selector & SELECTOR_RPL_MASK;
2397 if (ss.type != 3 && ss.type != 7)
2401 if (ss.dpl != ss_rpl) /* DPL != RPL */
2409 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
2411 struct kvm_segment var;
2414 vmx_get_segment(vcpu, &var, seg);
2415 rpl = var.selector & SELECTOR_RPL_MASK;
2423 if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
2424 if (var.dpl < rpl) /* DPL < RPL */
2428 /* TODO: Add other members to kvm_segment_field to allow checking for other access
2429 * rights flags */
2434 static bool tr_valid(struct kvm_vcpu *vcpu)
2436 struct kvm_segment tr;
2438 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
2442 if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
2444 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
2452 static bool ldtr_valid(struct kvm_vcpu *vcpu)
2454 struct kvm_segment ldtr;
2456 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
2460 if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
2470 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
2472 struct kvm_segment cs, ss;
2474 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
2475 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
2477 return ((cs.selector & SELECTOR_RPL_MASK) ==
2478 (ss.selector & SELECTOR_RPL_MASK));
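/*
 * Worked example: the RPL is the low two bits of a selector
 * (SELECTOR_RPL_MASK == 3), so CS = 0x0008 / SS = 0x0010 pass this
 * check (both RPL 0), while CS = 0x0008 / SS = 0x0013 would not.
 */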
2482 * Check if guest state is valid. Returns true if valid, false if
2483 * not.
2484 * We assume that registers are always usable
2486 static bool guest_state_valid(struct kvm_vcpu *vcpu)
2488 /* real mode guest state checks */
2489 if (!is_protmode(vcpu)) {
2490 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
2492 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
2494 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
2496 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
2498 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
2500 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
2503 /* protected mode guest state checks */
2504 if (!cs_ss_rpl_check(vcpu))
2506 if (!code_segment_valid(vcpu))
2508 if (!stack_segment_valid(vcpu))
2510 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
2512 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
2514 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
2516 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
2518 if (!tr_valid(vcpu))
2520 if (!ldtr_valid(vcpu))
2524 * - Add checks on RIP
2525 * - Add checks on RFLAGS
2531 static int init_rmode_tss(struct kvm *kvm)
2535 int r, idx, ret = 0;
2537 idx = srcu_read_lock(&kvm->srcu);
2538 fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
2539 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2542 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
2543 r = kvm_write_guest_page(kvm, fn++, &data,
2544 TSS_IOPB_BASE_OFFSET, sizeof(u16));
2547 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
2550 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2554 r = kvm_write_guest_page(kvm, fn, &data,
2555 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
2562 srcu_read_unlock(&kvm->srcu, idx);
2566 static int init_rmode_identity_map(struct kvm *kvm)
2569 pfn_t identity_map_pfn;
2574 if (unlikely(!kvm->arch.ept_identity_pagetable)) {
2575 printk(KERN_ERR "EPT: identity-mapping pagetable "
2576 "hasn't been allocated!\n");
2579 if (likely(kvm->arch.ept_identity_pagetable_done))
2582 identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
2583 idx = srcu_read_lock(&kvm->srcu);
2584 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
2587 /* Set up identity-mapping pagetable for EPT in real mode */
2588 for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
2589 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
2590 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
2591 r = kvm_write_guest_page(kvm, identity_map_pfn,
2592 &tmp, i * sizeof(tmp), sizeof(tmp));
2596 kvm->arch.ept_identity_pagetable_done = true;
2599 srcu_read_unlock(&kvm->srcu, idx);
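/*
 * Worked example: with the flags used above -- _PAGE_PRESENT | _PAGE_RW |
 * _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE =
 * 0x1 | 0x2 | 0x4 | 0x20 | 0x40 | 0x80 = 0xe7 -- entry i becomes
 * (i << 22) | 0xe7, i.e. entry 0 is 0x000000e7 and entry 1 is
 * 0x004000e7: 1024 4MB large pages identity-mapping the low 4GB.
 */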
2603 static void seg_setup(int seg)
2605 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2608 vmcs_write16(sf->selector, 0);
2609 vmcs_writel(sf->base, 0);
2610 vmcs_write32(sf->limit, 0xffff);
2611 if (enable_unrestricted_guest) {
2613 if (seg == VCPU_SREG_CS)
2614 ar |= 0x08; /* code segment */
2618 vmcs_write32(sf->ar_bytes, ar);
2621 static int alloc_apic_access_page(struct kvm *kvm)
2623 struct kvm_userspace_memory_region kvm_userspace_mem;
2626 mutex_lock(&kvm->slots_lock);
2627 if (kvm->arch.apic_access_page)
2629 kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
2630 kvm_userspace_mem.flags = 0;
2631 kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
2632 kvm_userspace_mem.memory_size = PAGE_SIZE;
2633 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2637 kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
2639 mutex_unlock(&kvm->slots_lock);
2643 static int alloc_identity_pagetable(struct kvm *kvm)
2645 struct kvm_userspace_memory_region kvm_userspace_mem;
2648 mutex_lock(&kvm->slots_lock);
2649 if (kvm->arch.ept_identity_pagetable)
2651 kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
2652 kvm_userspace_mem.flags = 0;
2653 kvm_userspace_mem.guest_phys_addr =
2654 kvm->arch.ept_identity_map_addr;
2655 kvm_userspace_mem.memory_size = PAGE_SIZE;
2656 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2660 kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
2661 kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
2663 mutex_unlock(&kvm->slots_lock);
2667 static void allocate_vpid(struct vcpu_vmx *vmx)
2674 spin_lock(&vmx_vpid_lock);
2675 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
2676 if (vpid < VMX_NR_VPIDS) {
2678 __set_bit(vpid, vmx_vpid_bitmap);
2680 spin_unlock(&vmx_vpid_lock);
2683 static void free_vpid(struct vcpu_vmx *vmx)
2687 spin_lock(&vmx_vpid_lock);
2689 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
2690 spin_unlock(&vmx_vpid_lock);
2693 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
2695 int f = sizeof(unsigned long);
2697 if (!cpu_has_vmx_msr_bitmap())
2701 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
2702 * have the write-low and read-high bitmap offsets the wrong way round.
2703 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
2705 if (msr <= 0x1fff) {
2706 __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
2707 __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
2708 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2709 msr &= 0x1fff;
2710 __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
2711 __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
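/*
 * Illustrative sketch (an assumption, not part of the driver): querying
 * the same bitmap layout. Each of the four 1K regions holds one bit per
 * MSR; a set bit means the access is intercepted, so the helper above
 * clears bits to pass an MSR straight through to the guest.
 */
#if 0
static bool example_msr_read_intercepted(unsigned long *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		return test_bit(msr, msr_bitmap + 0x000 / f);	/* read-low */
	if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		return test_bit(msr & 0x1fff,
				msr_bitmap + 0x400 / f);	/* read-high */
	return true;	/* out-of-range MSRs are always intercepted */
}
#endif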
2715 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
2718 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
2719 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
2723 * Sets up the vmcs for emulated real mode.
2725 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2727 u32 host_sysenter_cs, msr_low, msr_high;
2733 unsigned long kvm_vmx_return;
2737 vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
2738 vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
2740 if (cpu_has_vmx_msr_bitmap())
2741 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
2743 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
2746 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
2747 vmcs_config.pin_based_exec_ctrl);
2749 exec_control = vmcs_config.cpu_based_exec_ctrl;
2750 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
2751 exec_control &= ~CPU_BASED_TPR_SHADOW;
2752 #ifdef CONFIG_X86_64
2753 exec_control |= CPU_BASED_CR8_STORE_EXITING |
2754 CPU_BASED_CR8_LOAD_EXITING;
2758 exec_control |= CPU_BASED_CR3_STORE_EXITING |
2759 CPU_BASED_CR3_LOAD_EXITING |
2760 CPU_BASED_INVLPG_EXITING;
2761 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
2763 if (cpu_has_secondary_exec_ctrls()) {
2764 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
2765 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2767 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
2769 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
2771 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
2772 enable_unrestricted_guest = 0;
2774 if (!enable_unrestricted_guest)
2775 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2777 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
2778 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2782 vmcs_write32(PLE_GAP, ple_gap);
2783 vmcs_write32(PLE_WINDOW, ple_window);
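/*
 * Usage note (hypothetical invocation): the two values programmed above
 * come from the ple_gap/ple_window module parameters, e.g.
 *   modprobe kvm-intel ple_gap=128 ple_window=4096
 * Setting ple_gap=0 leaves Pause-Loop Exiting disabled altogether.
 */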
2786 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
2787 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
2788 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
2790 vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
2791 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
2792 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
2794 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
2795 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
2796 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
2797 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
2798 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
2799 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
2800 #ifdef CONFIG_X86_64
2801 rdmsrl(MSR_FS_BASE, a);
2802 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
2803 rdmsrl(MSR_GS_BASE, a);
2804 vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
2806 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
2807 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
2810 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
2812 native_store_idt(&dt);
2813 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
2815 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
2816 vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
2817 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
2818 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2819 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
2820 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2821 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
2823 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
2824 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
2825 rdmsrl(MSR_IA32_SYSENTER_ESP, a);
2826 vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
2827 rdmsrl(MSR_IA32_SYSENTER_EIP, a);
2828 vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
2830 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
2831 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2832 host_pat = msr_low | ((u64) msr_high << 32);
2833 vmcs_write64(HOST_IA32_PAT, host_pat);
2835 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2836 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2837 host_pat = msr_low | ((u64) msr_high << 32);
2838 /* Write the default value following the host PAT */
2839 vmcs_write64(GUEST_IA32_PAT, host_pat);
2840 /* Keep arch.pat in sync with GUEST_IA32_PAT */
2841 vmx->vcpu.arch.pat = host_pat;
2844 for (i = 0; i < NR_VMX_MSR; ++i) {
2845 u32 index = vmx_msr_index[i];
2846 u32 data_low, data_high;
2849 if (rdmsr_safe(index, &data_low, &data_high) < 0)
2851 if (wrmsr_safe(index, data_low, data_high) < 0)
2853 vmx->guest_msrs[j].index = i;
2854 vmx->guest_msrs[j].data = 0;
2855 vmx->guest_msrs[j].mask = -1ull;
2859 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
2861 /* 22.2.1, 20.8.1 */
2862 vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
2864 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
2865 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
2867 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
2868 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
2870 kvm_write_tsc(&vmx->vcpu, 0);
2875 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2877 struct vcpu_vmx *vmx = to_vmx(vcpu);
2881 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
2883 vmx->rmode.vm86_active = 0;
2885 vmx->soft_vnmi_blocked = 0;
2887 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
2888 kvm_set_cr8(&vmx->vcpu, 0);
2889 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
2890 if (kvm_vcpu_is_bsp(&vmx->vcpu))
2891 msr |= MSR_IA32_APICBASE_BSP;
2892 kvm_set_apic_base(&vmx->vcpu, msr);
2894 ret = fx_init(&vmx->vcpu);
2898 vmx_segment_cache_clear(vmx);
2900 seg_setup(VCPU_SREG_CS);
2902 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
2903 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
2905 if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
2906 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
2907 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
2909 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
2910 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
2913 seg_setup(VCPU_SREG_DS);
2914 seg_setup(VCPU_SREG_ES);
2915 seg_setup(VCPU_SREG_FS);
2916 seg_setup(VCPU_SREG_GS);
2917 seg_setup(VCPU_SREG_SS);
2919 vmcs_write16(GUEST_TR_SELECTOR, 0);
2920 vmcs_writel(GUEST_TR_BASE, 0);
2921 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
2922 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); /* present, 32-bit busy TSS */
2924 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
2925 vmcs_writel(GUEST_LDTR_BASE, 0);
2926 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
2927 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); /* present, LDT */
2929 vmcs_write32(GUEST_SYSENTER_CS, 0);
2930 vmcs_writel(GUEST_SYSENTER_ESP, 0);
2931 vmcs_writel(GUEST_SYSENTER_EIP, 0);
2933 vmcs_writel(GUEST_RFLAGS, 0x02);
2934 if (kvm_vcpu_is_bsp(&vmx->vcpu))
2935 kvm_rip_write(vcpu, 0xfff0);
2937 kvm_rip_write(vcpu, 0);
2938 kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
2940 vmcs_writel(GUEST_DR7, 0x400);
2942 vmcs_writel(GUEST_GDTR_BASE, 0);
2943 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
2945 vmcs_writel(GUEST_IDTR_BASE, 0);
2946 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
2948 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
2949 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
2950 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
2952 /* Special registers */
2953 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
2957 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
2959 if (cpu_has_vmx_tpr_shadow()) {
2960 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
2961 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
2962 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
2963 __pa(vmx->vcpu.arch.apic->regs));
2964 vmcs_write32(TPR_THRESHOLD, 0);
2967 if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2968 vmcs_write64(APIC_ACCESS_ADDR,
2969 page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
2972 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2974 vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
2975 vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
2976 vmx_set_cr4(&vmx->vcpu, 0);
2977 vmx_set_efer(&vmx->vcpu, 0);
2978 vmx_fpu_activate(&vmx->vcpu);
2979 update_exception_bitmap(&vmx->vcpu);
2981 vpid_sync_context(vmx);
2985 /* HACK: Don't enable emulation on guest boot/reset */
2986 vmx->emulation_required = 0;
2992 static void enable_irq_window(struct kvm_vcpu *vcpu)
2994 u32 cpu_based_vm_exec_control;
2996 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2997 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2998 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3001 static void enable_nmi_window(struct kvm_vcpu *vcpu)
3003 u32 cpu_based_vm_exec_control;
3005 if (!cpu_has_virtual_nmis()) {
3006 enable_irq_window(vcpu);
3010 if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
3011 enable_irq_window(vcpu);
3014 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3015 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
3016 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3019 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
3021 struct vcpu_vmx *vmx = to_vmx(vcpu);
3023 int irq = vcpu->arch.interrupt.nr;
3025 trace_kvm_inj_virq(irq);
3027 ++vcpu->stat.irq_injections;
3028 if (vmx->rmode.vm86_active) {
3030 if (vcpu->arch.interrupt.soft)
3031 inc_eip = vcpu->arch.event_exit_inst_len;
3032 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
3033 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3036 intr = irq | INTR_INFO_VALID_MASK;
3037 if (vcpu->arch.interrupt.soft) {
3038 intr |= INTR_TYPE_SOFT_INTR;
3039 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
3040 vmx->vcpu.arch.event_exit_inst_len);
3042 intr |= INTR_TYPE_EXT_INTR;
3043 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
3044 vmx_clear_hlt(vcpu);
3047 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
3049 struct vcpu_vmx *vmx = to_vmx(vcpu);
3051 if (!cpu_has_virtual_nmis()) {
3053 * Tracking the NMI-blocked state in software is built upon
3054 * finding the next open IRQ window. This, in turn, depends on
3055 * well-behaving guests: They have to keep IRQs disabled at
3056 * least as long as the NMI handler runs. Otherwise we may
3057 * cause NMI nesting, maybe breaking the guest. But as this is
3058 * highly unlikely, we can live with the residual risk.
3060 vmx->soft_vnmi_blocked = 1;
3061 vmx->vnmi_blocked_time = 0;
3064 ++vcpu->stat.nmi_injections;
3065 vmx->nmi_known_unmasked = false;
3066 if (vmx->rmode.vm86_active) {
3067 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
3068 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3071 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
3072 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
3073 vmx_clear_hlt(vcpu);
3076 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
3078 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
3081 return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
3082 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
3083 | GUEST_INTR_STATE_NMI));
3086 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
3088 if (!cpu_has_virtual_nmis())
3089 return to_vmx(vcpu)->soft_vnmi_blocked;
3090 if (to_vmx(vcpu)->nmi_known_unmasked)
3092 return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
3095 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3097 struct vcpu_vmx *vmx = to_vmx(vcpu);
3099 if (!cpu_has_virtual_nmis()) {
3100 if (vmx->soft_vnmi_blocked != masked) {
3101 vmx->soft_vnmi_blocked = masked;
3102 vmx->vnmi_blocked_time = 0;
3105 vmx->nmi_known_unmasked = !masked;
3107 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3108 GUEST_INTR_STATE_NMI);
3110 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
3111 GUEST_INTR_STATE_NMI);
3115 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
3117 return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
3118 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
3119 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
3122 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
3125 struct kvm_userspace_memory_region tss_mem = {
3126 .slot = TSS_PRIVATE_MEMSLOT,
3127 .guest_phys_addr = addr,
3128 .memory_size = PAGE_SIZE * 3,
3132 ret = kvm_set_memory_region(kvm, &tss_mem, 0);
3135 kvm->arch.tss_addr = addr;
3136 if (!init_rmode_tss(kvm))
3142 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
3143 int vec, u32 err_code)
3146 * Instructions with the address-size override prefix (opcode 0x67)
3147 * cause an #SS fault with error code 0 in VM86 mode.
3149 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
3150 if (emulate_instruction(vcpu, 0) == EMULATE_DONE)
3153 * Forward all other exceptions that are valid in real mode.
3154 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
3155 * the required debugging infrastructure rework.
3159 if (vcpu->guest_debug &
3160 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
3162 kvm_queue_exception(vcpu, vec);
3166 * Update instruction length as we may reinject the exception
3167 * from user space while in guest debugging mode.
3169 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
3170 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3171 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
3182 kvm_queue_exception(vcpu, vec);
3189 * Trigger machine check on the host. We assume all the MSRs are already set up
3190 * by the CPU and that we still run on the same CPU as the MCE occurred on.
3191 * We pass a fake environment to the machine check handler because we want
3192 * the guest to be always treated like user space, no matter what context
3193 * it used internally.
3195 static void kvm_machine_check(void)
3197 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
3198 struct pt_regs regs = {
3199 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
3200 .flags = X86_EFLAGS_IF,
3203 do_machine_check(&regs, 0);
3207 static int handle_machine_check(struct kvm_vcpu *vcpu)
3209 /* already handled by vcpu_run */
3213 static int handle_exception(struct kvm_vcpu *vcpu)
3215 struct vcpu_vmx *vmx = to_vmx(vcpu);
3216 struct kvm_run *kvm_run = vcpu->run;
3217 u32 intr_info, ex_no, error_code;
3218 unsigned long cr2, rip, dr6;
3220 enum emulation_result er;
3222 vect_info = vmx->idt_vectoring_info;
3223 intr_info = vmx->exit_intr_info;
3225 if (is_machine_check(intr_info))
3226 return handle_machine_check(vcpu);
3228 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
3229 !is_page_fault(intr_info)) {
3230 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3231 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
3232 vcpu->run->internal.ndata = 2;
3233 vcpu->run->internal.data[0] = vect_info;
3234 vcpu->run->internal.data[1] = intr_info;
3238 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
3239 return 1; /* already handled by vmx_vcpu_run() */
3241 if (is_no_device(intr_info)) {
3242 vmx_fpu_activate(vcpu);
3246 if (is_invalid_opcode(intr_info)) {
3247 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
3248 if (er != EMULATE_DONE)
3249 kvm_queue_exception(vcpu, UD_VECTOR);
3254 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
3255 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
3256 if (is_page_fault(intr_info)) {
3257 /* EPT won't cause page fault directly */
3260 cr2 = vmcs_readl(EXIT_QUALIFICATION);
3261 trace_kvm_page_fault(cr2, error_code);
3263 if (kvm_event_needs_reinjection(vcpu))
3264 kvm_mmu_unprotect_page_virt(vcpu, cr2);
3265 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
3268 if (vmx->rmode.vm86_active &&
3269 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
3271 if (vcpu->arch.halt_request) {
3272 vcpu->arch.halt_request = 0;
3273 return kvm_emulate_halt(vcpu);
3278 ex_no = intr_info & INTR_INFO_VECTOR_MASK;
3281 dr6 = vmcs_readl(EXIT_QUALIFICATION);
3282 if (!(vcpu->guest_debug &
3283 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
3284 vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
3285 kvm_queue_exception(vcpu, DB_VECTOR);
3288 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
3289 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
3293 * Update instruction length as we may reinject #BP from
3294 * user space while in guest debugging mode. Reading it for
3295 * #DB as well causes no harm, it is not used in that case.
3297 vmx->vcpu.arch.event_exit_inst_len =
3298 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3299 kvm_run->exit_reason = KVM_EXIT_DEBUG;
3300 rip = kvm_rip_read(vcpu);
3301 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
3302 kvm_run->debug.arch.exception = ex_no;
3305 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
3306 kvm_run->ex.exception = ex_no;
3307 kvm_run->ex.error_code = error_code;
3313 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
3315 ++vcpu->stat.irq_exits;
3319 static int handle_triple_fault(struct kvm_vcpu *vcpu)
3321 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
3325 static int handle_io(struct kvm_vcpu *vcpu)
3327 unsigned long exit_qualification;
3328 int size, in, string;
3331 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3332 string = (exit_qualification & 16) != 0;
3333 in = (exit_qualification & 8) != 0;
3335 ++vcpu->stat.io_exits;
3338 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
3340 port = exit_qualification >> 16;
3341 size = (exit_qualification & 7) + 1;
3342 skip_emulated_instruction(vcpu);
3344 return kvm_fast_pio_out(vcpu, size, port);
3348 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
3351 * Patch in the VMCALL instruction:
3353 hypercall[0] = 0x0f;
3354 hypercall[1] = 0x01;
3355 hypercall[2] = 0xc1;
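/*
 * Illustrative guest-side counterpart (an assumption, not part of this
 * file; helper name is made up): a guest issues a hypercall by executing
 * the bytes patched in above (0f 01 c1 == VMCALL), with the hypercall
 * number in rax and the return value coming back in rax.
 */
#if 0
static unsigned long example_guest_hypercall(unsigned long nr)
{
	unsigned long ret;

	asm volatile(".byte 0x0f, 0x01, 0xc1"	/* vmcall */
		     : "=a"(ret) : "a"(nr) : "memory");
	return ret;
}
#endif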
3358 static int handle_cr(struct kvm_vcpu *vcpu)
3360 unsigned long exit_qualification, val;
3365 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3366 cr = exit_qualification & 15;
3367 reg = (exit_qualification >> 8) & 15;
3368 switch ((exit_qualification >> 4) & 3) {
3369 case 0: /* mov to cr */
3370 val = kvm_register_read(vcpu, reg);
3371 trace_kvm_cr_write(cr, val);
3374 err = kvm_set_cr0(vcpu, val);
3375 kvm_complete_insn_gp(vcpu, err);
3378 err = kvm_set_cr3(vcpu, val);
3379 kvm_complete_insn_gp(vcpu, err);
3382 err = kvm_set_cr4(vcpu, val);
3383 kvm_complete_insn_gp(vcpu, err);
3386 u8 cr8_prev = kvm_get_cr8(vcpu);
3387 u8 cr8 = kvm_register_read(vcpu, reg);
3388 err = kvm_set_cr8(vcpu, cr8);
3389 kvm_complete_insn_gp(vcpu, err);
3390 if (irqchip_in_kernel(vcpu->kvm))
3392 if (cr8_prev <= cr8)
3394 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
3400 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
3401 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
3402 skip_emulated_instruction(vcpu);
3403 vmx_fpu_activate(vcpu);
3405 case 1: /*mov from cr*/
3408 val = kvm_read_cr3(vcpu);
3409 kvm_register_write(vcpu, reg, val);
3410 trace_kvm_cr_read(cr, val);
3411 skip_emulated_instruction(vcpu);
3414 val = kvm_get_cr8(vcpu);
3415 kvm_register_write(vcpu, reg, val);
3416 trace_kvm_cr_read(cr, val);
3417 skip_emulated_instruction(vcpu);
3422 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
3423 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
3424 kvm_lmsw(vcpu, val);
3426 skip_emulated_instruction(vcpu);
3431 vcpu->run->exit_reason = 0;
3432 pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
3433 (int)(exit_qualification >> 4) & 3, cr);
3437 static int handle_dr(struct kvm_vcpu *vcpu)
3439 unsigned long exit_qualification;
3442 /* Do not handle if the CPL > 0, will trigger GP on re-entry */
3443 if (!kvm_require_cpl(vcpu, 0))
3445 dr = vmcs_readl(GUEST_DR7);
3448 * As the vm-exit takes precedence over the debug trap, we
3449 * need to emulate the latter, either for the host or the
3450 * guest debugging itself.
3452 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
3453 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
3454 vcpu->run->debug.arch.dr7 = dr;
3455 vcpu->run->debug.arch.pc =
3456 vmcs_readl(GUEST_CS_BASE) +
3457 vmcs_readl(GUEST_RIP);
3458 vcpu->run->debug.arch.exception = DB_VECTOR;
3459 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
3462 vcpu->arch.dr7 &= ~DR7_GD;
3463 vcpu->arch.dr6 |= DR6_BD;
3464 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
3465 kvm_queue_exception(vcpu, DB_VECTOR);
3470 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3471 dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
3472 reg = DEBUG_REG_ACCESS_REG(exit_qualification);
3473 if (exit_qualification & TYPE_MOV_FROM_DR) {
3475 if (!kvm_get_dr(vcpu, dr, &val))
3476 kvm_register_write(vcpu, reg, val);
3478 kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
3479 skip_emulated_instruction(vcpu);
3483 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
3485 vmcs_writel(GUEST_DR7, val);
3488 static int handle_cpuid(struct kvm_vcpu *vcpu)
3490 kvm_emulate_cpuid(vcpu);
3494 static int handle_rdmsr(struct kvm_vcpu *vcpu)
3496 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3499 if (vmx_get_msr(vcpu, ecx, &data)) {
3500 trace_kvm_msr_read_ex(ecx);
3501 kvm_inject_gp(vcpu, 0);
3505 trace_kvm_msr_read(ecx, data);
3507 /* FIXME: handling of bits 32:63 of rax, rdx */
3508 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
3509 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
3510 skip_emulated_instruction(vcpu);
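/*
 * Worked example: vmx_get_msr() returning data = 0x1122334455667788
 * leaves RAX = 0x55667788 and RDX = 0x11223344, matching the EDX:EAX
 * convention of the RDMSR instruction.
 */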
3514 static int handle_wrmsr(struct kvm_vcpu *vcpu)
3516 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3517 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
3518 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
3520 if (vmx_set_msr(vcpu, ecx, data) != 0) {
3521 trace_kvm_msr_write_ex(ecx, data);
3522 kvm_inject_gp(vcpu, 0);
3526 trace_kvm_msr_write(ecx, data);
3527 skip_emulated_instruction(vcpu);
3531 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
3533 kvm_make_request(KVM_REQ_EVENT, vcpu);
3537 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
3539 u32 cpu_based_vm_exec_control;
3541 /* clear pending irq */
3542 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3543 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
3544 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3546 kvm_make_request(KVM_REQ_EVENT, vcpu);
3548 ++vcpu->stat.irq_window_exits;
3551 * If the user space waits to inject interrupts, exit as soon as
3552 * possible.
3554 if (!irqchip_in_kernel(vcpu->kvm) &&
3555 vcpu->run->request_interrupt_window &&
3556 !kvm_cpu_has_interrupt(vcpu)) {
3557 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
3563 static int handle_halt(struct kvm_vcpu *vcpu)
3565 skip_emulated_instruction(vcpu);
3566 return kvm_emulate_halt(vcpu);
3569 static int handle_vmcall(struct kvm_vcpu *vcpu)
3571 skip_emulated_instruction(vcpu);
3572 kvm_emulate_hypercall(vcpu);
3576 static int handle_vmx_insn(struct kvm_vcpu *vcpu)
3578 kvm_queue_exception(vcpu, UD_VECTOR);
3582 static int handle_invd(struct kvm_vcpu *vcpu)
3584 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
3587 static int handle_invlpg(struct kvm_vcpu *vcpu)
3589 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3591 kvm_mmu_invlpg(vcpu, exit_qualification);
3592 skip_emulated_instruction(vcpu);
3596 static int handle_wbinvd(struct kvm_vcpu *vcpu)
3598 skip_emulated_instruction(vcpu);
3599 kvm_emulate_wbinvd(vcpu);
3603 static int handle_xsetbv(struct kvm_vcpu *vcpu)
3605 u64 new_bv = kvm_read_edx_eax(vcpu);
3606 u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3608 if (kvm_set_xcr(vcpu, index, new_bv) == 0)
3609 skip_emulated_instruction(vcpu);
3613 static int handle_apic_access(struct kvm_vcpu *vcpu)
3615 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
3618 static int handle_task_switch(struct kvm_vcpu *vcpu)
3620 struct vcpu_vmx *vmx = to_vmx(vcpu);
3621 unsigned long exit_qualification;
3622 bool has_error_code = false;
3625 int reason, type, idt_v;
3627 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
3628 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
3630 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3632 reason = (u32)exit_qualification >> 30;
3633 if (reason == TASK_SWITCH_GATE && idt_v) {
3635 case INTR_TYPE_NMI_INTR:
3636 vcpu->arch.nmi_injected = false;
3637 vmx_set_nmi_mask(vcpu, true);
3639 case INTR_TYPE_EXT_INTR:
3640 case INTR_TYPE_SOFT_INTR:
3641 kvm_clear_interrupt_queue(vcpu);
3643 case INTR_TYPE_HARD_EXCEPTION:
3644 if (vmx->idt_vectoring_info &
3645 VECTORING_INFO_DELIVER_CODE_MASK) {
3646 has_error_code = true;
3648 vmcs_read32(IDT_VECTORING_ERROR_CODE);
3651 case INTR_TYPE_SOFT_EXCEPTION:
3652 kvm_clear_exception_queue(vcpu);
3658 tss_selector = exit_qualification;
3660 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
3661 type != INTR_TYPE_EXT_INTR &&
3662 type != INTR_TYPE_NMI_INTR))
3663 skip_emulated_instruction(vcpu);
3665 if (kvm_task_switch(vcpu, tss_selector, reason,
3666 has_error_code, error_code) == EMULATE_FAIL) {
3667 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3668 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3669 vcpu->run->internal.ndata = 0;
3673 /* clear all local breakpoint enable flags */
3674 vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
3677 * TODO: What about debug traps on tss switch?
3678 * Are we supposed to inject them and update dr6?
3684 static int handle_ept_violation(struct kvm_vcpu *vcpu)
3686 unsigned long exit_qualification;
3690 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3692 if (exit_qualification & (1 << 6)) {
3693 printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
3697 gla_validity = (exit_qualification >> 7) & 0x3;
3698 if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
3699 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
3700 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
3701 (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
3702 vmcs_readl(GUEST_LINEAR_ADDRESS));
3703 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
3704 (long unsigned int)exit_qualification);
3705 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
3706 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
3710 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3711 trace_kvm_page_fault(gpa, exit_qualification);
3712 return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
3715 static u64 ept_rsvd_mask(u64 spte, int level)
3720 for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
3721 mask |= (1ULL << i);
3724 /* bits 7:3 reserved */
3726 else if (level == 2) {
3727 if (spte & (1ULL << 7))
3728 /* 2MB page, bits 20:12 reserved */
3731 /* bits 6:3 reserved */
3738 static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
3741 printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
3743 /* 010b (write-only) */
3744 WARN_ON((spte & 0x7) == 0x2);
3746 /* 110b (write/execute) */
3747 WARN_ON((spte & 0x7) == 0x6);
3749 /* 100b (execute-only) and value not supported by logical processor */
3750 if (!cpu_has_vmx_ept_execute_only())
3751 WARN_ON((spte & 0x7) == 0x4);
3755 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
3757 if (rsvd_bits != 0) {
3758 printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
3759 __func__, rsvd_bits);
3763 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
3764 u64 ept_mem_type = (spte & 0x38) >> 3;
3766 if (ept_mem_type == 2 || ept_mem_type == 3 ||
3767 ept_mem_type == 7) {
3768 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
3769 __func__, ept_mem_type);
3776 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
3782 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3784 printk(KERN_ERR "EPT: Misconfiguration.\n");
3785 printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
3787 nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
3789 for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
3790 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
3792 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
3793 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
3798 static int handle_nmi_window(struct kvm_vcpu *vcpu)
3800 u32 cpu_based_vm_exec_control;
3802 /* clear pending NMI */
3803 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3804 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
3805 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3806 ++vcpu->stat.nmi_window_exits;
3807 kvm_make_request(KVM_REQ_EVENT, vcpu);
3812 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
3814 struct vcpu_vmx *vmx = to_vmx(vcpu);
3815 enum emulation_result err = EMULATE_DONE;
3818 bool intr_window_requested;
3820 cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3821 intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
3823 while (!guest_state_valid(vcpu)) {
3824 if (intr_window_requested
3825 && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
3826 return handle_interrupt_window(&vmx->vcpu);
3828 err = emulate_instruction(vcpu, 0);
3830 if (err == EMULATE_DO_MMIO) {
3835 if (err != EMULATE_DONE)
3838 if (signal_pending(current))
3844 vmx->emulation_required = 0;
3850 * Indicate a busy-waiting vcpu in spinlock. We do not enable plain PAUSE
3851 * exiting, so we only get here on CPUs with Pause-Loop Exiting.
3853 static int handle_pause(struct kvm_vcpu *vcpu)
3855 skip_emulated_instruction(vcpu);
3856 kvm_vcpu_on_spin(vcpu);
3861 static int handle_invalid_op(struct kvm_vcpu *vcpu)
3863 kvm_queue_exception(vcpu, UD_VECTOR);
3868 * The exit handlers return 1 if the exit was handled fully and guest execution
3869 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
3870 * to be done to userspace and return 0.
3872 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
3873 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
3874 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
3875 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
3876 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
3877 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
3878 [EXIT_REASON_CR_ACCESS] = handle_cr,
3879 [EXIT_REASON_DR_ACCESS] = handle_dr,
3880 [EXIT_REASON_CPUID] = handle_cpuid,
3881 [EXIT_REASON_MSR_READ] = handle_rdmsr,
3882 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
3883 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
3884 [EXIT_REASON_HLT] = handle_halt,
3885 [EXIT_REASON_INVD] = handle_invd,
3886 [EXIT_REASON_INVLPG] = handle_invlpg,
3887 [EXIT_REASON_VMCALL] = handle_vmcall,
3888 [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
3889 [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
3890 [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
3891 [EXIT_REASON_VMPTRST] = handle_vmx_insn,
3892 [EXIT_REASON_VMREAD] = handle_vmx_insn,
3893 [EXIT_REASON_VMRESUME] = handle_vmx_insn,
3894 [EXIT_REASON_VMWRITE] = handle_vmx_insn,
3895 [EXIT_REASON_VMOFF] = handle_vmx_insn,
3896 [EXIT_REASON_VMON] = handle_vmx_insn,
3897 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
3898 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
3899 [EXIT_REASON_WBINVD] = handle_wbinvd,
3900 [EXIT_REASON_XSETBV] = handle_xsetbv,
3901 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
3902 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
3903 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
3904 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
3905 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
3906 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
3907 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
3910 static const int kvm_vmx_max_exit_handlers =
3911 ARRAY_SIZE(kvm_vmx_exit_handlers);
3913 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
3915 *info1 = vmcs_readl(EXIT_QUALIFICATION);
3916 *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
3920 * The guest has exited. See if we can fix it or if we need userspace
3921 * assistance.
3923 static int vmx_handle_exit(struct kvm_vcpu *vcpu)
3925 struct vcpu_vmx *vmx = to_vmx(vcpu);
3926 u32 exit_reason = vmx->exit_reason;
3927 u32 vectoring_info = vmx->idt_vectoring_info;
3929 trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
3931 /* If guest state is invalid, start emulating */
3932 if (vmx->emulation_required && emulate_invalid_guest_state)
3933 return handle_invalid_guest_state(vcpu);
3935 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
3936 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3937 vcpu->run->fail_entry.hardware_entry_failure_reason
3942 if (unlikely(vmx->fail)) {
3943 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3944 vcpu->run->fail_entry.hardware_entry_failure_reason
3945 = vmcs_read32(VM_INSTRUCTION_ERROR);
3949 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
3950 (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
3951 exit_reason != EXIT_REASON_EPT_VIOLATION &&
3952 exit_reason != EXIT_REASON_TASK_SWITCH))
3953 printk(KERN_WARNING "%s: unexpected, valid vectoring info "
3954 "(0x%x) and exit reason is 0x%x\n",
3955 __func__, vectoring_info, exit_reason);
3957 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
3958 if (vmx_interrupt_allowed(vcpu)) {
3959 vmx->soft_vnmi_blocked = 0;
3960 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
3961 vcpu->arch.nmi_pending) {
3963 * This CPU doesn't help us find the end of an
3964 * NMI-blocked window if the guest runs with IRQs
3965 * disabled. So we pull the trigger after 1 s of
3966 * futile waiting, but inform the user about this.
3968 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
3969 "state on VCPU %d after 1 s timeout\n",
3970 __func__, vcpu->vcpu_id);
3971 vmx->soft_vnmi_blocked = 0;
3975 if (exit_reason < kvm_vmx_max_exit_handlers
3976 && kvm_vmx_exit_handlers[exit_reason])
3977 return kvm_vmx_exit_handlers[exit_reason](vcpu);
3979 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
3980 vcpu->run->hw.hardware_exit_reason = exit_reason;
3985 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3987 if (irr == -1 || tpr < irr) {
3988 vmcs_write32(TPR_THRESHOLD, 0);
3992 vmcs_write32(TPR_THRESHOLD, irr);
3995 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
3999 if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
4000 || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
4003 vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
4004 exit_intr_info = vmx->exit_intr_info;
4006 /* Handle machine checks before interrupts are enabled */
4007 if (is_machine_check(exit_intr_info))
4008 kvm_machine_check();
4010 /* We need to handle NMIs before interrupts are enabled */
4011 if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
4012 (exit_intr_info & INTR_INFO_VALID_MASK)) {
4013 kvm_before_handle_nmi(&vmx->vcpu);
4015 kvm_after_handle_nmi(&vmx->vcpu);
4019 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
4024 bool idtv_info_valid;
4026 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
4028 if (cpu_has_virtual_nmis()) {
4029 if (vmx->nmi_known_unmasked)
4032 * Can't use vmx->exit_intr_info since we're not sure what
4033 * the exit reason is.
4035 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
4036 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
4037 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
4039 * SDM 3: 27.7.1.2 (September 2008)
4040 * Re-set bit "block by NMI" before VM entry if vmexit caused by
4041 * a guest IRET fault.
4042 * SDM 3: 23.2.2 (September 2008)
4043 * Bit 12 is undefined in any of the following cases:
4044 * If the VM exit sets the valid bit in the IDT-vectoring
4045 * information field.
4046 * If the VM exit is due to a double fault.
4048 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
4049 vector != DF_VECTOR && !idtv_info_valid)
4050 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
4051 GUEST_INTR_STATE_NMI);
4053 vmx->nmi_known_unmasked =
4054 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
4055 & GUEST_INTR_STATE_NMI);
4056 } else if (unlikely(vmx->soft_vnmi_blocked))
4057 vmx->vnmi_blocked_time +=
4058 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
4061 static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
4062 u32 idt_vectoring_info,
4063 int instr_len_field,
4064 int error_code_field)
4068 bool idtv_info_valid;
4070 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
4072 vmx->vcpu.arch.nmi_injected = false;
4073 kvm_clear_exception_queue(&vmx->vcpu);
4074 kvm_clear_interrupt_queue(&vmx->vcpu);
4076 if (!idtv_info_valid)
4079 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
4081 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
4082 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
4085 case INTR_TYPE_NMI_INTR:
4086 vmx->vcpu.arch.nmi_injected = true;
4088 * SDM 3: 27.7.1.2 (September 2008)
4089 * Clear bit "block by NMI" before VM entry if a NMI
4092 vmx_set_nmi_mask(&vmx->vcpu, false);
4094 case INTR_TYPE_SOFT_EXCEPTION:
4095 vmx->vcpu.arch.event_exit_inst_len =
4096 vmcs_read32(instr_len_field);
4098 case INTR_TYPE_HARD_EXCEPTION:
4099 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
4100 u32 err = vmcs_read32(error_code_field);
4101 kvm_queue_exception_e(&vmx->vcpu, vector, err);
4103 kvm_queue_exception(&vmx->vcpu, vector);
4105 case INTR_TYPE_SOFT_INTR:
4106 vmx->vcpu.arch.event_exit_inst_len =
4107 vmcs_read32(instr_len_field);
4109 case INTR_TYPE_EXT_INTR:
4110 kvm_queue_interrupt(&vmx->vcpu, vector,
4111 type == INTR_TYPE_SOFT_INTR);
4118 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
4120 __vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
4121 VM_EXIT_INSTRUCTION_LEN,
4122 IDT_VECTORING_ERROR_CODE);
4125 static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
4127 __vmx_complete_interrupts(to_vmx(vcpu),
4128 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
4129 VM_ENTRY_INSTRUCTION_LEN,
4130 VM_ENTRY_EXCEPTION_ERROR_CODE);
4132 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
4135 #ifdef CONFIG_X86_64
4136 #define R "r"
4137 #define Q "q"
4138 #else
4139 #define R "e"
4140 #define Q "l"
4141 #endif
4143 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
4145 struct vcpu_vmx *vmx = to_vmx(vcpu);
4147 /* Record the guest's net vcpu time for enforced NMI injections. */
4148 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
4149 vmx->entry_time = ktime_get();
4151 /* Don't enter VMX if guest state is invalid, let the exit handler
4152 start emulation until we arrive back at a valid state */
4153 if (vmx->emulation_required && emulate_invalid_guest_state)
4156 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
4157 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
4158 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
4159 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
4161 /* When single-stepping over STI and MOV SS, we must clear the
4162 * corresponding interruptibility bits in the guest state. Otherwise
4163 * vmentry fails as it then expects bit 14 (BS) in pending debug
4164 * exceptions being set, but that's not correct for the guest debugging
4165 * case. */
4166 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
4167 vmx_set_interrupt_shadow(vcpu, 0);
4170 /* Store host registers */
4171 "push %%"R"dx; push %%"R"bp;"
4172 "push %%"R"cx \n\t" /* placeholder for guest rcx */
4174 "cmp %%"R"sp, %c[host_rsp](%0) \n\t"
4176 "mov %%"R"sp, %c[host_rsp](%0) \n\t"
4177 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
4179 /* Reload cr2 if changed */
4180 "mov %c[cr2](%0), %%"R"ax \n\t"
4181 "mov %%cr2, %%"R"dx \n\t"
4182 "cmp %%"R"ax, %%"R"dx \n\t"
4184 "mov %%"R"ax, %%cr2 \n\t"
4186 /* Check if vmlaunch or vmresume is needed */
4187 "cmpl $0, %c[launched](%0) \n\t"
4188 /* Load guest registers. Don't clobber flags. */
4189 "mov %c[rax](%0), %%"R"ax \n\t"
4190 "mov %c[rbx](%0), %%"R"bx \n\t"
4191 "mov %c[rdx](%0), %%"R"dx \n\t"
4192 "mov %c[rsi](%0), %%"R"si \n\t"
4193 "mov %c[rdi](%0), %%"R"di \n\t"
4194 "mov %c[rbp](%0), %%"R"bp \n\t"
4195 #ifdef CONFIG_X86_64
4196 "mov %c[r8](%0), %%r8 \n\t"
4197 "mov %c[r9](%0), %%r9 \n\t"
4198 "mov %c[r10](%0), %%r10 \n\t"
4199 "mov %c[r11](%0), %%r11 \n\t"
4200 "mov %c[r12](%0), %%r12 \n\t"
4201 "mov %c[r13](%0), %%r13 \n\t"
4202 "mov %c[r14](%0), %%r14 \n\t"
4203 "mov %c[r15](%0), %%r15 \n\t"
4205 "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
4207 /* Enter guest mode */
4208 "jne .Llaunched \n\t"
4209 __ex(ASM_VMX_VMLAUNCH) "\n\t"
4210 "jmp .Lkvm_vmx_return \n\t"
4211 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
4212 ".Lkvm_vmx_return: "
4213 /* Save guest registers, load host registers, keep flags */
4214 "mov %0, %c[wordsize](%%"R"sp) \n\t"
4216 "mov %%"R"ax, %c[rax](%0) \n\t"
4217 "mov %%"R"bx, %c[rbx](%0) \n\t"
4218 "pop"Q" %c[rcx](%0) \n\t"
4219 "mov %%"R"dx, %c[rdx](%0) \n\t"
4220 "mov %%"R"si, %c[rsi](%0) \n\t"
4221 "mov %%"R"di, %c[rdi](%0) \n\t"
4222 "mov %%"R"bp, %c[rbp](%0) \n\t"
4223 #ifdef CONFIG_X86_64
4224 "mov %%r8, %c[r8](%0) \n\t"
4225 "mov %%r9, %c[r9](%0) \n\t"
4226 "mov %%r10, %c[r10](%0) \n\t"
4227 "mov %%r11, %c[r11](%0) \n\t"
4228 "mov %%r12, %c[r12](%0) \n\t"
4229 "mov %%r13, %c[r13](%0) \n\t"
4230 "mov %%r14, %c[r14](%0) \n\t"
4231 "mov %%r15, %c[r15](%0) \n\t"
4233 "mov %%cr2, %%"R"ax \n\t"
4234 "mov %%"R"ax, %c[cr2](%0) \n\t"
4236 "pop %%"R"bp; pop %%"R"dx \n\t"
4237 "setbe %c[fail](%0) \n\t"
4238 : : "c"(vmx), "d"((unsigned long)HOST_RSP),
4239 [launched]"i"(offsetof(struct vcpu_vmx, launched)),
4240 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
4241 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
4242 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
4243 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
4244 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
4245 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
4246 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
4247 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
4248 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
4249 #ifdef CONFIG_X86_64
4250 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
4251 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
4252 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
4253 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
4254 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
4255 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
4256 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
4257 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
4259 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
4260 [wordsize]"i"(sizeof(ulong))
4262 , R"ax", R"bx", R"di", R"si"
4263 #ifdef CONFIG_X86_64
4264 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
4268 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
4269 | (1 << VCPU_EXREG_RFLAGS)
4270 | (1 << VCPU_EXREG_CPL)
4271 | (1 << VCPU_EXREG_PDPTR)
4272 | (1 << VCPU_EXREG_SEGMENTS)
4273 | (1 << VCPU_EXREG_CR3));
4274 vcpu->arch.regs_dirty = 0;
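/*
 * The guest GPRs were just saved into vcpu->arch.regs by the asm above,
 * so they are marked available; RIP, RSP, RFLAGS, CR3, the PDPTRs and
 * the segment/CPL caches are marked stale and re-read lazily from the
 * VMCS on first use.
 */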
4276 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
4278 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
4281 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
4283 vmx_complete_atomic_exit(vmx);
4284 vmx_recover_nmi_blocking(vmx);
4285 vmx_complete_interrupts(vmx);
4291 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
4293 struct vcpu_vmx *vmx = to_vmx(vcpu);
4297 free_vmcs(vmx->vmcs);
4302 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
4304 struct vcpu_vmx *vmx = to_vmx(vcpu);
4307 vmx_free_vmcs(vcpu);
4308 kfree(vmx->guest_msrs);
4309 kvm_vcpu_uninit(vcpu);
4310 kmem_cache_free(kvm_vcpu_cache, vmx);
4313 static inline void vmcs_init(struct vmcs *vmcs)
4315 u64 phys_addr = __pa(per_cpu(vmxarea, raw_smp_processor_id()));
4318 kvm_cpu_vmxon(phys_addr);
4326 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
4329 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
4333 return ERR_PTR(-ENOMEM);
4337 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
4341 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
4343 if (!vmx->guest_msrs) {
4347 vmx->vmcs = alloc_vmcs();
4351 vmcs_init(vmx->vmcs);
4354 vmx_vcpu_load(&vmx->vcpu, cpu);
4355 vmx->vcpu.cpu = cpu;
4356 err = vmx_vcpu_setup(vmx);
4357 vmx_vcpu_put(&vmx->vcpu);
4361 if (vm_need_virtualize_apic_accesses(kvm))
4362 err = alloc_apic_access_page(kvm);
4367 if (!kvm->arch.ept_identity_map_addr)
4368 kvm->arch.ept_identity_map_addr =
4369 VMX_EPT_IDENTITY_PAGETABLE_ADDR;
4371 if (alloc_identity_pagetable(kvm) != 0)
4373 if (!init_rmode_identity_map(kvm))
4380 free_vmcs(vmx->vmcs);
4382 kfree(vmx->guest_msrs);
4384 kvm_vcpu_uninit(&vmx->vcpu);
4387 kmem_cache_free(kvm_vcpu_cache, vmx);
4388 return ERR_PTR(err);
4391 static void __init vmx_check_processor_compat(void *rtn)
4393 struct vmcs_config vmcs_conf;
4396 if (setup_vmcs_config(&vmcs_conf) < 0)
4398 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
4399 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
4400 smp_processor_id());
4405 static int get_ept_level(void)
4407 return VMX_EPT_DEFAULT_GAW + 1;
4410 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4414 /* For VT-d and EPT combination
4415 * 1. MMIO: always map as UC
4416 * 2. EPT with VT-d:
4417 * a. VT-d without snooping control feature: can't guarantee the
4418 * result, try to trust guest.
4419 * b. VT-d with snooping control feature: snooping control feature of
4420 * VT-d engine can guarantee the cache correctness. Just set it
4421 * to WB to keep consistent with host. So the same as item 3.
4422 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
4423 * consistent with host MTRR
4426 ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
4427 else if (vcpu->kvm->arch.iommu_domain &&
4428 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
4429 ret = kvm_get_guest_memory_type(vcpu, gfn) <<
4430 VMX_EPT_MT_EPTE_SHIFT;
4432 ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
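/*
 * Worked examples (assuming the usual EPT PTE layout, memtype in bits
 * 5:3 and IPAT in bit 6): MMIO yields UC (0 << 3 = 0x0), while plain
 * EPT without VT-d yields WB with IPAT set, (6 << 3) | (1 << 6) = 0x70.
 */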
4438 #define _ER(x) { EXIT_REASON_##x, #x }
4440 static const struct trace_print_flags vmx_exit_reasons_str[] = {
4442 _ER(EXTERNAL_INTERRUPT),
4444 _ER(PENDING_INTERRUPT),
4464 _ER(IO_INSTRUCTION),
4467 _ER(MWAIT_INSTRUCTION),
4468 _ER(MONITOR_INSTRUCTION),
4469 _ER(PAUSE_INSTRUCTION),
4470 _ER(MCE_DURING_VMENTRY),
4471 _ER(TPR_BELOW_THRESHOLD),
4481 static int vmx_get_lpage_level(void)
4483 if (enable_ept && !cpu_has_vmx_ept_1g_page())
4484 return PT_DIRECTORY_LEVEL;
4486 /* For shadow and EPT supported 1GB page */
4487 return PT_PDPE_LEVEL;
4490 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
4492 struct kvm_cpuid_entry2 *best;
4493 struct vcpu_vmx *vmx = to_vmx(vcpu);
4496 vmx->rdtscp_enabled = false;
4497 if (vmx_rdtscp_supported()) {
4498 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
4499 if (exec_control & SECONDARY_EXEC_RDTSCP) {
4500 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
4501 if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
4502 vmx->rdtscp_enabled = true;
4504 exec_control &= ~SECONDARY_EXEC_RDTSCP;
4505 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
4512 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
4516 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
4517 struct x86_instruction_info *info,
4518 enum x86_intercept_stage stage)
4520 return X86EMUL_CONTINUE;
4523 static struct kvm_x86_ops vmx_x86_ops = {
4524 .cpu_has_kvm_support = cpu_has_kvm_support,
4525 .disabled_by_bios = vmx_disabled_by_bios,
4526 .hardware_setup = hardware_setup,
4527 .hardware_unsetup = hardware_unsetup,
4528 .check_processor_compatibility = vmx_check_processor_compat,
4529 .hardware_enable = hardware_enable,
4530 .hardware_disable = hardware_disable,
4531 .cpu_has_accelerated_tpr = report_flexpriority,
4533 .vcpu_create = vmx_create_vcpu,
4534 .vcpu_free = vmx_free_vcpu,
4535 .vcpu_reset = vmx_vcpu_reset,
4537 .prepare_guest_switch = vmx_save_host_state,
4538 .vcpu_load = vmx_vcpu_load,
4539 .vcpu_put = vmx_vcpu_put,
4541 .set_guest_debug = set_guest_debug,
4542 .get_msr = vmx_get_msr,
4543 .set_msr = vmx_set_msr,
4544 .get_segment_base = vmx_get_segment_base,
4545 .get_segment = vmx_get_segment,
4546 .set_segment = vmx_set_segment,
4547 .get_cpl = vmx_get_cpl,
4548 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
4549 .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
4550 .decache_cr3 = vmx_decache_cr3,
4551 .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
4552 .set_cr0 = vmx_set_cr0,
4553 .set_cr3 = vmx_set_cr3,
4554 .set_cr4 = vmx_set_cr4,
4555 .set_efer = vmx_set_efer,
4556 .get_idt = vmx_get_idt,
4557 .set_idt = vmx_set_idt,
4558 .get_gdt = vmx_get_gdt,
4559 .set_gdt = vmx_set_gdt,
4560 .set_dr7 = vmx_set_dr7,
4561 .cache_reg = vmx_cache_reg,
4562 .get_rflags = vmx_get_rflags,
4563 .set_rflags = vmx_set_rflags,
4564 .fpu_activate = vmx_fpu_activate,
4565 .fpu_deactivate = vmx_fpu_deactivate,
4567 .tlb_flush = vmx_flush_tlb,
4569 .run = vmx_vcpu_run,
4570 .handle_exit = vmx_handle_exit,
4571 .skip_emulated_instruction = skip_emulated_instruction,
4572 .set_interrupt_shadow = vmx_set_interrupt_shadow,
4573 .get_interrupt_shadow = vmx_get_interrupt_shadow,
4574 .patch_hypercall = vmx_patch_hypercall,
4575 .set_irq = vmx_inject_irq,
4576 .set_nmi = vmx_inject_nmi,
4577 .queue_exception = vmx_queue_exception,
4578 .cancel_injection = vmx_cancel_injection,
4579 .interrupt_allowed = vmx_interrupt_allowed,
4580 .nmi_allowed = vmx_nmi_allowed,
4581 .get_nmi_mask = vmx_get_nmi_mask,
4582 .set_nmi_mask = vmx_set_nmi_mask,
4583 .enable_nmi_window = enable_nmi_window,
4584 .enable_irq_window = enable_irq_window,
4585 .update_cr8_intercept = update_cr8_intercept,
4587 .set_tss_addr = vmx_set_tss_addr,
4588 .get_tdp_level = get_ept_level,
4589 .get_mt_mask = vmx_get_mt_mask,
4591 .get_exit_info = vmx_get_exit_info,
4592 .exit_reasons_str = vmx_exit_reasons_str,
4594 .get_lpage_level = vmx_get_lpage_level,
4596 .cpuid_update = vmx_cpuid_update,
4598 .rdtscp_supported = vmx_rdtscp_supported,
4600 .set_supported_cpuid = vmx_set_supported_cpuid,
4602 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
4604 .set_tsc_khz = vmx_set_tsc_khz,
4605 .write_tsc_offset = vmx_write_tsc_offset,
4606 .adjust_tsc_offset = vmx_adjust_tsc_offset,
4607 .compute_tsc_offset = vmx_compute_tsc_offset,
4609 .set_tdp_cr3 = vmx_set_cr3,
4611 .check_intercept = vmx_check_intercept,
4614 static int __init vmx_init(void)
4618 rdmsrl_safe(MSR_EFER, &host_efer);
4620 for (i = 0; i < NR_VMX_MSR; ++i)
4621 kvm_define_shared_msr(i, vmx_msr_index[i]);
4623 vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
4624 if (!vmx_io_bitmap_a)
4627 vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
4628 if (!vmx_io_bitmap_b) {
4633 vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
4634 if (!vmx_msr_bitmap_legacy) {
4639 vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
4640 if (!vmx_msr_bitmap_longmode) {
4646 * Allow direct access to the PC debug port (it is often used for I/O
4647 * delays, but the vmexits simply slow things down).
4649 memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
4650 clear_bit(0x80, vmx_io_bitmap_a);
4652 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
4654 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
4655 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
4657 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
4659 r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
4660 __alignof__(struct vcpu_vmx), THIS_MODULE);
4664 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
4665 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
4666 vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
4667 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
4668 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
4669 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
4672 bypass_guest_pf = 0;
4673 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
4674 VMX_EPT_EXECUTABLE_MASK);
4679 if (bypass_guest_pf)
4680 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
4685 free_page((unsigned long)vmx_msr_bitmap_longmode);
4687 free_page((unsigned long)vmx_msr_bitmap_legacy);
4689 free_page((unsigned long)vmx_io_bitmap_b);
4691 free_page((unsigned long)vmx_io_bitmap_a);
4695 static void __exit vmx_exit(void)
4697 free_page((unsigned long)vmx_msr_bitmap_legacy);
4698 free_page((unsigned long)vmx_msr_bitmap_longmode);
4699 free_page((unsigned long)vmx_io_bitmap_b);
4700 free_page((unsigned long)vmx_io_bitmap_a);
4705 module_init(vmx_init)
4706 module_exit(vmx_exit)