arch/x86/kvm/svm.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * AMD SVM support
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8  *
9  * Authors:
10  *   Yaniv Kamay  <yaniv@qumranet.com>
11  *   Avi Kivity   <avi@qumranet.com>
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  *
16  */
17 #include <linux/kvm_host.h>
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "kvm_cache_regs.h"
22 #include "x86.h"
23 #include "cpuid.h"
24
25 #include <linux/module.h>
26 #include <linux/mod_devicetable.h>
27 #include <linux/kernel.h>
28 #include <linux/vmalloc.h>
29 #include <linux/highmem.h>
30 #include <linux/sched.h>
31 #include <linux/ftrace_event.h>
32 #include <linux/slab.h>
33
34 #include <asm/perf_event.h>
35 #include <asm/tlbflush.h>
36 #include <asm/desc.h>
37 #include <asm/kvm_para.h>
38
39 #include <asm/virtext.h>
40 #include "trace.h"
41
42 #define __ex(x) __kvm_handle_fault_on_reboot(x)
43
44 MODULE_AUTHOR("Qumranet");
45 MODULE_LICENSE("GPL");
46
47 static const struct x86_cpu_id svm_cpu_id[] = {
48         X86_FEATURE_MATCH(X86_FEATURE_SVM),
49         {}
50 };
51 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
52
53 #define IOPM_ALLOC_ORDER 2
54 #define MSRPM_ALLOC_ORDER 1
55
56 #define SEG_TYPE_LDT 2
57 #define SEG_TYPE_BUSY_TSS16 3
58
59 #define SVM_FEATURE_NPT            (1 <<  0)
60 #define SVM_FEATURE_LBRV           (1 <<  1)
61 #define SVM_FEATURE_SVML           (1 <<  2)
62 #define SVM_FEATURE_NRIP           (1 <<  3)
63 #define SVM_FEATURE_TSC_RATE       (1 <<  4)
64 #define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
65 #define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
66 #define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
67 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
68
69 #define NESTED_EXIT_HOST        0       /* Exit handled on host level */
70 #define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
71 #define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */
72
73 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
74
75 #define TSC_RATIO_RSVD          0xffffff0000000000ULL
76 #define TSC_RATIO_MIN           0x0000000000000001ULL
77 #define TSC_RATIO_MAX           0x000000ffffffffffULL
78
79 static bool erratum_383_found __read_mostly;
80
81 static const u32 host_save_user_msrs[] = {
82 #ifdef CONFIG_X86_64
83         MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
84         MSR_FS_BASE,
85 #endif
86         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
87 };
88
89 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
90
91 struct kvm_vcpu;
92
93 struct nested_state {
94         struct vmcb *hsave;
95         u64 hsave_msr;
96         u64 vm_cr_msr;
97         u64 vmcb;
98
99         /* These are the merged vectors */
100         u32 *msrpm;
101
102         /* gpa pointers to the real vectors */
103         u64 vmcb_msrpm;
104         u64 vmcb_iopm;
105
106         /* A VMEXIT is required but not yet emulated */
107         bool exit_required;
108
109         /* cache for intercepts of the guest */
110         u32 intercept_cr;
111         u32 intercept_dr;
112         u32 intercept_exceptions;
113         u64 intercept;
114
115         /* Nested Paging related state */
116         u64 nested_cr3;
117 };
118
119 #define MSRPM_OFFSETS   16
120 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
121
122 /*
123  * Set osvw_len to a higher value when updated Revision Guides
124  * are published and we know what the new status bits are
125  */
126 static uint64_t osvw_len = 4, osvw_status;
127
128 struct vcpu_svm {
129         struct kvm_vcpu vcpu;
130         struct vmcb *vmcb;
131         unsigned long vmcb_pa;
132         struct svm_cpu_data *svm_data;
133         uint64_t asid_generation;
134         uint64_t sysenter_esp;
135         uint64_t sysenter_eip;
136
137         u64 next_rip;
138
139         u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
140         struct {
141                 u16 fs;
142                 u16 gs;
143                 u16 ldt;
144                 u64 gs_base;
145         } host;
146
147         u32 *msrpm;
148
149         ulong nmi_iret_rip;
150
151         struct nested_state nested;
152
153         bool nmi_singlestep;
154
155         unsigned int3_injected;
156         unsigned long int3_rip;
157         u32 apf_reason;
158
159         u64  tsc_ratio;
160 };
161
162 static DEFINE_PER_CPU(u64, current_tsc_ratio);
163 #define TSC_RATIO_DEFAULT       0x0100000000ULL
164
165 #define MSR_INVALID                     0xffffffffU
166
167 static const struct svm_direct_access_msrs {
168         u32 index;   /* Index of the MSR */
169         bool always; /* True if intercept is always on */
170 } direct_access_msrs[] = {
171         { .index = MSR_STAR,                            .always = true  },
172         { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
173 #ifdef CONFIG_X86_64
174         { .index = MSR_GS_BASE,                         .always = true  },
175         { .index = MSR_FS_BASE,                         .always = true  },
176         { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
177         { .index = MSR_LSTAR,                           .always = true  },
178         { .index = MSR_CSTAR,                           .always = true  },
179         { .index = MSR_SYSCALL_MASK,                    .always = true  },
180 #endif
181         { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
182         { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
183         { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
184         { .index = MSR_IA32_LASTINTTOIP,                .always = false },
185         { .index = MSR_INVALID,                         .always = false },
186 };
187
188 /* enable NPT for AMD64 and X86 with PAE */
189 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
190 static bool npt_enabled = true;
191 #else
192 static bool npt_enabled;
193 #endif
194
195 /* allow nested paging (virtualized MMU) for all guests */
196 static int npt = true;
197 module_param(npt, int, S_IRUGO);
198
199 /* allow nested virtualization in KVM/SVM */
200 static int nested = true;
201 module_param(nested, int, S_IRUGO);
202
203 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
204 static void svm_complete_interrupts(struct vcpu_svm *svm);
205
206 static int nested_svm_exit_handled(struct vcpu_svm *svm);
207 static int nested_svm_intercept(struct vcpu_svm *svm);
208 static int nested_svm_vmexit(struct vcpu_svm *svm);
209 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
210                                       bool has_error_code, u32 error_code);
211 static u64 __scale_tsc(u64 ratio, u64 tsc);
212
213 enum {
214         VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
215                             pause filter count */
216         VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
217         VMCB_ASID,       /* ASID */
218         VMCB_INTR,       /* int_ctl, int_vector */
219         VMCB_NPT,        /* npt_en, nCR3, gPAT */
220         VMCB_CR,         /* CR0, CR3, CR4, EFER */
221         VMCB_DR,         /* DR6, DR7 */
222         VMCB_DT,         /* GDT, IDT */
223         VMCB_SEG,        /* CS, DS, SS, ES, CPL */
224         VMCB_CR2,        /* CR2 only */
225         VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
226         VMCB_DIRTY_MAX,
227 };
228
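/*
 * VMCB clean bits tell the CPU which parts of the VMCB it may reuse from its
 * internal cache instead of reloading them from memory.  A set bit means the
 * corresponding group of fields is unchanged ("clean"); mark_dirty() clears
 * the bit whenever software modifies state in that group.
 */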
229 /* TPR and CR2 are always written before VMRUN */
230 #define VMCB_ALWAYS_DIRTY_MASK  ((1U << VMCB_INTR) | (1U << VMCB_CR2))
231
232 static inline void mark_all_dirty(struct vmcb *vmcb)
233 {
234         vmcb->control.clean = 0;
235 }
236
237 static inline void mark_all_clean(struct vmcb *vmcb)
238 {
239         vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
240                                & ~VMCB_ALWAYS_DIRTY_MASK;
241 }
242
243 static inline void mark_dirty(struct vmcb *vmcb, int bit)
244 {
245         vmcb->control.clean &= ~(1 << bit);
246 }
247
248 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
249 {
250         return container_of(vcpu, struct vcpu_svm, vcpu);
251 }
252
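/*
 * While a nested guest is running, the active VMCB has to intercept
 * everything the host needs plus everything the L1 hypervisor requested,
 * so the host (hsave) and nested intercept masks are simply OR'ed together.
 */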
253 static void recalc_intercepts(struct vcpu_svm *svm)
254 {
255         struct vmcb_control_area *c, *h;
256         struct nested_state *g;
257
258         mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
259
260         if (!is_guest_mode(&svm->vcpu))
261                 return;
262
263         c = &svm->vmcb->control;
264         h = &svm->nested.hsave->control;
265         g = &svm->nested;
266
267         c->intercept_cr = h->intercept_cr | g->intercept_cr;
268         c->intercept_dr = h->intercept_dr | g->intercept_dr;
269         c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
270         c->intercept = h->intercept | g->intercept;
271 }
272
273 static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
274 {
275         if (is_guest_mode(&svm->vcpu))
276                 return svm->nested.hsave;
277         else
278                 return svm->vmcb;
279 }
280
281 static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
282 {
283         struct vmcb *vmcb = get_host_vmcb(svm);
284
285         vmcb->control.intercept_cr |= (1U << bit);
286
287         recalc_intercepts(svm);
288 }
289
290 static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
291 {
292         struct vmcb *vmcb = get_host_vmcb(svm);
293
294         vmcb->control.intercept_cr &= ~(1U << bit);
295
296         recalc_intercepts(svm);
297 }
298
299 static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
300 {
301         struct vmcb *vmcb = get_host_vmcb(svm);
302
303         return vmcb->control.intercept_cr & (1U << bit);
304 }
305
306 static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
307 {
308         struct vmcb *vmcb = get_host_vmcb(svm);
309
310         vmcb->control.intercept_dr |= (1U << bit);
311
312         recalc_intercepts(svm);
313 }
314
315 static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
316 {
317         struct vmcb *vmcb = get_host_vmcb(svm);
318
319         vmcb->control.intercept_dr &= ~(1U << bit);
320
321         recalc_intercepts(svm);
322 }
323
324 static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
325 {
326         struct vmcb *vmcb = get_host_vmcb(svm);
327
328         vmcb->control.intercept_exceptions |= (1U << bit);
329
330         recalc_intercepts(svm);
331 }
332
333 static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
334 {
335         struct vmcb *vmcb = get_host_vmcb(svm);
336
337         vmcb->control.intercept_exceptions &= ~(1U << bit);
338
339         recalc_intercepts(svm);
340 }
341
342 static inline void set_intercept(struct vcpu_svm *svm, int bit)
343 {
344         struct vmcb *vmcb = get_host_vmcb(svm);
345
346         vmcb->control.intercept |= (1ULL << bit);
347
348         recalc_intercepts(svm);
349 }
350
351 static inline void clr_intercept(struct vcpu_svm *svm, int bit)
352 {
353         struct vmcb *vmcb = get_host_vmcb(svm);
354
355         vmcb->control.intercept &= ~(1ULL << bit);
356
357         recalc_intercepts(svm);
358 }
359
360 static inline void enable_gif(struct vcpu_svm *svm)
361 {
362         svm->vcpu.arch.hflags |= HF_GIF_MASK;
363 }
364
365 static inline void disable_gif(struct vcpu_svm *svm)
366 {
367         svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
368 }
369
370 static inline bool gif_set(struct vcpu_svm *svm)
371 {
372         return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
373 }
374
375 static unsigned long iopm_base;
376
377 struct kvm_ldttss_desc {
378         u16 limit0;
379         u16 base0;
380         unsigned base1:8, type:5, dpl:2, p:1;
381         unsigned limit1:4, zero0:3, g:1, base2:8;
382         u32 base3;
383         u32 zero1;
384 } __attribute__((packed));
385
386 struct svm_cpu_data {
387         int cpu;
388
389         u64 asid_generation;
390         u32 max_asid;
391         u32 next_asid;
392         struct kvm_ldttss_desc *tss_desc;
393
394         struct page *save_area;
395 };
396
397 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
398
399 struct svm_init_data {
400         int cpu;
401         int r;
402 };
403
404 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
405
406 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
407 #define MSRS_RANGE_SIZE 2048
408 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
409
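/*
 * The MSR permission map covers three MSR ranges (0x00000000-0x00001fff,
 * 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff).  Each range occupies
 * MSRS_RANGE_SIZE bytes of the map with two permission bits per MSR, and
 * this helper converts an MSR number into the u32-sized word offset that
 * holds its bits.
 */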
410 static u32 svm_msrpm_offset(u32 msr)
411 {
412         u32 offset;
413         int i;
414
415         for (i = 0; i < NUM_MSR_MAPS; i++) {
416                 if (msr < msrpm_ranges[i] ||
417                     msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
418                         continue;
419
420                 offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
421                 offset += (i * MSRS_RANGE_SIZE);       /* add range offset */
422
423                 /* Now we have the u8 offset - but need the u32 offset */
424                 return offset / 4;
425         }
426
427         /* MSR not in any range */
428         return MSR_INVALID;
429 }
430
431 #define MAX_INST_SIZE 15
432
433 static inline void clgi(void)
434 {
435         asm volatile (__ex(SVM_CLGI));
436 }
437
438 static inline void stgi(void)
439 {
440         asm volatile (__ex(SVM_STGI));
441 }
442
443 static inline void invlpga(unsigned long addr, u32 asid)
444 {
445         asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
446 }
447
448 static int get_npt_level(void)
449 {
450 #ifdef CONFIG_X86_64
451         return PT64_ROOT_LEVEL;
452 #else
453         return PT32E_ROOT_LEVEL;
454 #endif
455 }
456
457 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
458 {
459         vcpu->arch.efer = efer;
460         if (!npt_enabled && !(efer & EFER_LMA))
461                 efer &= ~EFER_LME;
462
463         to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
464         mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
465 }
466
467 static int is_external_interrupt(u32 info)
468 {
469         info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
470         return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
471 }
472
473 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
474 {
475         struct vcpu_svm *svm = to_svm(vcpu);
476         u32 ret = 0;
477
478         if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
479                 ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
480         return ret & mask;
481 }
482
483 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
484 {
485         struct vcpu_svm *svm = to_svm(vcpu);
486
487         if (mask == 0)
488                 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
489         else
490                 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
491
492 }
493
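/*
 * With the NextRIP save feature the VMCB already contains the address of the
 * instruction following the intercepted one; without it, fall back to the
 * emulator (EMULTYPE_SKIP) just to advance RIP past the instruction.
 */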
494 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
495 {
496         struct vcpu_svm *svm = to_svm(vcpu);
497
498         if (svm->vmcb->control.next_rip != 0)
499                 svm->next_rip = svm->vmcb->control.next_rip;
500
501         if (!svm->next_rip) {
502                 if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
503                                 EMULATE_DONE)
504                         printk(KERN_DEBUG "%s: NOP\n", __func__);
505                 return;
506         }
507         if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
508                 printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
509                        __func__, kvm_rip_read(vcpu), svm->next_rip);
510
511         kvm_rip_write(vcpu, svm->next_rip);
512         svm_set_interrupt_shadow(vcpu, 0);
513 }
514
515 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
516                                 bool has_error_code, u32 error_code,
517                                 bool reinject)
518 {
519         struct vcpu_svm *svm = to_svm(vcpu);
520
521         /*
522          * If we are within a nested VM we'd better #VMEXIT and let the guest
523          * handle the exception
524          */
525         if (!reinject &&
526             nested_svm_check_exception(svm, nr, has_error_code, error_code))
527                 return;
528
529         if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
530                 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
531
532                 /*
533                  * For guest debugging where we have to reinject #BP if some
534                  * INT3 is guest-owned:
535                  * Emulate nRIP by moving RIP forward. Will fail if injection
536                  * raises a fault that is not intercepted. Still better than
537                  * failing in all cases.
538                  */
539                 skip_emulated_instruction(&svm->vcpu);
540                 rip = kvm_rip_read(&svm->vcpu);
541                 svm->int3_rip = rip + svm->vmcb->save.cs.base;
542                 svm->int3_injected = rip - old_rip;
543         }
544
545         svm->vmcb->control.event_inj = nr
546                 | SVM_EVTINJ_VALID
547                 | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
548                 | SVM_EVTINJ_TYPE_EXEPT;
549         svm->vmcb->control.event_inj_err = error_code;
550 }
551
552 static void svm_init_erratum_383(void)
553 {
554         u32 low, high;
555         int err;
556         u64 val;
557
558         if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
559                 return;
560
561         /* Use _safe variants to not break nested virtualization */
562         val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
563         if (err)
564                 return;
565
566         val |= (1ULL << 47);
567
568         low  = lower_32_bits(val);
569         high = upper_32_bits(val);
570
571         native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
572
573         erratum_383_found = true;
574 }
575
576 static void svm_init_osvw(struct kvm_vcpu *vcpu)
577 {
578         /*
579          * Guests should see errata 400 and 415 as fixed (assuming that
580          * HLT and IO instructions are intercepted).
581          */
582         vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
583         vcpu->arch.osvw.status = osvw_status & ~(6ULL);
584
585         /*
586          * By increasing VCPU's osvw.length to 3 we are telling the guest that
587          * all osvw.status bits inside that length, including bit 0 (which is
588          * reserved for erratum 298), are valid. However, if host processor's
589          * osvw_len is 0 then osvw_status[0] carries no information. We need to
590          * be conservative here and therefore we tell the guest that erratum 298
591          * is present (because we really don't know).
592          */
593         if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
594                 vcpu->arch.osvw.status |= 1;
595 }
596
597 static int has_svm(void)
598 {
599         const char *msg;
600
601         if (!cpu_has_svm(&msg)) {
602                 printk(KERN_INFO "has_svm: %s\n", msg);
603                 return 0;
604         }
605
606         return 1;
607 }
608
609 static void svm_hardware_disable(void *garbage)
610 {
611         /* Make sure we clean up behind us */
612         if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
613                 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
614
615         cpu_svm_disable();
616
617         amd_pmu_disable_virt();
618 }
619
620 static int svm_hardware_enable(void *garbage)
621 {
622
623         struct svm_cpu_data *sd;
624         uint64_t efer;
625         struct desc_ptr gdt_descr;
626         struct desc_struct *gdt;
627         int me = raw_smp_processor_id();
628
629         rdmsrl(MSR_EFER, efer);
630         if (efer & EFER_SVME)
631                 return -EBUSY;
632
633         if (!has_svm()) {
634                 pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
635                 return -EINVAL;
636         }
637         sd = per_cpu(svm_data, me);
638         if (!sd) {
639                 pr_err("%s: svm_data is NULL on %d\n", __func__, me);
640                 return -EINVAL;
641         }
642
643         sd->asid_generation = 1;
644         sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
645         sd->next_asid = sd->max_asid + 1;
646
647         native_store_gdt(&gdt_descr);
648         gdt = (struct desc_struct *)gdt_descr.address;
649         sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
650
651         wrmsrl(MSR_EFER, efer | EFER_SVME);
652
653         wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
654
655         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
656                 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
657                 __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
658         }
659
660
661         /*
662          * Get OSVW bits.
663          *
664          * Note that it is possible to have a system with mixed processor
665          * revisions and therefore different OSVW bits. If bits are not the same
666          * on different processors then choose the worst case (i.e. if erratum
667          * is present on one processor and not on another then assume that the
668          * erratum is present everywhere).
669          */
670         if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
671                 uint64_t len, status = 0;
672                 int err;
673
674                 len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
675                 if (!err)
676                         status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
677                                                       &err);
678
679                 if (err)
680                         osvw_status = osvw_len = 0;
681                 else {
682                         if (len < osvw_len)
683                                 osvw_len = len;
684                         osvw_status |= status;
685                         osvw_status &= (1ULL << osvw_len) - 1;
686                 }
687         } else
688                 osvw_status = osvw_len = 0;
689
690         svm_init_erratum_383();
691
692         amd_pmu_enable_virt();
693
694         return 0;
695 }
696
697 static void svm_cpu_uninit(int cpu)
698 {
699         struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
700
701         if (!sd)
702                 return;
703
704         per_cpu(svm_data, raw_smp_processor_id()) = NULL;
705         __free_page(sd->save_area);
706         kfree(sd);
707 }
708
709 static int svm_cpu_init(int cpu)
710 {
711         struct svm_cpu_data *sd;
712         int r;
713
714         sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
715         if (!sd)
716                 return -ENOMEM;
717         sd->cpu = cpu;
718         sd->save_area = alloc_page(GFP_KERNEL);
719         r = -ENOMEM;
720         if (!sd->save_area)
721                 goto err_1;
722
723         per_cpu(svm_data, cpu) = sd;
724
725         return 0;
726
727 err_1:
728         kfree(sd);
729         return r;
730
731 }
732
733 static bool valid_msr_intercept(u32 index)
734 {
735         int i;
736
737         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
738                 if (direct_access_msrs[i].index == index)
739                         return true;
740
741         return false;
742 }
743
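/*
 * Within a permission-map word each MSR owns two adjacent bits: the even bit
 * intercepts reads, the odd bit intercepts writes.  A set bit means
 * "intercept", so allowing direct read/write access clears the bits below.
 */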
744 static void set_msr_interception(u32 *msrpm, unsigned msr,
745                                  int read, int write)
746 {
747         u8 bit_read, bit_write;
748         unsigned long tmp;
749         u32 offset;
750
751         /*
752          * If this warning triggers, extend the direct_access_msrs list at the
753          * beginning of the file.
754          */
755         WARN_ON(!valid_msr_intercept(msr));
756
757         offset    = svm_msrpm_offset(msr);
758         bit_read  = 2 * (msr & 0x0f);
759         bit_write = 2 * (msr & 0x0f) + 1;
760         tmp       = msrpm[offset];
761
762         BUG_ON(offset == MSR_INVALID);
763
764         read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
765         write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
766
767         msrpm[offset] = tmp;
768 }
769
770 static void svm_vcpu_init_msrpm(u32 *msrpm)
771 {
772         int i;
773
774         memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
775
776         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
777                 if (!direct_access_msrs[i].always)
778                         continue;
779
780                 set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
781         }
782 }
783
784 static void add_msr_offset(u32 offset)
785 {
786         int i;
787
788         for (i = 0; i < MSRPM_OFFSETS; ++i) {
789
790                 /* Offset already in list? */
791                 if (msrpm_offsets[i] == offset)
792                         return;
793
794                 /* Slot used by another offset? */
795                 if (msrpm_offsets[i] != MSR_INVALID)
796                         continue;
797
798                 /* Add offset to list */
799                 msrpm_offsets[i] = offset;
800
801                 return;
802         }
803
804         /*
805          * If this BUG triggers, the msrpm_offsets table has overflowed. Just
806          * increase MSRPM_OFFSETS in this case.
807          */
808         BUG();
809 }
810
811 static void init_msrpm_offsets(void)
812 {
813         int i;
814
815         memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
816
817         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
818                 u32 offset;
819
820                 offset = svm_msrpm_offset(direct_access_msrs[i].index);
821                 BUG_ON(offset == MSR_INVALID);
822
823                 add_msr_offset(offset);
824         }
825 }
826
827 static void svm_enable_lbrv(struct vcpu_svm *svm)
828 {
829         u32 *msrpm = svm->msrpm;
830
831         svm->vmcb->control.lbr_ctl = 1;
832         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
833         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
834         set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
835         set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
836 }
837
838 static void svm_disable_lbrv(struct vcpu_svm *svm)
839 {
840         u32 *msrpm = svm->msrpm;
841
842         svm->vmcb->control.lbr_ctl = 0;
843         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
844         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
845         set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
846         set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
847 }
848
849 static __init int svm_hardware_setup(void)
850 {
851         int cpu;
852         struct page *iopm_pages;
853         void *iopm_va;
854         int r;
855
856         iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
857
858         if (!iopm_pages)
859                 return -ENOMEM;
860
861         iopm_va = page_address(iopm_pages);
862         memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
863         iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
864
865         init_msrpm_offsets();
866
867         if (boot_cpu_has(X86_FEATURE_NX))
868                 kvm_enable_efer_bits(EFER_NX);
869
870         if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
871                 kvm_enable_efer_bits(EFER_FFXSR);
872
873         if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
874                 u64 max;
875
876                 kvm_has_tsc_control = true;
877
878                 /*
879                  * Make sure the user can only configure tsc_khz values that
880                  * fit into a signed integer.
881                  * A min value does not need to be calculated because it will always
882                  * be 1 on all machines, and a value of 0 is used to disable
883                  * tsc-scaling for the vcpu.
884                  */
885                 max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
886
887                 kvm_max_guest_tsc_khz = max;
888         }
889
890         if (nested) {
891                 printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
892                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
893         }
894
895         for_each_possible_cpu(cpu) {
896                 r = svm_cpu_init(cpu);
897                 if (r)
898                         goto err;
899         }
900
901         if (!boot_cpu_has(X86_FEATURE_NPT))
902                 npt_enabled = false;
903
904         if (npt_enabled && !npt) {
905                 printk(KERN_INFO "kvm: Nested Paging disabled\n");
906                 npt_enabled = false;
907         }
908
909         if (npt_enabled) {
910                 printk(KERN_INFO "kvm: Nested Paging enabled\n");
911                 kvm_enable_tdp();
912         } else
913                 kvm_disable_tdp();
914
915         return 0;
916
917 err:
918         __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
919         iopm_base = 0;
920         return r;
921 }
922
923 static __exit void svm_hardware_unsetup(void)
924 {
925         int cpu;
926
927         for_each_possible_cpu(cpu)
928                 svm_cpu_uninit(cpu);
929
930         __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
931         iopm_base = 0;
932 }
933
934 static void init_seg(struct vmcb_seg *seg)
935 {
936         seg->selector = 0;
937         seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
938                       SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
939         seg->limit = 0xffff;
940         seg->base = 0;
941 }
942
943 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
944 {
945         seg->selector = 0;
946         seg->attrib = SVM_SELECTOR_P_MASK | type;
947         seg->limit = 0xffff;
948         seg->base = 0;
949 }
950
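/*
 * TSC ratios are 32.32 fixed-point numbers (TSC_RATIO_DEFAULT is 1.0).
 * Compute (tsc * ratio) >> 32 by multiplying the integer and fractional
 * parts of the ratio separately, so no 128-bit intermediate is needed.
 */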
951 static u64 __scale_tsc(u64 ratio, u64 tsc)
952 {
953         u64 mult, frac, _tsc;
954
955         mult  = ratio >> 32;
956         frac  = ratio & ((1ULL << 32) - 1);
957
958         _tsc  = tsc;
959         _tsc *= mult;
960         _tsc += (tsc >> 32) * frac;
961         _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
962
963         return _tsc;
964 }
965
966 static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
967 {
968         struct vcpu_svm *svm = to_svm(vcpu);
969         u64 _tsc = tsc;
970
971         if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
972                 _tsc = __scale_tsc(svm->tsc_ratio, tsc);
973
974         return _tsc;
975 }
976
977 static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
978 {
979         struct vcpu_svm *svm = to_svm(vcpu);
980         u64 ratio;
981         u64 khz;
982
983         /* Guest TSC same frequency as host TSC? */
984         if (!scale) {
985                 svm->tsc_ratio = TSC_RATIO_DEFAULT;
986                 return;
987         }
988
989         /* TSC scaling supported? */
990         if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
991                 if (user_tsc_khz > tsc_khz) {
992                         vcpu->arch.tsc_catchup = 1;
993                         vcpu->arch.tsc_always_catchup = 1;
994                 } else
995                         WARN(1, "user requested TSC rate below hardware speed\n");
996                 return;
997         }
998
999         khz = user_tsc_khz;
1000
1001         /* TSC scaling required - calculate ratio */
1002         ratio = khz << 32;
1003         do_div(ratio, tsc_khz);
1004
1005         if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
1006                 WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
1007                                 user_tsc_khz);
1008                 return;
1009         }
1010         svm->tsc_ratio             = ratio;
1011 }
1012
1013 static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
1014 {
1015         struct vcpu_svm *svm = to_svm(vcpu);
1016
1017         return svm->vmcb->control.tsc_offset;
1018 }
1019
1020 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1021 {
1022         struct vcpu_svm *svm = to_svm(vcpu);
1023         u64 g_tsc_offset = 0;
1024
1025         if (is_guest_mode(vcpu)) {
1026                 g_tsc_offset = svm->vmcb->control.tsc_offset -
1027                                svm->nested.hsave->control.tsc_offset;
1028                 svm->nested.hsave->control.tsc_offset = offset;
1029         } else
1030                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1031                                            svm->vmcb->control.tsc_offset,
1032                                            offset);
1033
1034         svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
1035
1036         mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1037 }
1038
1039 static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
1040 {
1041         struct vcpu_svm *svm = to_svm(vcpu);
1042
1043         WARN_ON(adjustment < 0);
1044         if (host)
1045                 adjustment = svm_scale_tsc(vcpu, adjustment);
1046
1047         svm->vmcb->control.tsc_offset += adjustment;
1048         if (is_guest_mode(vcpu))
1049                 svm->nested.hsave->control.tsc_offset += adjustment;
1050         else
1051                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1052                                      svm->vmcb->control.tsc_offset - adjustment,
1053                                      svm->vmcb->control.tsc_offset);
1054
1055         mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1056 }
1057
1058 static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1059 {
1060         u64 tsc;
1061
1062         tsc = svm_scale_tsc(vcpu, native_read_tsc());
1063
1064         return target_tsc - tsc;
1065 }
1066
1067 static void init_vmcb(struct vcpu_svm *svm)
1068 {
1069         struct vmcb_control_area *control = &svm->vmcb->control;
1070         struct vmcb_save_area *save = &svm->vmcb->save;
1071
1072         svm->vcpu.fpu_active = 1;
1073         svm->vcpu.arch.hflags = 0;
1074
1075         set_cr_intercept(svm, INTERCEPT_CR0_READ);
1076         set_cr_intercept(svm, INTERCEPT_CR3_READ);
1077         set_cr_intercept(svm, INTERCEPT_CR4_READ);
1078         set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1079         set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1080         set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
1081         set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
1082
1083         set_dr_intercept(svm, INTERCEPT_DR0_READ);
1084         set_dr_intercept(svm, INTERCEPT_DR1_READ);
1085         set_dr_intercept(svm, INTERCEPT_DR2_READ);
1086         set_dr_intercept(svm, INTERCEPT_DR3_READ);
1087         set_dr_intercept(svm, INTERCEPT_DR4_READ);
1088         set_dr_intercept(svm, INTERCEPT_DR5_READ);
1089         set_dr_intercept(svm, INTERCEPT_DR6_READ);
1090         set_dr_intercept(svm, INTERCEPT_DR7_READ);
1091
1092         set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
1093         set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
1094         set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
1095         set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
1096         set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
1097         set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
1098         set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
1099         set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
1100
1101         set_exception_intercept(svm, PF_VECTOR);
1102         set_exception_intercept(svm, UD_VECTOR);
1103         set_exception_intercept(svm, MC_VECTOR);
1104
1105         set_intercept(svm, INTERCEPT_INTR);
1106         set_intercept(svm, INTERCEPT_NMI);
1107         set_intercept(svm, INTERCEPT_SMI);
1108         set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1109         set_intercept(svm, INTERCEPT_RDPMC);
1110         set_intercept(svm, INTERCEPT_CPUID);
1111         set_intercept(svm, INTERCEPT_INVD);
1112         set_intercept(svm, INTERCEPT_HLT);
1113         set_intercept(svm, INTERCEPT_INVLPG);
1114         set_intercept(svm, INTERCEPT_INVLPGA);
1115         set_intercept(svm, INTERCEPT_IOIO_PROT);
1116         set_intercept(svm, INTERCEPT_MSR_PROT);
1117         set_intercept(svm, INTERCEPT_TASK_SWITCH);
1118         set_intercept(svm, INTERCEPT_SHUTDOWN);
1119         set_intercept(svm, INTERCEPT_VMRUN);
1120         set_intercept(svm, INTERCEPT_VMMCALL);
1121         set_intercept(svm, INTERCEPT_VMLOAD);
1122         set_intercept(svm, INTERCEPT_VMSAVE);
1123         set_intercept(svm, INTERCEPT_STGI);
1124         set_intercept(svm, INTERCEPT_CLGI);
1125         set_intercept(svm, INTERCEPT_SKINIT);
1126         set_intercept(svm, INTERCEPT_WBINVD);
1127         set_intercept(svm, INTERCEPT_MONITOR);
1128         set_intercept(svm, INTERCEPT_MWAIT);
1129         set_intercept(svm, INTERCEPT_XSETBV);
1130
1131         control->iopm_base_pa = iopm_base;
1132         control->msrpm_base_pa = __pa(svm->msrpm);
1133         control->int_ctl = V_INTR_MASKING_MASK;
1134
1135         init_seg(&save->es);
1136         init_seg(&save->ss);
1137         init_seg(&save->ds);
1138         init_seg(&save->fs);
1139         init_seg(&save->gs);
1140
1141         save->cs.selector = 0xf000;
1142         save->cs.base = 0xffff0000;
1143         /* Executable/Readable Code Segment */
1144         save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1145                 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1146         save->cs.limit = 0xffff;
1147
1148         save->gdtr.limit = 0xffff;
1149         save->idtr.limit = 0xffff;
1150
1151         init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1152         init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1153
1154         svm_set_efer(&svm->vcpu, 0);
1155         save->dr6 = 0xffff0ff0;
1156         kvm_set_rflags(&svm->vcpu, 2);
1157         save->rip = 0x0000fff0;
1158         svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
1159
1160         /*
1161          * This is the guest-visible cr0 value.
1162          * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
1163          */
1164         svm->vcpu.arch.cr0 = 0;
1165         (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
1166
1167         save->cr4 = X86_CR4_PAE;
1168         /* rdx = ?? */
1169
1170         if (npt_enabled) {
1171                 /* Setup VMCB for Nested Paging */
1172                 control->nested_ctl = 1;
1173                 clr_intercept(svm, INTERCEPT_INVLPG);
1174                 clr_exception_intercept(svm, PF_VECTOR);
1175                 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1176                 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1177                 save->g_pat = 0x0007040600070406ULL;
1178                 save->cr3 = 0;
1179                 save->cr4 = 0;
1180         }
1181         svm->asid_generation = 0;
1182
1183         svm->nested.vmcb = 0;
1184         svm->vcpu.arch.hflags = 0;
1185
1186         if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
1187                 control->pause_filter_count = 3000;
1188                 set_intercept(svm, INTERCEPT_PAUSE);
1189         }
1190
1191         mark_all_dirty(svm->vmcb);
1192
1193         enable_gif(svm);
1194 }
1195
1196 static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
1197 {
1198         struct vcpu_svm *svm = to_svm(vcpu);
1199         u32 dummy;
1200         u32 eax = 1;
1201
1202         init_vmcb(svm);
1203
1204         kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
1205         kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
1206 }
1207
1208 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
1209 {
1210         struct vcpu_svm *svm;
1211         struct page *page;
1212         struct page *msrpm_pages;
1213         struct page *hsave_page;
1214         struct page *nested_msrpm_pages;
1215         int err;
1216
1217         svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1218         if (!svm) {
1219                 err = -ENOMEM;
1220                 goto out;
1221         }
1222
1223         svm->tsc_ratio = TSC_RATIO_DEFAULT;
1224
1225         err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1226         if (err)
1227                 goto free_svm;
1228
1229         err = -ENOMEM;
1230         page = alloc_page(GFP_KERNEL);
1231         if (!page)
1232                 goto uninit;
1233
1234         msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1235         if (!msrpm_pages)
1236                 goto free_page1;
1237
1238         nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1239         if (!nested_msrpm_pages)
1240                 goto free_page2;
1241
1242         hsave_page = alloc_page(GFP_KERNEL);
1243         if (!hsave_page)
1244                 goto free_page3;
1245
1246         svm->nested.hsave = page_address(hsave_page);
1247
1248         svm->msrpm = page_address(msrpm_pages);
1249         svm_vcpu_init_msrpm(svm->msrpm);
1250
1251         svm->nested.msrpm = page_address(nested_msrpm_pages);
1252         svm_vcpu_init_msrpm(svm->nested.msrpm);
1253
1254         svm->vmcb = page_address(page);
1255         clear_page(svm->vmcb);
1256         svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
1257         svm->asid_generation = 0;
1258         init_vmcb(svm);
1259
1260         svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
1261         if (kvm_vcpu_is_bsp(&svm->vcpu))
1262                 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
1263
1264         svm_init_osvw(&svm->vcpu);
1265
1266         return &svm->vcpu;
1267
1268 free_page3:
1269         __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1270 free_page2:
1271         __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1272 free_page1:
1273         __free_page(page);
1274 uninit:
1275         kvm_vcpu_uninit(&svm->vcpu);
1276 free_svm:
1277         kmem_cache_free(kvm_vcpu_cache, svm);
1278 out:
1279         return ERR_PTR(err);
1280 }
1281
1282 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1283 {
1284         struct vcpu_svm *svm = to_svm(vcpu);
1285
1286         __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
1287         __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
1288         __free_page(virt_to_page(svm->nested.hsave));
1289         __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
1290         kvm_vcpu_uninit(vcpu);
1291         kmem_cache_free(kvm_vcpu_cache, svm);
1292 }
1293
1294 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1295 {
1296         struct vcpu_svm *svm = to_svm(vcpu);
1297         int i;
1298
1299         if (unlikely(cpu != vcpu->cpu)) {
1300                 svm->asid_generation = 0;
1301                 mark_all_dirty(svm->vmcb);
1302         }
1303
1304 #ifdef CONFIG_X86_64
1305         rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1306 #endif
1307         savesegment(fs, svm->host.fs);
1308         savesegment(gs, svm->host.gs);
1309         svm->host.ldt = kvm_read_ldt();
1310
1311         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
1312                 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
1313
1314         if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
1315             svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
1316                 __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
1317                 wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
1318         }
1319 }
1320
1321 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1322 {
1323         struct vcpu_svm *svm = to_svm(vcpu);
1324         int i;
1325
1326         ++vcpu->stat.host_state_reload;
1327         kvm_load_ldt(svm->host.ldt);
1328 #ifdef CONFIG_X86_64
1329         loadsegment(fs, svm->host.fs);
1330         wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
1331         load_gs_index(svm->host.gs);
1332 #else
1333 #ifdef CONFIG_X86_32_LAZY_GS
1334         loadsegment(gs, svm->host.gs);
1335 #endif
1336 #endif
1337         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
1338                 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
1339 }
1340
1341 static void svm_update_cpl(struct kvm_vcpu *vcpu)
1342 {
1343         struct vcpu_svm *svm = to_svm(vcpu);
1344         int cpl;
1345
1346         if (!is_protmode(vcpu))
1347                 cpl = 0;
1348         else if (svm->vmcb->save.rflags & X86_EFLAGS_VM)
1349                 cpl = 3;
1350         else
1351                 cpl = svm->vmcb->save.cs.selector & 0x3;
1352
1353         svm->vmcb->save.cpl = cpl;
1354 }
1355
1356 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1357 {
1358         return to_svm(vcpu)->vmcb->save.rflags;
1359 }
1360
1361 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1362 {
1363         unsigned long old_rflags = to_svm(vcpu)->vmcb->save.rflags;
1364
1365         to_svm(vcpu)->vmcb->save.rflags = rflags;
1366         if ((old_rflags ^ rflags) & X86_EFLAGS_VM)
1367                 svm_update_cpl(vcpu);
1368 }
1369
1370 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1371 {
1372         switch (reg) {
1373         case VCPU_EXREG_PDPTR:
1374                 BUG_ON(!npt_enabled);
1375                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
1376                 break;
1377         default:
1378                 BUG();
1379         }
1380 }
1381
1382 static void svm_set_vintr(struct vcpu_svm *svm)
1383 {
1384         set_intercept(svm, INTERCEPT_VINTR);
1385 }
1386
1387 static void svm_clear_vintr(struct vcpu_svm *svm)
1388 {
1389         clr_intercept(svm, INTERCEPT_VINTR);
1390 }
1391
1392 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1393 {
1394         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1395
1396         switch (seg) {
1397         case VCPU_SREG_CS: return &save->cs;
1398         case VCPU_SREG_DS: return &save->ds;
1399         case VCPU_SREG_ES: return &save->es;
1400         case VCPU_SREG_FS: return &save->fs;
1401         case VCPU_SREG_GS: return &save->gs;
1402         case VCPU_SREG_SS: return &save->ss;
1403         case VCPU_SREG_TR: return &save->tr;
1404         case VCPU_SREG_LDTR: return &save->ldtr;
1405         }
1406         BUG();
1407         return NULL;
1408 }
1409
1410 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1411 {
1412         struct vmcb_seg *s = svm_seg(vcpu, seg);
1413
1414         return s->base;
1415 }
1416
1417 static void svm_get_segment(struct kvm_vcpu *vcpu,
1418                             struct kvm_segment *var, int seg)
1419 {
1420         struct vmcb_seg *s = svm_seg(vcpu, seg);
1421
1422         var->base = s->base;
1423         var->limit = s->limit;
1424         var->selector = s->selector;
1425         var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1426         var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1427         var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1428         var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1429         var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1430         var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1431         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1432         var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
1433
1434         /*
1435          * AMD's VMCB does not have an explicit unusable field, so emulate it
1436          * for cross-vendor migration purposes by treating "not present" as unusable.
1437          */
1438         var->unusable = !var->present || (var->type == 0);
1439
1440         switch (seg) {
1441         case VCPU_SREG_CS:
1442                 /*
1443                  * SVM always stores 0 for the 'G' bit in the CS selector in
1444                  * the VMCB on a VMEXIT. This hurts cross-vendor migration:
1445                  * Intel's VMENTRY has a check on the 'G' bit.
1446                  */
1447                 var->g = s->limit > 0xfffff;
1448                 break;
1449         case VCPU_SREG_TR:
1450                 /*
1451                  * Work around a bug where the busy flag in the tr selector
1452                  * isn't exposed
1453                  */
1454                 var->type |= 0x2;
1455                 break;
1456         case VCPU_SREG_DS:
1457         case VCPU_SREG_ES:
1458         case VCPU_SREG_FS:
1459         case VCPU_SREG_GS:
1460                 /*
1461                  * The accessed bit must always be set in the segment
1462                  * descriptor cache: although it can be cleared in the
1463                  * descriptor itself, the cached bit always remains 1.
1464                  * Since Intel has a check on this, set it here to
1465                  * support cross-vendor migration.
1466                  */
1467                 if (!var->unusable)
1468                         var->type |= 0x1;
1469                 break;
1470         case VCPU_SREG_SS:
1471                 /*
1472                  * On AMD CPUs sometimes the DB bit in the segment
1473                  * descriptor is left as 1, although the whole segment has
1474                  * been made unusable. Clear it here to pass an Intel VMX
1475                  * entry check when cross-vendor migrating.
1476                  */
1477                 if (var->unusable)
1478                         var->db = 0;
1479                 break;
1480         }
1481 }
1482
1483 static int svm_get_cpl(struct kvm_vcpu *vcpu)
1484 {
1485         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1486
1487         return save->cpl;
1488 }
1489
1490 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1491 {
1492         struct vcpu_svm *svm = to_svm(vcpu);
1493
1494         dt->size = svm->vmcb->save.idtr.limit;
1495         dt->address = svm->vmcb->save.idtr.base;
1496 }
1497
1498 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1499 {
1500         struct vcpu_svm *svm = to_svm(vcpu);
1501
1502         svm->vmcb->save.idtr.limit = dt->size;
1503         svm->vmcb->save.idtr.base = dt->address;
1504         mark_dirty(svm->vmcb, VMCB_DT);
1505 }
1506
1507 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1508 {
1509         struct vcpu_svm *svm = to_svm(vcpu);
1510
1511         dt->size = svm->vmcb->save.gdtr.limit;
1512         dt->address = svm->vmcb->save.gdtr.base;
1513 }
1514
1515 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1516 {
1517         struct vcpu_svm *svm = to_svm(vcpu);
1518
1519         svm->vmcb->save.gdtr.limit = dt->size;
1520         svm->vmcb->save.gdtr.base = dt->address;
1521         mark_dirty(svm->vmcb, VMCB_DT);
1522 }
1523
1524 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
1525 {
1526 }
1527
1528 static void svm_decache_cr3(struct kvm_vcpu *vcpu)
1529 {
1530 }
1531
1532 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1533 {
1534 }
1535
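/*
 * Only the bits in SVM_CR0_SELECTIVE_MASK may differ between the
 * guest-visible CR0 and the CR0 the hardware actually uses (this is how
 * lazy FPU switching keeps CR0.TS set behind the guest's back).  When the
 * two values match, CR0 reads and writes need not be intercepted at all.
 */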
1536 static void update_cr0_intercept(struct vcpu_svm *svm)
1537 {
1538         ulong gcr0 = svm->vcpu.arch.cr0;
1539         u64 *hcr0 = &svm->vmcb->save.cr0;
1540
1541         if (!svm->vcpu.fpu_active)
1542                 *hcr0 |= SVM_CR0_SELECTIVE_MASK;
1543         else
1544                 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
1545                         | (gcr0 & SVM_CR0_SELECTIVE_MASK);
1546
1547         mark_dirty(svm->vmcb, VMCB_CR);
1548
1549         if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
1550                 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
1551                 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1552         } else {
1553                 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1554                 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1555         }
1556 }
1557
1558 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1559 {
1560         struct vcpu_svm *svm = to_svm(vcpu);
1561
1562 #ifdef CONFIG_X86_64
1563         if (vcpu->arch.efer & EFER_LME) {
1564                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1565                         vcpu->arch.efer |= EFER_LMA;
1566                         svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1567                 }
1568
1569                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1570                         vcpu->arch.efer &= ~EFER_LMA;
1571                         svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1572                 }
1573         }
1574 #endif
1575         vcpu->arch.cr0 = cr0;
1576
1577         if (!npt_enabled)
1578                 cr0 |= X86_CR0_PG | X86_CR0_WP;
1579
1580         if (!vcpu->fpu_active)
1581                 cr0 |= X86_CR0_TS;
1582         /*
1583          * Re-enable caching here because the QEMU BIOS does not
1584          * do it; leaving caching disabled results in some delay
1585          * at reboot.
1586          */
1587         cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1588         svm->vmcb->save.cr0 = cr0;
1589         mark_dirty(svm->vmcb, VMCB_CR);
1590         update_cr0_intercept(svm);
1591 }
1592
1593 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1594 {
1595         unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
1596         unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1597
1598         if (cr4 & X86_CR4_VMXE)
1599                 return 1;
1600
1601         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1602                 svm_flush_tlb(vcpu);
1603
1604         vcpu->arch.cr4 = cr4;
1605         if (!npt_enabled)
1606                 cr4 |= X86_CR4_PAE;
1607         cr4 |= host_cr4_mce;
1608         to_svm(vcpu)->vmcb->save.cr4 = cr4;
1609         mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1610         return 0;
1611 }
1612
1613 static void svm_set_segment(struct kvm_vcpu *vcpu,
1614                             struct kvm_segment *var, int seg)
1615 {
1616         struct vcpu_svm *svm = to_svm(vcpu);
1617         struct vmcb_seg *s = svm_seg(vcpu, seg);
1618
1619         s->base = var->base;
1620         s->limit = var->limit;
1621         s->selector = var->selector;
1622         if (var->unusable)
1623                 s->attrib = 0;
1624         else {
1625                 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1626                 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1627                 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1628                 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
1629                 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1630                 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1631                 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1632                 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1633         }
1634         if (seg == VCPU_SREG_CS)
1635                 svm_update_cpl(vcpu);
1636
1637         mark_dirty(svm->vmcb, VMCB_SEG);
1638 }
1639
1640 static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
1641 {
1642         struct vcpu_svm *svm = to_svm(vcpu);
1643
1644         clr_exception_intercept(svm, DB_VECTOR);
1645         clr_exception_intercept(svm, BP_VECTOR);
1646
1647         if (svm->nmi_singlestep)
1648                 set_exception_intercept(svm, DB_VECTOR);
1649
1650         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1651                 if (vcpu->guest_debug &
1652                     (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
1653                         set_exception_intercept(svm, DB_VECTOR);
1654                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1655                         set_exception_intercept(svm, BP_VECTOR);
1656         } else
1657                 vcpu->guest_debug = 0;
1658 }
1659
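/*
 * Hardware tags TLB entries with the ASID in the VMCB.  When this CPU runs
 * out of ASIDs, start a new generation at ASID 1 and request a full ASID
 * flush for the next VMRUN; the vcpu records which generation its ASID
 * belongs to.
 */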
1660 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1661 {
1662         if (sd->next_asid > sd->max_asid) {
1663                 ++sd->asid_generation;
1664                 sd->next_asid = 1;
1665                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1666         }
1667
1668         svm->asid_generation = sd->asid_generation;
1669         svm->vmcb->control.asid = sd->next_asid++;
1670
1671         mark_dirty(svm->vmcb, VMCB_ASID);
1672 }
1673
1674 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1675 {
1676         struct vcpu_svm *svm = to_svm(vcpu);
1677
1678         svm->vmcb->save.dr7 = value;
1679         mark_dirty(svm->vmcb, VMCB_DR);
1680 }
1681
1682 static int pf_interception(struct vcpu_svm *svm)
1683 {
1684         u64 fault_address = svm->vmcb->control.exit_info_2;
1685         u32 error_code;
1686         int r = 1;
1687
1688         switch (svm->apf_reason) {
1689         default:
1690                 error_code = svm->vmcb->control.exit_info_1;
1691
1692                 trace_kvm_page_fault(fault_address, error_code);
1693                 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
1694                         kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
1695                 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
1696                         svm->vmcb->control.insn_bytes,
1697                         svm->vmcb->control.insn_len);
1698                 break;
1699         case KVM_PV_REASON_PAGE_NOT_PRESENT:
1700                 svm->apf_reason = 0;
1701                 local_irq_disable();
1702                 kvm_async_pf_task_wait(fault_address);
1703                 local_irq_enable();
1704                 break;
1705         case KVM_PV_REASON_PAGE_READY:
1706                 svm->apf_reason = 0;
1707                 local_irq_disable();
1708                 kvm_async_pf_task_wake(fault_address);
1709                 local_irq_enable();
1710                 break;
1711         }
1712         return r;
1713 }
1714
1715 static int db_interception(struct vcpu_svm *svm)
1716 {
1717         struct kvm_run *kvm_run = svm->vcpu.run;
1718
1719         if (!(svm->vcpu.guest_debug &
1720               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
1721                 !svm->nmi_singlestep) {
1722                 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1723                 return 1;
1724         }
1725
1726         if (svm->nmi_singlestep) {
1727                 svm->nmi_singlestep = false;
1728                 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
1729                         svm->vmcb->save.rflags &=
1730                                 ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1731                 update_db_bp_intercept(&svm->vcpu);
1732         }
1733
1734         if (svm->vcpu.guest_debug &
1735             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
1736                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1737                 kvm_run->debug.arch.pc =
1738                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1739                 kvm_run->debug.arch.exception = DB_VECTOR;
1740                 return 0;
1741         }
1742
1743         return 1;
1744 }
1745
1746 static int bp_interception(struct vcpu_svm *svm)
1747 {
1748         struct kvm_run *kvm_run = svm->vcpu.run;
1749
1750         kvm_run->exit_reason = KVM_EXIT_DEBUG;
1751         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1752         kvm_run->debug.arch.exception = BP_VECTOR;
1753         return 0;
1754 }
1755
1756 static int ud_interception(struct vcpu_svm *svm)
1757 {
1758         int er;
1759
1760         er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
1761         if (er != EMULATE_DONE)
1762                 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1763         return 1;
1764 }
1765
1766 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
1767 {
1768         struct vcpu_svm *svm = to_svm(vcpu);
1769
1770         clr_exception_intercept(svm, NM_VECTOR);
1771
1772         svm->vcpu.fpu_active = 1;
1773         update_cr0_intercept(svm);
1774 }
1775
1776 static int nm_interception(struct vcpu_svm *svm)
1777 {
1778         svm_fpu_activate(&svm->vcpu);
1779         return 1;
1780 }
1781
1782 static bool is_erratum_383(void)
1783 {
1784         int err, i;
1785         u64 value;
1786
1787         if (!erratum_383_found)
1788                 return false;
1789
1790         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1791         if (err)
1792                 return false;
1793
1794         /* Bit 62 may or may not be set for this mce */
1795         value &= ~(1ULL << 62);
1796
1797         if (value != 0xb600000000010015ULL)
1798                 return false;
1799
1800         /* Clear MCi_STATUS registers */
1801         for (i = 0; i < 6; ++i)
1802                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1803
1804         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1805         if (!err) {
1806                 u32 low, high;
1807
1808                 value &= ~(1ULL << 2);
1809                 low    = lower_32_bits(value);
1810                 high   = upper_32_bits(value);
1811
1812                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1813         }
1814
1815         /* Flush tlb to evict multi-match entries */
1816         __flush_tlb_all();
1817
1818         return true;
1819 }
1820
1821 static void svm_handle_mce(struct vcpu_svm *svm)
1822 {
1823         if (is_erratum_383()) {
1824                 /*
1825                  * Erratum 383 triggered. Guest state is corrupt so kill the
1826                  * guest.
1827                  */
1828                 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1829
1830                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
1831
1832                 return;
1833         }
1834
1835         /*
1836          * On an #MC intercept the MCE handler is not called automatically in
1837          * the host. So do it by hand here.
1838          */
1839         asm volatile (
1840                 "int $0x12\n");
1841         /* not sure if we ever come back to this point */
1842
1843         return;
1844 }
1845
1846 static int mc_interception(struct vcpu_svm *svm)
1847 {
1848         return 1;
1849 }
1850
1851 static int shutdown_interception(struct vcpu_svm *svm)
1852 {
1853         struct kvm_run *kvm_run = svm->vcpu.run;
1854
1855         /*
1856          * VMCB is undefined after a SHUTDOWN intercept
1857          * so reinitialize it.
1858          */
1859         clear_page(svm->vmcb);
1860         init_vmcb(svm);
1861
1862         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1863         return 0;
1864 }
1865
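/*
 * IOIO intercept: exit_info_1 encodes direction, string flag, size and
 * port. String and IN accesses go through the instruction emulator;
 * plain OUTs use the fast pio path.
 */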
1866 static int io_interception(struct vcpu_svm *svm)
1867 {
1868         struct kvm_vcpu *vcpu = &svm->vcpu;
1869         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
1870         int size, in, string;
1871         unsigned port;
1872
1873         ++svm->vcpu.stat.io_exits;
1874         string = (io_info & SVM_IOIO_STR_MASK) != 0;
1875         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
1876         if (string || in)
1877                 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
1878
1879         port = io_info >> 16;
1880         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
1881         svm->next_rip = svm->vmcb->control.exit_info_2;
1882         skip_emulated_instruction(&svm->vcpu);
1883
1884         return kvm_fast_pio_out(vcpu, size, port);
1885 }
1886
1887 static int nmi_interception(struct vcpu_svm *svm)
1888 {
1889         return 1;
1890 }
1891
1892 static int intr_interception(struct vcpu_svm *svm)
1893 {
1894         ++svm->vcpu.stat.irq_exits;
1895         return 1;
1896 }
1897
1898 static int nop_on_interception(struct vcpu_svm *svm)
1899 {
1900         return 1;
1901 }
1902
1903 static int halt_interception(struct vcpu_svm *svm)
1904 {
1905         svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
1906         skip_emulated_instruction(&svm->vcpu);
1907         return kvm_emulate_halt(&svm->vcpu);
1908 }
1909
1910 static int vmmcall_interception(struct vcpu_svm *svm)
1911 {
1912         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1913         skip_emulated_instruction(&svm->vcpu);
1914         kvm_emulate_hypercall(&svm->vcpu);
1915         return 1;
1916 }
1917
1918 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
1919 {
1920         struct vcpu_svm *svm = to_svm(vcpu);
1921
1922         return svm->nested.nested_cr3;
1923 }
1924
1925 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
1926 {
1927         struct vcpu_svm *svm = to_svm(vcpu);
1928         u64 cr3 = svm->nested.nested_cr3;
1929         u64 pdpte;
1930         int ret;
1931
1932         ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
1933                                   offset_in_page(cr3) + index * 8, 8);
1934         if (ret)
1935                 return 0;
1936         return pdpte;
1937 }
1938
1939 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
1940                                    unsigned long root)
1941 {
1942         struct vcpu_svm *svm = to_svm(vcpu);
1943
1944         svm->vmcb->control.nested_cr3 = root;
1945         mark_dirty(svm->vmcb, VMCB_NPT);
1946         svm_flush_tlb(vcpu);
1947 }
1948
1949 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
1950                                        struct x86_exception *fault)
1951 {
1952         struct vcpu_svm *svm = to_svm(vcpu);
1953
1954         svm->vmcb->control.exit_code = SVM_EXIT_NPF;
1955         svm->vmcb->control.exit_code_hi = 0;
1956         svm->vmcb->control.exit_info_1 = fault->error_code;
1957         svm->vmcb->control.exit_info_2 = fault->address;
1958
1959         nested_svm_vmexit(svm);
1960 }
1961
1962 static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
1963 {
1964         int r;
1965
1966         r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
1967
1968         vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
1969         vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
1970         vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
1971         vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
1972         vcpu->arch.mmu.shadow_root_level = get_npt_level();
1973         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
1974
1975         return r;
1976 }
1977
1978 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
1979 {
1980         vcpu->arch.walk_mmu = &vcpu->arch.mmu;
1981 }
1982
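/*
 * Common permission checks for SVM instructions executed by the guest:
 * inject #UD if EFER.SVME is clear or paging is disabled, and #GP(0)
 * when not running at CPL 0.
 */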
1983 static int nested_svm_check_permissions(struct vcpu_svm *svm)
1984 {
1985         if (!(svm->vcpu.arch.efer & EFER_SVME)
1986             || !is_paging(&svm->vcpu)) {
1987                 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1988                 return 1;
1989         }
1990
1991         if (svm->vmcb->save.cpl) {
1992                 kvm_inject_gp(&svm->vcpu, 0);
1993                 return 1;
1994         }
1995
1996         return 0;
1997 }
1998
1999 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2000                                       bool has_error_code, u32 error_code)
2001 {
2002         int vmexit;
2003
2004         if (!is_guest_mode(&svm->vcpu))
2005                 return 0;
2006
2007         svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2008         svm->vmcb->control.exit_code_hi = 0;
2009         svm->vmcb->control.exit_info_1 = error_code;
2010         svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
2011
2012         vmexit = nested_svm_intercept(svm);
2013         if (vmexit == NESTED_EXIT_DONE)
2014                 svm->nested.exit_required = true;
2015
2016         return vmexit;
2017 }
2018
2019 /* This function returns true if it is safe to enable the irq window */
2020 static inline bool nested_svm_intr(struct vcpu_svm *svm)
2021 {
2022         if (!is_guest_mode(&svm->vcpu))
2023                 return true;
2024
2025         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2026                 return true;
2027
2028         if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
2029                 return false;
2030
2031         /*
2032          * if vmexit was already requested (by intercepted exception
2033          * for instance) do not overwrite it with "external interrupt"
2034          * vmexit.
2035          */
2036         if (svm->nested.exit_required)
2037                 return false;
2038
2039         svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
2040         svm->vmcb->control.exit_info_1 = 0;
2041         svm->vmcb->control.exit_info_2 = 0;
2042
2043         if (svm->nested.intercept & 1ULL) {
2044                 /*
2045                  * The #vmexit can't be emulated here directly because this
2046                  * code path runs with irqs and preemption disabled. A
2047                  * #vmexit emulation might sleep. Only signal request for
2048                  * the #vmexit here.
2049                  */
2050                 svm->nested.exit_required = true;
2051                 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
2052                 return false;
2053         }
2054
2055         return true;
2056 }
2057
2058 /* This function returns true if it is safe to enable the nmi window */
2059 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2060 {
2061         if (!is_guest_mode(&svm->vcpu))
2062                 return true;
2063
2064         if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2065                 return true;
2066
2067         svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2068         svm->nested.exit_required = true;
2069
2070         return false;
2071 }
2072
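/*
 * Map a guest page (such as the nested VMCB) into the host. May sleep;
 * injects #GP(0) and returns NULL on failure. The caller must release
 * the mapping with nested_svm_unmap().
 */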
2073 static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
2074 {
2075         struct page *page;
2076
2077         might_sleep();
2078
2079         page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
2080         if (is_error_page(page))
2081                 goto error;
2082
2083         *_page = page;
2084
2085         return kmap(page);
2086
2087 error:
2088         kvm_inject_gp(&svm->vcpu, 0);
2089
2090         return NULL;
2091 }
2092
2093 static void nested_svm_unmap(struct page *page)
2094 {
2095         kunmap(page);
2096         kvm_release_page_dirty(page);
2097 }
2098
2099 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
2100 {
2101         unsigned port;
2102         u8 val, bit;
2103         u64 gpa;
2104
2105         if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2106                 return NESTED_EXIT_HOST;
2107
2108         port = svm->vmcb->control.exit_info_1 >> 16;
2109         gpa  = svm->nested.vmcb_iopm + (port / 8);
2110         bit  = port % 8;
2111         val  = 0;
2112
2113         if (!kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
2114                 val &= (1 << bit);
2115
2116         return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
2117 }
2118
2119 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
2120 {
2121         u32 offset, msr, value;
2122         int write, mask;
2123
2124         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2125                 return NESTED_EXIT_HOST;
2126
2127         msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2128         offset = svm_msrpm_offset(msr);
2129         write  = svm->vmcb->control.exit_info_1 & 1;
2130         mask   = 1 << ((2 * (msr & 0xf)) + write);
2131
2132         if (offset == MSR_INVALID)
2133                 return NESTED_EXIT_DONE;
2134
2135         /* The offset is in 32-bit units, but we need it in byte units */
2136         offset *= 4;
2137
2138         if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
2139                 return NESTED_EXIT_DONE;
2140
2141         return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
2142 }
2143
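/*
 * Exits that must be checked on the host level first, before the
 * intercept bits of the nested hypervisor are consulted.
 */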
2144 static int nested_svm_exit_special(struct vcpu_svm *svm)
2145 {
2146         u32 exit_code = svm->vmcb->control.exit_code;
2147
2148         switch (exit_code) {
2149         case SVM_EXIT_INTR:
2150         case SVM_EXIT_NMI:
2151         case SVM_EXIT_EXCP_BASE + MC_VECTOR:
2152                 return NESTED_EXIT_HOST;
2153         case SVM_EXIT_NPF:
2154                 /* For now we are always handling NPFs when using them */
2155                 if (npt_enabled)
2156                         return NESTED_EXIT_HOST;
2157                 break;
2158         case SVM_EXIT_EXCP_BASE + PF_VECTOR:
2159                 /* When we're shadowing, trap PFs, but not async PF */
2160                 if (!npt_enabled && svm->apf_reason == 0)
2161                         return NESTED_EXIT_HOST;
2162                 break;
2163         case SVM_EXIT_EXCP_BASE + NM_VECTOR:
2164                 nm_interception(svm);
2165                 break;
2166         default:
2167                 break;
2168         }
2169
2170         return NESTED_EXIT_CONTINUE;
2171 }
2172
2173 /*
2174  * Returns NESTED_EXIT_DONE if the nested hypervisor intercepts this #vmexit
2175  */
2176 static int nested_svm_intercept(struct vcpu_svm *svm)
2177 {
2178         u32 exit_code = svm->vmcb->control.exit_code;
2179         int vmexit = NESTED_EXIT_HOST;
2180
2181         switch (exit_code) {
2182         case SVM_EXIT_MSR:
2183                 vmexit = nested_svm_exit_handled_msr(svm);
2184                 break;
2185         case SVM_EXIT_IOIO:
2186                 vmexit = nested_svm_intercept_ioio(svm);
2187                 break;
2188         case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2189                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2190                 if (svm->nested.intercept_cr & bit)
2191                         vmexit = NESTED_EXIT_DONE;
2192                 break;
2193         }
2194         case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2195                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2196                 if (svm->nested.intercept_dr & bit)
2197                         vmexit = NESTED_EXIT_DONE;
2198                 break;
2199         }
2200         case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2201                 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
2202                 if (svm->nested.intercept_exceptions & excp_bits)
2203                         vmexit = NESTED_EXIT_DONE;
2204                 /* an async page fault always causes a vmexit */
2205                 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
2206                          svm->apf_reason != 0)
2207                         vmexit = NESTED_EXIT_DONE;
2208                 break;
2209         }
2210         case SVM_EXIT_ERR: {
2211                 vmexit = NESTED_EXIT_DONE;
2212                 break;
2213         }
2214         default: {
2215                 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
2216                 if (svm->nested.intercept & exit_bits)
2217                         vmexit = NESTED_EXIT_DONE;
2218         }
2219         }
2220
2221         return vmexit;
2222 }
2223
2224 static int nested_svm_exit_handled(struct vcpu_svm *svm)
2225 {
2226         int vmexit;
2227
2228         vmexit = nested_svm_intercept(svm);
2229
2230         if (vmexit == NESTED_EXIT_DONE)
2231                 nested_svm_vmexit(svm);
2232
2233         return vmexit;
2234 }
2235
2236 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2237 {
2238         struct vmcb_control_area *dst  = &dst_vmcb->control;
2239         struct vmcb_control_area *from = &from_vmcb->control;
2240
2241         dst->intercept_cr         = from->intercept_cr;
2242         dst->intercept_dr         = from->intercept_dr;
2243         dst->intercept_exceptions = from->intercept_exceptions;
2244         dst->intercept            = from->intercept;
2245         dst->iopm_base_pa         = from->iopm_base_pa;
2246         dst->msrpm_base_pa        = from->msrpm_base_pa;
2247         dst->tsc_offset           = from->tsc_offset;
2248         dst->asid                 = from->asid;
2249         dst->tlb_ctl              = from->tlb_ctl;
2250         dst->int_ctl              = from->int_ctl;
2251         dst->int_vector           = from->int_vector;
2252         dst->int_state            = from->int_state;
2253         dst->exit_code            = from->exit_code;
2254         dst->exit_code_hi         = from->exit_code_hi;
2255         dst->exit_info_1          = from->exit_info_1;
2256         dst->exit_info_2          = from->exit_info_2;
2257         dst->exit_int_info        = from->exit_int_info;
2258         dst->exit_int_info_err    = from->exit_int_info_err;
2259         dst->nested_ctl           = from->nested_ctl;
2260         dst->event_inj            = from->event_inj;
2261         dst->event_inj_err        = from->event_inj_err;
2262         dst->nested_cr3           = from->nested_cr3;
2263         dst->lbr_ctl              = from->lbr_ctl;
2264 }
2265
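/*
 * Emulate a #VMEXIT to the nested hypervisor: copy the current guest
 * state and exit information into the nested VMCB, restore the host
 * state saved in hsave at VMRUN emulation time, and leave guest mode.
 */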
2266 static int nested_svm_vmexit(struct vcpu_svm *svm)
2267 {
2268         struct vmcb *nested_vmcb;
2269         struct vmcb *hsave = svm->nested.hsave;
2270         struct vmcb *vmcb = svm->vmcb;
2271         struct page *page;
2272
2273         trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
2274                                        vmcb->control.exit_info_1,
2275                                        vmcb->control.exit_info_2,
2276                                        vmcb->control.exit_int_info,
2277                                        vmcb->control.exit_int_info_err,
2278                                        KVM_ISA_SVM);
2279
2280         nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
2281         if (!nested_vmcb)
2282                 return 1;
2283
2284         /* Exit Guest-Mode */
2285         leave_guest_mode(&svm->vcpu);
2286         svm->nested.vmcb = 0;
2287
2288         /* Give the current vmcb to the guest */
2289         disable_gif(svm);
2290
2291         nested_vmcb->save.es     = vmcb->save.es;
2292         nested_vmcb->save.cs     = vmcb->save.cs;
2293         nested_vmcb->save.ss     = vmcb->save.ss;
2294         nested_vmcb->save.ds     = vmcb->save.ds;
2295         nested_vmcb->save.gdtr   = vmcb->save.gdtr;
2296         nested_vmcb->save.idtr   = vmcb->save.idtr;
2297         nested_vmcb->save.efer   = svm->vcpu.arch.efer;
2298         nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
2299         nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
2300         nested_vmcb->save.cr2    = vmcb->save.cr2;
2301         nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
2302         nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
2303         nested_vmcb->save.rip    = vmcb->save.rip;
2304         nested_vmcb->save.rsp    = vmcb->save.rsp;
2305         nested_vmcb->save.rax    = vmcb->save.rax;
2306         nested_vmcb->save.dr7    = vmcb->save.dr7;
2307         nested_vmcb->save.dr6    = vmcb->save.dr6;
2308         nested_vmcb->save.cpl    = vmcb->save.cpl;
2309
2310         nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
2311         nested_vmcb->control.int_vector        = vmcb->control.int_vector;
2312         nested_vmcb->control.int_state         = vmcb->control.int_state;
2313         nested_vmcb->control.exit_code         = vmcb->control.exit_code;
2314         nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
2315         nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
2316         nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
2317         nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
2318         nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
2319         nested_vmcb->control.next_rip          = vmcb->control.next_rip;
2320
2321         /*
2322          * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
2323          * to make sure that we do not lose injected events. So check event_inj
2324          * here and copy it to exit_int_info if it is valid.
2325          * Exit_int_info and event_inj can't be both valid because the case
2326          * below only happens on a VMRUN instruction intercept which has
2327          * no valid exit_int_info set.
2328          */
2329         if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2330                 struct vmcb_control_area *nc = &nested_vmcb->control;
2331
2332                 nc->exit_int_info     = vmcb->control.event_inj;
2333                 nc->exit_int_info_err = vmcb->control.event_inj_err;
2334         }
2335
2336         nested_vmcb->control.tlb_ctl           = 0;
2337         nested_vmcb->control.event_inj         = 0;
2338         nested_vmcb->control.event_inj_err     = 0;
2339
2340         /* We always set V_INTR_MASKING and remember the old value in hflags */
2341         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2342                 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2343
2344         /* Restore the original control entries */
2345         copy_vmcb_control_area(vmcb, hsave);
2346
2347         kvm_clear_exception_queue(&svm->vcpu);
2348         kvm_clear_interrupt_queue(&svm->vcpu);
2349
2350         svm->nested.nested_cr3 = 0;
2351
2352         /* Restore selected save entries */
2353         svm->vmcb->save.es = hsave->save.es;
2354         svm->vmcb->save.cs = hsave->save.cs;
2355         svm->vmcb->save.ss = hsave->save.ss;
2356         svm->vmcb->save.ds = hsave->save.ds;
2357         svm->vmcb->save.gdtr = hsave->save.gdtr;
2358         svm->vmcb->save.idtr = hsave->save.idtr;
2359         kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
2360         svm_set_efer(&svm->vcpu, hsave->save.efer);
2361         svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2362         svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2363         if (npt_enabled) {
2364                 svm->vmcb->save.cr3 = hsave->save.cr3;
2365                 svm->vcpu.arch.cr3 = hsave->save.cr3;
2366         } else {
2367                 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
2368         }
2369         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2370         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2371         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2372         svm->vmcb->save.dr7 = 0;
2373         svm->vmcb->save.cpl = 0;
2374         svm->vmcb->control.exit_int_info = 0;
2375
2376         mark_all_dirty(svm->vmcb);
2377
2378         nested_svm_unmap(page);
2379
2380         nested_svm_uninit_mmu_context(&svm->vcpu);
2381         kvm_mmu_reset_context(&svm->vcpu);
2382         kvm_mmu_load(&svm->vcpu);
2383
2384         return 0;
2385 }
2386
2387 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
2388 {
2389         /*
2390          * This function merges the msr permission bitmaps of kvm and the
2391          * nested vmcb. It is optimized in that it only merges the parts where
2392          * the kvm msr permission bitmap may contain zero bits
2393          */
2394         int i;
2395
2396         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2397                 return true;
2398
2399         for (i = 0; i < MSRPM_OFFSETS; i++) {
2400                 u32 value, p;
2401                 u64 offset;
2402
2403                 if (msrpm_offsets[i] == 0xffffffff)
2404                         break;
2405
2406                 p      = msrpm_offsets[i];
2407                 offset = svm->nested.vmcb_msrpm + (p * 4);
2408
2409                 if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
2410                         return false;
2411
2412                 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2413         }
2414
2415         svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
2416
2417         return true;
2418 }
2419
2420 static bool nested_vmcb_checks(struct vmcb *vmcb)
2421 {
2422         if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2423                 return false;
2424
2425         if (vmcb->control.asid == 0)
2426                 return false;
2427
2428         if (vmcb->control.nested_ctl && !npt_enabled)
2429                 return false;
2430
2431         return true;
2432 }
2433
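/*
 * Emulate VMRUN: map the nested VMCB referenced by RAX, validate it,
 * save the current state into hsave, load the nested guest state and
 * control fields, and enter guest mode.
 */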
2434 static bool nested_svm_vmrun(struct vcpu_svm *svm)
2435 {
2436         struct vmcb *nested_vmcb;
2437         struct vmcb *hsave = svm->nested.hsave;
2438         struct vmcb *vmcb = svm->vmcb;
2439         struct page *page;
2440         u64 vmcb_gpa;
2441
2442         vmcb_gpa = svm->vmcb->save.rax;
2443
2444         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2445         if (!nested_vmcb)
2446                 return false;
2447
2448         if (!nested_vmcb_checks(nested_vmcb)) {
2449                 nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
2450                 nested_vmcb->control.exit_code_hi = 0;
2451                 nested_vmcb->control.exit_info_1  = 0;
2452                 nested_vmcb->control.exit_info_2  = 0;
2453
2454                 nested_svm_unmap(page);
2455
2456                 return false;
2457         }
2458
2459         trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
2460                                nested_vmcb->save.rip,
2461                                nested_vmcb->control.int_ctl,
2462                                nested_vmcb->control.event_inj,
2463                                nested_vmcb->control.nested_ctl);
2464
2465         trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2466                                     nested_vmcb->control.intercept_cr >> 16,
2467                                     nested_vmcb->control.intercept_exceptions,
2468                                     nested_vmcb->control.intercept);
2469
2470         /* Clear internal status */
2471         kvm_clear_exception_queue(&svm->vcpu);
2472         kvm_clear_interrupt_queue(&svm->vcpu);
2473
2474         /*
2475          * Save the old vmcb, so we don't need to pick what we save, but can
2476          * restore everything when a VMEXIT occurs
2477          */
2478         hsave->save.es     = vmcb->save.es;
2479         hsave->save.cs     = vmcb->save.cs;
2480         hsave->save.ss     = vmcb->save.ss;
2481         hsave->save.ds     = vmcb->save.ds;
2482         hsave->save.gdtr   = vmcb->save.gdtr;
2483         hsave->save.idtr   = vmcb->save.idtr;
2484         hsave->save.efer   = svm->vcpu.arch.efer;
2485         hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
2486         hsave->save.cr4    = svm->vcpu.arch.cr4;
2487         hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
2488         hsave->save.rip    = kvm_rip_read(&svm->vcpu);
2489         hsave->save.rsp    = vmcb->save.rsp;
2490         hsave->save.rax    = vmcb->save.rax;
2491         if (npt_enabled)
2492                 hsave->save.cr3    = vmcb->save.cr3;
2493         else
2494                 hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
2495
2496         copy_vmcb_control_area(hsave, vmcb);
2497
2498         if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
2499                 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2500         else
2501                 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2502
2503         if (nested_vmcb->control.nested_ctl) {
2504                 kvm_mmu_unload(&svm->vcpu);
2505                 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2506                 nested_svm_init_mmu_context(&svm->vcpu);
2507         }
2508
2509         /* Load the nested guest state */
2510         svm->vmcb->save.es = nested_vmcb->save.es;
2511         svm->vmcb->save.cs = nested_vmcb->save.cs;
2512         svm->vmcb->save.ss = nested_vmcb->save.ss;
2513         svm->vmcb->save.ds = nested_vmcb->save.ds;
2514         svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2515         svm->vmcb->save.idtr = nested_vmcb->save.idtr;
2516         kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
2517         svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2518         svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2519         svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2520         if (npt_enabled) {
2521                 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2522                 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
2523         } else
2524                 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
2525
2526         /* Guest paging mode is active - reset mmu */
2527         kvm_mmu_reset_context(&svm->vcpu);
2528
2529         svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
2530         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2531         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2532         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
2533
2534         /* In case we don't even reach vcpu_run, the fields are not updated */
2535         svm->vmcb->save.rax = nested_vmcb->save.rax;
2536         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2537         svm->vmcb->save.rip = nested_vmcb->save.rip;
2538         svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
2539         svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
2540         svm->vmcb->save.cpl = nested_vmcb->save.cpl;
2541
2542         svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
2543         svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
2544
2545         /* cache intercepts */
2546         svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
2547         svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
2548         svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
2549         svm->nested.intercept            = nested_vmcb->control.intercept;
2550
2551         svm_flush_tlb(&svm->vcpu);
2552         svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
2553         if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
2554                 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
2555         else
2556                 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
2557
2558         if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
2559                 /* We only want the cr8 intercept bits of the guest */
2560                 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
2561                 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
2562         }
2563
2564         /* We don't want to see VMMCALLs from a nested guest */
2565         clr_intercept(svm, INTERCEPT_VMMCALL);
2566
2567         svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
2568         svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
2569         svm->vmcb->control.int_state = nested_vmcb->control.int_state;
2570         svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
2571         svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
2572         svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
2573
2574         nested_svm_unmap(page);
2575
2576         /* Enter Guest-Mode */
2577         enter_guest_mode(&svm->vcpu);
2578
2579         /*
2580          * Merge guest and host intercepts - must be called with vcpu in
2581          * guest-mode to take effect here
2582          */
2583         recalc_intercepts(svm);
2584
2585         svm->nested.vmcb = vmcb_gpa;
2586
2587         enable_gif(svm);
2588
2589         mark_all_dirty(svm->vmcb);
2590
2591         return true;
2592 }
2593
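/*
 * Copy the state handled by VMLOAD/VMSAVE (FS, GS, TR, LDTR, the
 * KERNEL_GS_BASE and SYSCALL/SYSENTER MSRs) from one VMCB to another.
 */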
2594 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
2595 {
2596         to_vmcb->save.fs = from_vmcb->save.fs;
2597         to_vmcb->save.gs = from_vmcb->save.gs;
2598         to_vmcb->save.tr = from_vmcb->save.tr;
2599         to_vmcb->save.ldtr = from_vmcb->save.ldtr;
2600         to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
2601         to_vmcb->save.star = from_vmcb->save.star;
2602         to_vmcb->save.lstar = from_vmcb->save.lstar;
2603         to_vmcb->save.cstar = from_vmcb->save.cstar;
2604         to_vmcb->save.sfmask = from_vmcb->save.sfmask;
2605         to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
2606         to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
2607         to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
2608 }
2609
2610 static int vmload_interception(struct vcpu_svm *svm)
2611 {
2612         struct vmcb *nested_vmcb;
2613         struct page *page;
2614
2615         if (nested_svm_check_permissions(svm))
2616                 return 1;
2617
2618         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2619         if (!nested_vmcb)
2620                 return 1;
2621
2622         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2623         skip_emulated_instruction(&svm->vcpu);
2624
2625         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
2626         nested_svm_unmap(page);
2627
2628         return 1;
2629 }
2630
2631 static int vmsave_interception(struct vcpu_svm *svm)
2632 {
2633         struct vmcb *nested_vmcb;
2634         struct page *page;
2635
2636         if (nested_svm_check_permissions(svm))
2637                 return 1;
2638
2639         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2640         if (!nested_vmcb)
2641                 return 1;
2642
2643         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2644         skip_emulated_instruction(&svm->vcpu);
2645
2646         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
2647         nested_svm_unmap(page);
2648
2649         return 1;
2650 }
2651
2652 static int vmrun_interception(struct vcpu_svm *svm)
2653 {
2654         if (nested_svm_check_permissions(svm))
2655                 return 1;
2656
2657         /* Advance rip past the vmrun instruction before saving host state */
2658         kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
2659
2660         if (!nested_svm_vmrun(svm))
2661                 return 1;
2662
2663         if (!nested_svm_vmrun_msrpm(svm))
2664                 goto failed;
2665
2666         return 1;
2667
2668 failed:
2669
2670         svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
2671         svm->vmcb->control.exit_code_hi = 0;
2672         svm->vmcb->control.exit_info_1  = 0;
2673         svm->vmcb->control.exit_info_2  = 0;
2674
2675         nested_svm_vmexit(svm);
2676
2677         return 1;
2678 }
2679
2680 static int stgi_interception(struct vcpu_svm *svm)
2681 {
2682         if (nested_svm_check_permissions(svm))
2683                 return 1;
2684
2685         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2686         skip_emulated_instruction(&svm->vcpu);
2687         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2688
2689         enable_gif(svm);
2690
2691         return 1;
2692 }
2693
2694 static int clgi_interception(struct vcpu_svm *svm)
2695 {
2696         if (nested_svm_check_permissions(svm))
2697                 return 1;
2698
2699         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2700         skip_emulated_instruction(&svm->vcpu);
2701
2702         disable_gif(svm);
2703
2704         /* After a CLGI no interrupts should come */
2705         svm_clear_vintr(svm);
2706         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2707
2708         mark_dirty(svm->vmcb, VMCB_INTR);
2709
2710         return 1;
2711 }
2712
2713 static int invlpga_interception(struct vcpu_svm *svm)
2714 {
2715         struct kvm_vcpu *vcpu = &svm->vcpu;
2716
2717         trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
2718                           vcpu->arch.regs[VCPU_REGS_RAX]);
2719
2720         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2721         kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
2722
2723         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2724         skip_emulated_instruction(&svm->vcpu);
2725         return 1;
2726 }
2727
2728 static int skinit_interception(struct vcpu_svm *svm)
2729 {
2730         trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
2731
2732         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2733         return 1;
2734 }
2735
2736 static int xsetbv_interception(struct vcpu_svm *svm)
2737 {
2738         u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
2739         u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
2740
2741         if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
2742                 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2743                 skip_emulated_instruction(&svm->vcpu);
2744         }
2745
2746         return 1;
2747 }
2748
2749 static int invalid_op_interception(struct vcpu_svm *svm)
2750 {
2751         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2752         return 1;
2753 }
2754
2755 static int task_switch_interception(struct vcpu_svm *svm)
2756 {
2757         u16 tss_selector;
2758         int reason;
2759         int int_type = svm->vmcb->control.exit_int_info &
2760                 SVM_EXITINTINFO_TYPE_MASK;
2761         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2762         uint32_t type =
2763                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2764         uint32_t idt_v =
2765                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2766         bool has_error_code = false;
2767         u32 error_code = 0;
2768
2769         tss_selector = (u16)svm->vmcb->control.exit_info_1;
2770
2771         if (svm->vmcb->control.exit_info_2 &
2772             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2773                 reason = TASK_SWITCH_IRET;
2774         else if (svm->vmcb->control.exit_info_2 &
2775                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2776                 reason = TASK_SWITCH_JMP;
2777         else if (idt_v)
2778                 reason = TASK_SWITCH_GATE;
2779         else
2780                 reason = TASK_SWITCH_CALL;
2781
2782         if (reason == TASK_SWITCH_GATE) {
2783                 switch (type) {
2784                 case SVM_EXITINTINFO_TYPE_NMI:
2785                         svm->vcpu.arch.nmi_injected = false;
2786                         break;
2787                 case SVM_EXITINTINFO_TYPE_EXEPT:
2788                         if (svm->vmcb->control.exit_info_2 &
2789                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2790                                 has_error_code = true;
2791                                 error_code =
2792                                         (u32)svm->vmcb->control.exit_info_2;
2793                         }
2794                         kvm_clear_exception_queue(&svm->vcpu);
2795                         break;
2796                 case SVM_EXITINTINFO_TYPE_INTR:
2797                         kvm_clear_interrupt_queue(&svm->vcpu);
2798                         break;
2799                 default:
2800                         break;
2801                 }
2802         }
2803
2804         if (reason != TASK_SWITCH_GATE ||
2805             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2806             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2807              (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
2808                 skip_emulated_instruction(&svm->vcpu);
2809
2810         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2811                 int_vec = -1;
2812
2813         if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
2814                                 has_error_code, error_code) == EMULATE_FAIL) {
2815                 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2816                 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2817                 svm->vcpu.run->internal.ndata = 0;
2818                 return 0;
2819         }
2820         return 1;
2821 }
2822
2823 static int cpuid_interception(struct vcpu_svm *svm)
2824 {
2825         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2826         kvm_emulate_cpuid(&svm->vcpu);
2827         return 1;
2828 }
2829
2830 static int iret_interception(struct vcpu_svm *svm)
2831 {
2832         ++svm->vcpu.stat.nmi_window_exits;
2833         clr_intercept(svm, INTERCEPT_IRET);
2834         svm->vcpu.arch.hflags |= HF_IRET_MASK;
2835         svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
2836         return 1;
2837 }
2838
2839 static int invlpg_interception(struct vcpu_svm *svm)
2840 {
2841         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2842                 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2843
2844         kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
2845         skip_emulated_instruction(&svm->vcpu);
2846         return 1;
2847 }
2848
2849 static int emulate_on_interception(struct vcpu_svm *svm)
2850 {
2851         return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2852 }
2853
2854 static int rdpmc_interception(struct vcpu_svm *svm)
2855 {
2856         int err;
2857
2858         if (!static_cpu_has(X86_FEATURE_NRIPS))
2859                 return emulate_on_interception(svm);
2860
2861         err = kvm_rdpmc(&svm->vcpu);
2862         kvm_complete_insn_gp(&svm->vcpu, err);
2863
2864         return 1;
2865 }
2866
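/*
 * In guest mode, a CR0 write that changes bits outside of
 * SVM_CR0_SELECTIVE_MASK is reflected to the nested hypervisor as an
 * SVM_EXIT_CR0_SEL_WRITE #vmexit if it has enabled the selective CR0
 * write intercept.
 */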
2867 bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
2868 {
2869         unsigned long cr0 = svm->vcpu.arch.cr0;
2870         bool ret = false;
2871         u64 intercept;
2872
2873         intercept = svm->nested.intercept;
2874
2875         if (!is_guest_mode(&svm->vcpu) ||
2876             (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
2877                 return false;
2878
2879         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2880         val &= ~SVM_CR0_SELECTIVE_MASK;
2881
2882         if (cr0 ^ val) {
2883                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2884                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2885         }
2886
2887         return ret;
2888 }
2889
2890 #define CR_VALID (1ULL << 63)
2891
2892 static int cr_interception(struct vcpu_svm *svm)
2893 {
2894         int reg, cr;
2895         unsigned long val;
2896         int err;
2897
2898         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2899                 return emulate_on_interception(svm);
2900
2901         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2902                 return emulate_on_interception(svm);
2903
2904         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2905         cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2906
2907         err = 0;
2908         if (cr >= 16) { /* mov to cr */
2909                 cr -= 16;
2910                 val = kvm_register_read(&svm->vcpu, reg);
2911                 switch (cr) {
2912                 case 0:
2913                         if (!check_selective_cr0_intercepted(svm, val))
2914                                 err = kvm_set_cr0(&svm->vcpu, val);
2915                         else
2916                                 return 1;
2917
2918                         break;
2919                 case 3:
2920                         err = kvm_set_cr3(&svm->vcpu, val);
2921                         break;
2922                 case 4:
2923                         err = kvm_set_cr4(&svm->vcpu, val);
2924                         break;
2925                 case 8:
2926                         err = kvm_set_cr8(&svm->vcpu, val);
2927                         break;
2928                 default:
2929                         WARN(1, "unhandled write to CR%d", cr);
2930                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2931                         return 1;
2932                 }
2933         } else { /* mov from cr */
2934                 switch (cr) {
2935                 case 0:
2936                         val = kvm_read_cr0(&svm->vcpu);
2937                         break;
2938                 case 2:
2939                         val = svm->vcpu.arch.cr2;
2940                         break;
2941                 case 3:
2942                         val = kvm_read_cr3(&svm->vcpu);
2943                         break;
2944                 case 4:
2945                         val = kvm_read_cr4(&svm->vcpu);
2946                         break;
2947                 case 8:
2948                         val = kvm_get_cr8(&svm->vcpu);
2949                         break;
2950                 default:
2951                         WARN(1, "unhandled read from CR%d", cr);
2952                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2953                         return 1;
2954                 }
2955                 kvm_register_write(&svm->vcpu, reg, val);
2956         }
2957         kvm_complete_insn_gp(&svm->vcpu, err);
2958
2959         return 1;
2960 }
2961
2962 static int dr_interception(struct vcpu_svm *svm)
2963 {
2964         int reg, dr;
2965         unsigned long val;
2966         int err;
2967
2968         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2969                 return emulate_on_interception(svm);
2970
2971         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2972         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2973
2974         if (dr >= 16) { /* mov to DRn */
2975                 val = kvm_register_read(&svm->vcpu, reg);
2976                 kvm_set_dr(&svm->vcpu, dr - 16, val);
2977         } else {
2978                 err = kvm_get_dr(&svm->vcpu, dr, &val);
2979                 if (!err)
2980                         kvm_register_write(&svm->vcpu, reg, val);
2981         }
2982
2983         skip_emulated_instruction(&svm->vcpu);
2984
2985         return 1;
2986 }
2987
2988 static int cr8_write_interception(struct vcpu_svm *svm)
2989 {
2990         struct kvm_run *kvm_run = svm->vcpu.run;
2991         int r;
2992
2993         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2994         /* instruction emulation calls kvm_set_cr8() */
2995         r = cr_interception(svm);
2996         if (irqchip_in_kernel(svm->vcpu.kvm)) {
2997                 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
2998                 return r;
2999         }
3000         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
3001                 return r;
3002         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3003         return 0;
3004 }
3005
3006 u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
3007 {
3008         struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
3009         return vmcb->control.tsc_offset +
3010                 svm_scale_tsc(vcpu, host_tsc);
3011 }
3012
3013 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
3014 {
3015         struct vcpu_svm *svm = to_svm(vcpu);
3016
3017         switch (ecx) {
3018         case MSR_IA32_TSC: {
3019                 *data = svm->vmcb->control.tsc_offset +
3020                         svm_scale_tsc(vcpu, native_read_tsc());
3021
3022                 break;
3023         }
3024         case MSR_STAR:
3025                 *data = svm->vmcb->save.star;
3026                 break;
3027 #ifdef CONFIG_X86_64
3028         case MSR_LSTAR:
3029                 *data = svm->vmcb->save.lstar;
3030                 break;
3031         case MSR_CSTAR:
3032                 *data = svm->vmcb->save.cstar;
3033                 break;
3034         case MSR_KERNEL_GS_BASE:
3035                 *data = svm->vmcb->save.kernel_gs_base;
3036                 break;
3037         case MSR_SYSCALL_MASK:
3038                 *data = svm->vmcb->save.sfmask;
3039                 break;
3040 #endif
3041         case MSR_IA32_SYSENTER_CS:
3042                 *data = svm->vmcb->save.sysenter_cs;
3043                 break;
3044         case MSR_IA32_SYSENTER_EIP:
3045                 *data = svm->sysenter_eip;
3046                 break;
3047         case MSR_IA32_SYSENTER_ESP:
3048                 *data = svm->sysenter_esp;
3049                 break;
3050         /*
3051          * Nobody will change the following 5 values in the VMCB so we can
3052          * safely return them on rdmsr. They will always be 0 until LBRV is
3053          * implemented.
3054          */
3055         case MSR_IA32_DEBUGCTLMSR:
3056                 *data = svm->vmcb->save.dbgctl;
3057                 break;
3058         case MSR_IA32_LASTBRANCHFROMIP:
3059                 *data = svm->vmcb->save.br_from;
3060                 break;
3061         case MSR_IA32_LASTBRANCHTOIP:
3062                 *data = svm->vmcb->save.br_to;
3063                 break;
3064         case MSR_IA32_LASTINTFROMIP:
3065                 *data = svm->vmcb->save.last_excp_from;
3066                 break;
3067         case MSR_IA32_LASTINTTOIP:
3068                 *data = svm->vmcb->save.last_excp_to;
3069                 break;
3070         case MSR_VM_HSAVE_PA:
3071                 *data = svm->nested.hsave_msr;
3072                 break;
3073         case MSR_VM_CR:
3074                 *data = svm->nested.vm_cr_msr;
3075                 break;
3076         case MSR_IA32_UCODE_REV:
3077                 *data = 0x01000065;
3078                 break;
3079         default:
3080                 return kvm_get_msr_common(vcpu, ecx, data);
3081         }
3082         return 0;
3083 }
3084
3085 static int rdmsr_interception(struct vcpu_svm *svm)
3086 {
3087         u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
3088         u64 data;
3089
3090         if (svm_get_msr(&svm->vcpu, ecx, &data)) {
3091                 trace_kvm_msr_read_ex(ecx);
3092                 kvm_inject_gp(&svm->vcpu, 0);
3093         } else {
3094                 trace_kvm_msr_read(ecx, data);
3095
3096                 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
3097                 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
3098                 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3099                 skip_emulated_instruction(&svm->vcpu);
3100         }
3101         return 1;
3102 }
3103
3104 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3105 {
3106         struct vcpu_svm *svm = to_svm(vcpu);
3107         int svm_dis, chg_mask;
3108
3109         if (data & ~SVM_VM_CR_VALID_MASK)
3110                 return 1;
3111
3112         chg_mask = SVM_VM_CR_VALID_MASK;
3113
3114         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3115                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3116
3117         svm->nested.vm_cr_msr &= ~chg_mask;
3118         svm->nested.vm_cr_msr |= (data & chg_mask);
3119
3120         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3121
3122         /* check for svm_disable while efer.svme is set */
3123         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3124                 return 1;
3125
3126         return 0;
3127 }
3128
3129 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
3130 {
3131         struct vcpu_svm *svm = to_svm(vcpu);
3132
3133         u32 ecx = msr->index;
3134         u64 data = msr->data;
3135         switch (ecx) {
3136         case MSR_IA32_TSC:
3137                 kvm_write_tsc(vcpu, msr);
3138                 break;
3139         case MSR_STAR:
3140                 svm->vmcb->save.star = data;
3141                 break;
3142 #ifdef CONFIG_X86_64
3143         case MSR_LSTAR:
3144                 svm->vmcb->save.lstar = data;
3145                 break;
3146         case MSR_CSTAR:
3147                 svm->vmcb->save.cstar = data;
3148                 break;
3149         case MSR_KERNEL_GS_BASE:
3150                 svm->vmcb->save.kernel_gs_base = data;
3151                 break;
3152         case MSR_SYSCALL_MASK:
3153                 svm->vmcb->save.sfmask = data;
3154                 break;
3155 #endif
3156         case MSR_IA32_SYSENTER_CS:
3157                 svm->vmcb->save.sysenter_cs = data;
3158                 break;
3159         case MSR_IA32_SYSENTER_EIP:
3160                 svm->sysenter_eip = data;
3161                 svm->vmcb->save.sysenter_eip = data;
3162                 break;
3163         case MSR_IA32_SYSENTER_ESP:
3164                 svm->sysenter_esp = data;
3165                 svm->vmcb->save.sysenter_esp = data;
3166                 break;
3167         case MSR_IA32_DEBUGCTLMSR:
3168                 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
3169                         vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3170                                     __func__, data);
3171                         break;
3172                 }
3173                 if (data & DEBUGCTL_RESERVED_BITS)
3174                         return 1;
3175
3176                 svm->vmcb->save.dbgctl = data;
3177                 mark_dirty(svm->vmcb, VMCB_LBR);
3178                 if (data & (1ULL<<0))
3179                         svm_enable_lbrv(svm);
3180                 else
3181                         svm_disable_lbrv(svm);
3182                 break;
3183         case MSR_VM_HSAVE_PA:
3184                 svm->nested.hsave_msr = data;
3185                 break;
3186         case MSR_VM_CR:
3187                 return svm_set_vm_cr(vcpu, data);
3188         case MSR_VM_IGNNE:
3189                 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3190                 break;
3191         default:
3192                 return kvm_set_msr_common(vcpu, msr);
3193         }
3194         return 0;
3195 }
3196
3197 static int wrmsr_interception(struct vcpu_svm *svm)
3198 {
3199         struct msr_data msr;
3200         u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
3201         u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
3202                 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
3203
3204         msr.data = data;
3205         msr.index = ecx;
3206         msr.host_initiated = false;
3207
3208         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3209         if (svm_set_msr(&svm->vcpu, &msr)) {
3210                 trace_kvm_msr_write_ex(ecx, data);
3211                 kvm_inject_gp(&svm->vcpu, 0);
3212         } else {
3213                 trace_kvm_msr_write(ecx, data);
3214                 skip_emulated_instruction(&svm->vcpu);
3215         }
3216         return 1;
3217 }
3218
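/*
 * For MSR intercepts, exit_info_1 distinguishes WRMSR (1) from RDMSR (0).
 */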
3219 static int msr_interception(struct vcpu_svm *svm)
3220 {
3221         if (svm->vmcb->control.exit_info_1)
3222                 return wrmsr_interception(svm);
3223         else
3224                 return rdmsr_interception(svm);
3225 }
3226

3227 static int interrupt_window_interception(struct vcpu_svm *svm)
3228 {
3229         struct kvm_run *kvm_run = svm->vcpu.run;
3230
3231         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3232         svm_clear_vintr(svm);
3233         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3234         mark_dirty(svm->vmcb, VMCB_INTR);
3235         ++svm->vcpu.stat.irq_window_exits;
3236         /*
3237          * If user space is waiting to inject interrupts, exit as soon as
3238          * possible
3239          */
3240         if (!irqchip_in_kernel(svm->vcpu.kvm) &&
3241             kvm_run->request_interrupt_window &&
3242             !kvm_cpu_has_interrupt(&svm->vcpu)) {
3243                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
3244                 return 0;
3245         }
3246
3247         return 1;
3248 }
3249
3250 static int pause_interception(struct vcpu_svm *svm)
3251 {
3252         kvm_vcpu_on_spin(&(svm->vcpu));
3253         return 1;
3254 }
3255
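/*
 * Exit-code dispatch table used by handle_exit().  Each handler returns 1 to
 * resume the guest or 0 to complete the exit in user space.
 */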
3256 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
3257         [SVM_EXIT_READ_CR0]                     = cr_interception,
3258         [SVM_EXIT_READ_CR3]                     = cr_interception,
3259         [SVM_EXIT_READ_CR4]                     = cr_interception,
3260         [SVM_EXIT_READ_CR8]                     = cr_interception,
3261         [SVM_EXIT_CR0_SEL_WRITE]                = emulate_on_interception,
3262         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
3263         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
3264         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
3265         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
3266         [SVM_EXIT_READ_DR0]                     = dr_interception,
3267         [SVM_EXIT_READ_DR1]                     = dr_interception,
3268         [SVM_EXIT_READ_DR2]                     = dr_interception,
3269         [SVM_EXIT_READ_DR3]                     = dr_interception,
3270         [SVM_EXIT_READ_DR4]                     = dr_interception,
3271         [SVM_EXIT_READ_DR5]                     = dr_interception,
3272         [SVM_EXIT_READ_DR6]                     = dr_interception,
3273         [SVM_EXIT_READ_DR7]                     = dr_interception,
3274         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
3275         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
3276         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
3277         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
3278         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
3279         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
3280         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
3281         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
3282         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
3283         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
3284         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
3285         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
3286         [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
3287         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
3288         [SVM_EXIT_INTR]                         = intr_interception,
3289         [SVM_EXIT_NMI]                          = nmi_interception,
3290         [SVM_EXIT_SMI]                          = nop_on_interception,
3291         [SVM_EXIT_INIT]                         = nop_on_interception,
3292         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
3293         [SVM_EXIT_RDPMC]                        = rdpmc_interception,
3294         [SVM_EXIT_CPUID]                        = cpuid_interception,
3295         [SVM_EXIT_IRET]                         = iret_interception,
3296         [SVM_EXIT_INVD]                         = emulate_on_interception,
3297         [SVM_EXIT_PAUSE]                        = pause_interception,
3298         [SVM_EXIT_HLT]                          = halt_interception,
3299         [SVM_EXIT_INVLPG]                       = invlpg_interception,
3300         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
3301         [SVM_EXIT_IOIO]                         = io_interception,
3302         [SVM_EXIT_MSR]                          = msr_interception,
3303         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
3304         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
3305         [SVM_EXIT_VMRUN]                        = vmrun_interception,
3306         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
3307         [SVM_EXIT_VMLOAD]                       = vmload_interception,
3308         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
3309         [SVM_EXIT_STGI]                         = stgi_interception,
3310         [SVM_EXIT_CLGI]                         = clgi_interception,
3311         [SVM_EXIT_SKINIT]                       = skinit_interception,
3312         [SVM_EXIT_WBINVD]                       = emulate_on_interception,
3313         [SVM_EXIT_MONITOR]                      = invalid_op_interception,
3314         [SVM_EXIT_MWAIT]                        = invalid_op_interception,
3315         [SVM_EXIT_XSETBV]                       = xsetbv_interception,
3316         [SVM_EXIT_NPF]                          = pf_interception,
3317 };
3318
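/*
 * Dump the VMCB control and state-save areas to the kernel log, used when a
 * VMRUN fails so the offending guest state can be inspected.
 */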
3319 static void dump_vmcb(struct kvm_vcpu *vcpu)
3320 {
3321         struct vcpu_svm *svm = to_svm(vcpu);
3322         struct vmcb_control_area *control = &svm->vmcb->control;
3323         struct vmcb_save_area *save = &svm->vmcb->save;
3324
3325         pr_err("VMCB Control Area:\n");
3326         pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
3327         pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
3328         pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
3329         pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
3330         pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
3331         pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
3332         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3333         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3334         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3335         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3336         pr_err("%-20s%d\n", "asid:", control->asid);
3337         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3338         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3339         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3340         pr_err("%-20s%08x\n", "int_state:", control->int_state);
3341         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3342         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3343         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3344         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3345         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3346         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3347         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3348         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3349         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3350         pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
3351         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3352         pr_err("VMCB State Save Area:\n");
3353         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3354                "es:",
3355                save->es.selector, save->es.attrib,
3356                save->es.limit, save->es.base);
3357         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3358                "cs:",
3359                save->cs.selector, save->cs.attrib,
3360                save->cs.limit, save->cs.base);
3361         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3362                "ss:",
3363                save->ss.selector, save->ss.attrib,
3364                save->ss.limit, save->ss.base);
3365         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3366                "ds:",
3367                save->ds.selector, save->ds.attrib,
3368                save->ds.limit, save->ds.base);
3369         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3370                "fs:",
3371                save->fs.selector, save->fs.attrib,
3372                save->fs.limit, save->fs.base);
3373         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3374                "gs:",
3375                save->gs.selector, save->gs.attrib,
3376                save->gs.limit, save->gs.base);
3377         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3378                "gdtr:",
3379                save->gdtr.selector, save->gdtr.attrib,
3380                save->gdtr.limit, save->gdtr.base);
3381         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3382                "ldtr:",
3383                save->ldtr.selector, save->ldtr.attrib,
3384                save->ldtr.limit, save->ldtr.base);
3385         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3386                "idtr:",
3387                save->idtr.selector, save->idtr.attrib,
3388                save->idtr.limit, save->idtr.base);
3389         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3390                "tr:",
3391                save->tr.selector, save->tr.attrib,
3392                save->tr.limit, save->tr.base);
3393         pr_err("cpl:            %d                efer:         %016llx\n",
3394                 save->cpl, save->efer);
3395         pr_err("%-15s %016llx %-13s %016llx\n",
3396                "cr0:", save->cr0, "cr2:", save->cr2);
3397         pr_err("%-15s %016llx %-13s %016llx\n",
3398                "cr3:", save->cr3, "cr4:", save->cr4);
3399         pr_err("%-15s %016llx %-13s %016llx\n",
3400                "dr6:", save->dr6, "dr7:", save->dr7);
3401         pr_err("%-15s %016llx %-13s %016llx\n",
3402                "rip:", save->rip, "rflags:", save->rflags);
3403         pr_err("%-15s %016llx %-13s %016llx\n",
3404                "rsp:", save->rsp, "rax:", save->rax);
3405         pr_err("%-15s %016llx %-13s %016llx\n",
3406                "star:", save->star, "lstar:", save->lstar);
3407         pr_err("%-15s %016llx %-13s %016llx\n",
3408                "cstar:", save->cstar, "sfmask:", save->sfmask);
3409         pr_err("%-15s %016llx %-13s %016llx\n",
3410                "kernel_gs_base:", save->kernel_gs_base,
3411                "sysenter_cs:", save->sysenter_cs);
3412         pr_err("%-15s %016llx %-13s %016llx\n",
3413                "sysenter_esp:", save->sysenter_esp,
3414                "sysenter_eip:", save->sysenter_eip);
3415         pr_err("%-15s %016llx %-13s %016llx\n",
3416                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3417         pr_err("%-15s %016llx %-13s %016llx\n",
3418                "br_from:", save->br_from, "br_to:", save->br_to);
3419         pr_err("%-15s %016llx %-13s %016llx\n",
3420                "excp_from:", save->last_excp_from,
3421                "excp_to:", save->last_excp_to);
3422 }
3423
3424 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
3425 {
3426         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3427
3428         *info1 = control->exit_info_1;
3429         *info2 = control->exit_info_2;
3430 }
3431
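/*
 * Top-level exit handling: sync CR0/CR3 back from the VMCB, emulate a pending
 * nested #VMEXIT if one is required, let a nested (L1) hypervisor claim the
 * exit, and finally dispatch to the svm_exit_handlers table.
 */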
3432 static int handle_exit(struct kvm_vcpu *vcpu)
3433 {
3434         struct vcpu_svm *svm = to_svm(vcpu);
3435         struct kvm_run *kvm_run = vcpu->run;
3436         u32 exit_code = svm->vmcb->control.exit_code;
3437
3438         if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
3439                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3440         if (npt_enabled)
3441                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3442
3443         if (unlikely(svm->nested.exit_required)) {
3444                 nested_svm_vmexit(svm);
3445                 svm->nested.exit_required = false;
3446
3447                 return 1;
3448         }
3449
3450         if (is_guest_mode(vcpu)) {
3451                 int vmexit;
3452
3453                 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
3454                                         svm->vmcb->control.exit_info_1,
3455                                         svm->vmcb->control.exit_info_2,
3456                                         svm->vmcb->control.exit_int_info,
3457                                         svm->vmcb->control.exit_int_info_err,
3458                                         KVM_ISA_SVM);
3459
3460                 vmexit = nested_svm_exit_special(svm);
3461
3462                 if (vmexit == NESTED_EXIT_CONTINUE)
3463                         vmexit = nested_svm_exit_handled(svm);
3464
3465                 if (vmexit == NESTED_EXIT_DONE)
3466                         return 1;
3467         }
3468
3469         svm_complete_interrupts(svm);
3470
3471         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3472                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3473                 kvm_run->fail_entry.hardware_entry_failure_reason
3474                         = svm->vmcb->control.exit_code;
3475                 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
3476                 dump_vmcb(vcpu);
3477                 return 0;
3478         }
3479
3480         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
3481             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
3482             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3483             exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
3484                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
3485                        "exit_code 0x%x\n",
3486                        __func__, svm->vmcb->control.exit_int_info,
3487                        exit_code);
3488
3489         if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
3490             || !svm_exit_handlers[exit_code]) {
3491                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3492                 kvm_run->hw.hardware_exit_reason = exit_code;
3493                 return 0;
3494         }
3495
3496         return svm_exit_handlers[exit_code](svm);
3497 }
3498
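/*
 * After a world switch TR has to be reloaded, but LTR faults on a TSS
 * descriptor that is marked busy, so force the type back to "available"
 * first.
 */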
3499 static void reload_tss(struct kvm_vcpu *vcpu)
3500 {
3501         int cpu = raw_smp_processor_id();
3502
3503         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3504         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
3505         load_TR_desc();
3506 }
3507
3508 static void pre_svm_run(struct vcpu_svm *svm)
3509 {
3510         int cpu = raw_smp_processor_id();
3511
3512         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3513
3514         /* FIXME: handle wraparound of asid_generation */
3515         if (svm->asid_generation != sd->asid_generation)
3516                 new_asid(svm, sd);
3517 }
3518
3519 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3520 {
3521         struct vcpu_svm *svm = to_svm(vcpu);
3522
3523         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3524         vcpu->arch.hflags |= HF_NMI_MASK;
3525         set_intercept(svm, INTERCEPT_IRET);
3526         ++vcpu->stat.nmi_injections;
3527 }
3528
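/*
 * Program a virtual interrupt (V_IRQ) into the VMCB.  Used together with
 * svm_set_vintr() from enable_irq_window() to get a VINTR exit as soon as
 * the guest can take an interrupt; the priority is forced to 0xf instead of
 * being derived from the vector.
 */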
3529 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
3530 {
3531         struct vmcb_control_area *control;
3532
3533         control = &svm->vmcb->control;
3534         control->int_vector = irq;
3535         control->int_ctl &= ~V_INTR_PRIO_MASK;
3536         control->int_ctl |= V_IRQ_MASK |
3537                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
3538         mark_dirty(svm->vmcb, VMCB_INTR);
3539 }
3540
3541 static void svm_set_irq(struct kvm_vcpu *vcpu)
3542 {
3543         struct vcpu_svm *svm = to_svm(vcpu);
3544
3545         BUG_ON(!(gif_set(svm)));
3546
3547         trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3548         ++vcpu->stat.irq_injections;
3549
3550         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3551                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
3552 }
3553
3554 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3555 {
3556         struct vcpu_svm *svm = to_svm(vcpu);
3557
3558         if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3559                 return;
3560
3561         if (irr == -1)
3562                 return;
3563
3564         if (tpr >= irr)
3565                 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3566 }
3567
3568 static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
3569 {
3570         return;
3571 }
3572
3573 static int svm_vm_has_apicv(struct kvm *kvm)
3574 {
3575         return 0;
3576 }
3577
3578 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
3579 {
3580         return;
3581 }
3582
3583 static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
3584 {
3585         return;
3586 }
3587
3588 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
3589 {
3590         return;
3591 }
3592
3593 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
3594 {
3595         struct vcpu_svm *svm = to_svm(vcpu);
3596         struct vmcb *vmcb = svm->vmcb;
3597         int ret;
3598         ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
3599               !(svm->vcpu.arch.hflags & HF_NMI_MASK);
3600         ret = ret && gif_set(svm) && nested_svm_nmi(svm);
3601
3602         return ret;
3603 }
3604
3605 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3606 {
3607         struct vcpu_svm *svm = to_svm(vcpu);
3608
3609         return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3610 }
3611
3612 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3613 {
3614         struct vcpu_svm *svm = to_svm(vcpu);
3615
3616         if (masked) {
3617                 svm->vcpu.arch.hflags |= HF_NMI_MASK;
3618                 set_intercept(svm, INTERCEPT_IRET);
3619         } else {
3620                 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
3621                 clr_intercept(svm, INTERCEPT_IRET);
3622         }
3623 }
3624
3625 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3626 {
3627         struct vcpu_svm *svm = to_svm(vcpu);
3628         struct vmcb *vmcb = svm->vmcb;
3629         int ret;
3630
3631         if (!gif_set(svm) ||
3632              (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
3633                 return 0;
3634
3635         ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
3636
3637         if (is_guest_mode(vcpu))
3638                 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
3639
3640         return ret;
3641 }
3642
3643 static int enable_irq_window(struct kvm_vcpu *vcpu)
3644 {
3645         struct vcpu_svm *svm = to_svm(vcpu);
3646
3647         /*
3648          * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3649          * 1, because that's a separate STGI/VMRUN intercept.  The next time we
3650          * get that intercept, this function will be called again and we will
3651          * then get the VINTR intercept.
3652          */
3653         if (gif_set(svm) && nested_svm_intr(svm)) {
3654                 svm_set_vintr(svm);
3655                 svm_inject_irq(svm, 0x0);
3656         }
3657         return 0;
3658 }
3659
3660 static int enable_nmi_window(struct kvm_vcpu *vcpu)
3661 {
3662         struct vcpu_svm *svm = to_svm(vcpu);
3663
3664         if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3665             == HF_NMI_MASK)
3666                 return 0; /* IRET will cause a vm exit */
3667
3668         /*
3669          * Something prevents the NMI from being injected. Single step over the
3670          * possible problem (IRET or exception injection or interrupt shadow)
3671          */
3672         svm->nmi_singlestep = true;
3673         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3674         update_db_bp_intercept(vcpu);
3675         return 0;
3676 }
3677
3678 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3679 {
3680         return 0;
3681 }
3682
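/*
 * Flush the guest's TLB entries: use the flush-by-ASID control if the CPU
 * supports it, otherwise force a fresh ASID to be allocated on the next
 * VMRUN by invalidating the cached generation.
 */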
3683 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
3684 {
3685         struct vcpu_svm *svm = to_svm(vcpu);
3686
3687         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3688                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3689         else
3690                 svm->asid_generation--;
3691 }
3692
3693 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3694 {
3695 }
3696
3697 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3698 {
3699         struct vcpu_svm *svm = to_svm(vcpu);
3700
3701         if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3702                 return;
3703
3704         if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
3705                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3706                 kvm_set_cr8(vcpu, cr8);
3707         }
3708 }
3709
3710 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3711 {
3712         struct vcpu_svm *svm = to_svm(vcpu);
3713         u64 cr8;
3714
3715         if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3716                 return;
3717
3718         cr8 = kvm_get_cr8(vcpu);
3719         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3720         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3721 }
3722
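/*
 * Re-queue whatever event was being delivered when the #VMEXIT occurred
 * (reported in EXITINTINFO) so it is injected again on the next entry.
 * Software exceptions are not re-injected; the instruction is re-executed
 * instead, rewinding RIP if we had injected an INT3 before.
 */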
3723 static void svm_complete_interrupts(struct vcpu_svm *svm)
3724 {
3725         u8 vector;
3726         int type;
3727         u32 exitintinfo = svm->vmcb->control.exit_int_info;
3728         unsigned int3_injected = svm->int3_injected;
3729
3730         svm->int3_injected = 0;
3731
3732         /*
3733          * If we've made progress since setting HF_IRET_MASK, we've
3734          * executed an IRET and can allow NMI injection.
3735          */
3736         if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3737             && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
3738                 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3739                 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3740         }
3741
3742         svm->vcpu.arch.nmi_injected = false;
3743         kvm_clear_exception_queue(&svm->vcpu);
3744         kvm_clear_interrupt_queue(&svm->vcpu);
3745
3746         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3747                 return;
3748
3749         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3750
3751         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3752         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3753
3754         switch (type) {
3755         case SVM_EXITINTINFO_TYPE_NMI:
3756                 svm->vcpu.arch.nmi_injected = true;
3757                 break;
3758         case SVM_EXITINTINFO_TYPE_EXEPT:
3759                 /*
3760                  * In case of software exceptions, do not reinject the vector,
3761                  * but re-execute the instruction instead. Rewind RIP first
3762                  * if we emulated INT3 before.
3763                  */
3764                 if (kvm_exception_is_soft(vector)) {
3765                         if (vector == BP_VECTOR && int3_injected &&
3766                             kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3767                                 kvm_rip_write(&svm->vcpu,
3768                                               kvm_rip_read(&svm->vcpu) -
3769                                               int3_injected);
3770                         break;
3771                 }
3772                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3773                         u32 err = svm->vmcb->control.exit_int_info_err;
3774                         kvm_requeue_exception_e(&svm->vcpu, vector, err);
3775
3776                 } else
3777                         kvm_requeue_exception(&svm->vcpu, vector);
3778                 break;
3779         case SVM_EXITINTINFO_TYPE_INTR:
3780                 kvm_queue_interrupt(&svm->vcpu, vector, false);
3781                 break;
3782         default:
3783                 break;
3784         }
3785 }
3786
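/*
 * An entry into the guest was abandoned after an event had already been set
 * up for injection: move it from EVENTINJ into EXITINTINFO and let
 * svm_complete_interrupts() re-queue it.
 */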
3787 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3788 {
3789         struct vcpu_svm *svm = to_svm(vcpu);
3790         struct vmcb_control_area *control = &svm->vmcb->control;
3791
3792         control->exit_int_info = control->event_inj;
3793         control->exit_int_info_err = control->event_inj_err;
3794         control->event_inj = 0;
3795         svm_complete_interrupts(svm);
3796 }
3797
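/*
 * The actual guest entry: RAX/RSP/RIP are copied into the VMCB, GIF is
 * cleared with CLGI, and the inline assembly below does VMLOAD/VMRUN/VMSAVE.
 * Interrupts are re-enabled while GIF is clear, so nothing is delivered
 * until the STGI further down.
 */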
3798 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3799 {
3800         struct vcpu_svm *svm = to_svm(vcpu);
3801
3802         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3803         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3804         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3805
3806         /*
3807          * A vmexit emulation is required before the vcpu can be executed
3808          * again.
3809          */
3810         if (unlikely(svm->nested.exit_required))
3811                 return;
3812
3813         pre_svm_run(svm);
3814
3815         sync_lapic_to_cr8(vcpu);
3816
3817         svm->vmcb->save.cr2 = vcpu->arch.cr2;
3818
3819         clgi();
3820
3821         local_irq_enable();
3822
3823         asm volatile (
3824                 "push %%" _ASM_BP "; \n\t"
3825                 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
3826                 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
3827                 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
3828                 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
3829                 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
3830                 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
3831 #ifdef CONFIG_X86_64
3832                 "mov %c[r8](%[svm]),  %%r8  \n\t"
3833                 "mov %c[r9](%[svm]),  %%r9  \n\t"
3834                 "mov %c[r10](%[svm]), %%r10 \n\t"
3835                 "mov %c[r11](%[svm]), %%r11 \n\t"
3836                 "mov %c[r12](%[svm]), %%r12 \n\t"
3837                 "mov %c[r13](%[svm]), %%r13 \n\t"
3838                 "mov %c[r14](%[svm]), %%r14 \n\t"
3839                 "mov %c[r15](%[svm]), %%r15 \n\t"
3840 #endif
3841
3842                 /* Enter guest mode */
3843                 "push %%" _ASM_AX " \n\t"
3844                 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
3845                 __ex(SVM_VMLOAD) "\n\t"
3846                 __ex(SVM_VMRUN) "\n\t"
3847                 __ex(SVM_VMSAVE) "\n\t"
3848                 "pop %%" _ASM_AX " \n\t"
3849
3850                 /* Save guest registers, load host registers */
3851                 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
3852                 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
3853                 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
3854                 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
3855                 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
3856                 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
3857 #ifdef CONFIG_X86_64
3858                 "mov %%r8,  %c[r8](%[svm]) \n\t"
3859                 "mov %%r9,  %c[r9](%[svm]) \n\t"
3860                 "mov %%r10, %c[r10](%[svm]) \n\t"
3861                 "mov %%r11, %c[r11](%[svm]) \n\t"
3862                 "mov %%r12, %c[r12](%[svm]) \n\t"
3863                 "mov %%r13, %c[r13](%[svm]) \n\t"
3864                 "mov %%r14, %c[r14](%[svm]) \n\t"
3865                 "mov %%r15, %c[r15](%[svm]) \n\t"
3866 #endif
3867                 "pop %%" _ASM_BP
3868                 :
3869                 : [svm]"a"(svm),
3870                   [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
3871                   [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
3872                   [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
3873                   [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
3874                   [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
3875                   [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
3876                   [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
3877 #ifdef CONFIG_X86_64
3878                   , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
3879                   [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
3880                   [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
3881                   [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
3882                   [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
3883                   [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
3884                   [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
3885                   [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
3886 #endif
3887                 : "cc", "memory"
3888 #ifdef CONFIG_X86_64
3889                 , "rbx", "rcx", "rdx", "rsi", "rdi"
3890                 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
3891 #else
3892                 , "ebx", "ecx", "edx", "esi", "edi"
3893 #endif
3894                 );
3895
3896 #ifdef CONFIG_X86_64
3897         wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3898 #else
3899         loadsegment(fs, svm->host.fs);
3900 #ifndef CONFIG_X86_32_LAZY_GS
3901         loadsegment(gs, svm->host.gs);
3902 #endif
3903 #endif
3904
3905         reload_tss(vcpu);
3906
3907         local_irq_disable();
3908
3909         vcpu->arch.cr2 = svm->vmcb->save.cr2;
3910         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3911         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3912         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3913
3914         trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
3915
3916         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3917                 kvm_before_handle_nmi(&svm->vcpu);
3918
3919         stgi();
3920
3921         /* Any pending NMI will happen here */
3922
3923         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3924                 kvm_after_handle_nmi(&svm->vcpu);
3925
3926         sync_cr8_to_lapic(vcpu);
3927
3928         svm->next_rip = 0;
3929
3930         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3931
3932         /* if exit due to PF check for async PF */
3933         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3934                 svm->apf_reason = kvm_read_and_reset_pf_reason();
3935
3936         if (npt_enabled) {
3937                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3938                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3939         }
3940
3941         /*
3942          * We need to handle MC intercepts here before the vcpu has a chance to
3943          * change the physical cpu
3944          */
3945         if (unlikely(svm->vmcb->control.exit_code ==
3946                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
3947                 svm_handle_mce(svm);
3948
3949         mark_all_clean(svm->vmcb);
3950 }
3951
3952 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3953 {
3954         struct vcpu_svm *svm = to_svm(vcpu);
3955
3956         svm->vmcb->save.cr3 = root;
3957         mark_dirty(svm->vmcb, VMCB_CR);
3958         svm_flush_tlb(vcpu);
3959 }
3960
3961 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3962 {
3963         struct vcpu_svm *svm = to_svm(vcpu);
3964
3965         svm->vmcb->control.nested_cr3 = root;
3966         mark_dirty(svm->vmcb, VMCB_NPT);
3967
3968         /* Also sync guest cr3 here in case we live migrate */
3969         svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
3970         mark_dirty(svm->vmcb, VMCB_CR);
3971
3972         svm_flush_tlb(vcpu);
3973 }
3974
3975 static int is_disabled(void)
3976 {
3977         u64 vm_cr;
3978
3979         rdmsrl(MSR_VM_CR, vm_cr);
3980         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
3981                 return 1;
3982
3983         return 0;
3984 }
3985
3986 static void
3987 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
3988 {
3989         /*
3990          * Patch in the VMMCALL instruction:
3991          */
3992         hypercall[0] = 0x0f;
3993         hypercall[1] = 0x01;
3994         hypercall[2] = 0xd9;
3995 }
3996
3997 static void svm_check_processor_compat(void *rtn)
3998 {
3999         *(int *)rtn = 0;
4000 }
4001
4002 static bool svm_cpu_has_accelerated_tpr(void)
4003 {
4004         return false;
4005 }
4006
4007 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4008 {
4009         return 0;
4010 }
4011
4012 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
4013 {
4014 }
4015
4016 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
4017 {
4018         switch (func) {
4019         case 0x80000001:
4020                 if (nested)
4021                         entry->ecx |= (1 << 2); /* Set SVM bit */
4022                 break;
4023         case 0x8000000A:
4024                 entry->eax = 1; /* SVM revision 1 */
4025                 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
4026                                    ASID emulation to nested SVM */
4027                 entry->ecx = 0; /* Reserved */
4028                 entry->edx = 0; /* By default do not support any
4029                                    additional features */
4030
4031                 /* Support next_rip if host supports it */
4032                 if (boot_cpu_has(X86_FEATURE_NRIPS))
4033                         entry->edx |= SVM_FEATURE_NRIP;
4034
4035                 /* Support NPT for the guest if enabled */
4036                 if (npt_enabled)
4037                         entry->edx |= SVM_FEATURE_NPT;
4038
4039                 break;
4040         }
4041 }
4042
4043 static int svm_get_lpage_level(void)
4044 {
4045         return PT_PDPE_LEVEL;
4046 }
4047
4048 static bool svm_rdtscp_supported(void)
4049 {
4050         return false;
4051 }
4052
4053 static bool svm_invpcid_supported(void)
4054 {
4055         return false;
4056 }
4057
4058 static bool svm_has_wbinvd_exit(void)
4059 {
4060         return true;
4061 }
4062
4063 static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
4064 {
4065         struct vcpu_svm *svm = to_svm(vcpu);
4066
4067         set_exception_intercept(svm, NM_VECTOR);
4068         update_cr0_intercept(svm);
4069 }
4070
4071 #define PRE_EX(exit)  { .exit_code = (exit), \
4072                         .stage = X86_ICPT_PRE_EXCEPT, }
4073 #define POST_EX(exit) { .exit_code = (exit), \
4074                         .stage = X86_ICPT_POST_EXCEPT, }
4075 #define POST_MEM(exit) { .exit_code = (exit), \
4076                         .stage = X86_ICPT_POST_MEMACCESS, }
4077
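/*
 * Map the emulator's x86_intercept codes to the SVM exit code an L1
 * hypervisor would see for that instruction, plus the emulation stage at
 * which the intercept check must be made.
 */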
4078 static const struct __x86_intercept {
4079         u32 exit_code;
4080         enum x86_intercept_stage stage;
4081 } x86_intercept_map[] = {
4082         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
4083         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
4084         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
4085         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
4086         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
4087         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
4088         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
4089         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
4090         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
4091         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
4092         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
4093         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
4094         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
4095         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
4096         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
4097         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
4098         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
4099         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
4100         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
4101         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
4102         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
4103         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
4104         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
4105         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
4106         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
4107         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
4108         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
4109         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
4110         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
4111         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
4112         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
4113         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
4114         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
4115         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
4116         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
4117         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
4118         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
4119         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
4120         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
4121         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
4122         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
4123         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
4124         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
4125         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
4126         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
4127         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
4128 };
4129
4130 #undef PRE_EX
4131 #undef POST_EX
4132 #undef POST_MEM
4133
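/*
 * When kvm emulates an instruction on behalf of a nested (L2) guest, check
 * whether L1 intercepts it: synthesize the exit code and exit information
 * hardware would have produced and ask nested_svm_exit_handled() whether a
 * #VMEXIT to L1 is due.
 */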
4134 static int svm_check_intercept(struct kvm_vcpu *vcpu,
4135                                struct x86_instruction_info *info,
4136                                enum x86_intercept_stage stage)
4137 {
4138         struct vcpu_svm *svm = to_svm(vcpu);
4139         int vmexit, ret = X86EMUL_CONTINUE;
4140         struct __x86_intercept icpt_info;
4141         struct vmcb *vmcb = svm->vmcb;
4142
4143         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4144                 goto out;
4145
4146         icpt_info = x86_intercept_map[info->intercept];
4147
4148         if (stage != icpt_info.stage)
4149                 goto out;
4150
4151         switch (icpt_info.exit_code) {
4152         case SVM_EXIT_READ_CR0:
4153                 if (info->intercept == x86_intercept_cr_read)
4154                         icpt_info.exit_code += info->modrm_reg;
4155                 break;
4156         case SVM_EXIT_WRITE_CR0: {
4157                 unsigned long cr0, val;
4158                 u64 intercept;
4159
4160                 if (info->intercept == x86_intercept_cr_write)
4161                         icpt_info.exit_code += info->modrm_reg;
4162
4163                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
4164                         break;
4165
4166                 intercept = svm->nested.intercept;
4167
4168                 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
4169                         break;
4170
4171                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4172                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
4173
4174                 if (info->intercept == x86_intercept_lmsw) {
4175                         cr0 &= 0xfUL;
4176                         val &= 0xfUL;
4177                         /* lmsw can't clear PE - catch this here */
4178                         if (cr0 & X86_CR0_PE)
4179                                 val |= X86_CR0_PE;
4180                 }
4181
4182                 if (cr0 ^ val)
4183                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4184
4185                 break;
4186         }
4187         case SVM_EXIT_READ_DR0:
4188         case SVM_EXIT_WRITE_DR0:
4189                 icpt_info.exit_code += info->modrm_reg;
4190                 break;
4191         case SVM_EXIT_MSR:
4192                 if (info->intercept == x86_intercept_wrmsr)
4193                         vmcb->control.exit_info_1 = 1;
4194                 else
4195                         vmcb->control.exit_info_1 = 0;
4196                 break;
4197         case SVM_EXIT_PAUSE:
4198                 /*
4199                  * We only get this intercept for NOP, but PAUSE is REP NOP,
4200                  * so check for the REP prefix here
4201                  */
4202                 if (info->rep_prefix != REPE_PREFIX)
4203                         goto out;
                     break;
4204         case SVM_EXIT_IOIO: {
4205                 u64 exit_info;
4206                 u32 bytes;
4207
4208                 exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
4209
4210                 if (info->intercept == x86_intercept_in ||
4211                     info->intercept == x86_intercept_ins) {
4212                         exit_info |= SVM_IOIO_TYPE_MASK;
4213                         bytes = info->src_bytes;
4214                 } else {
4215                         bytes = info->dst_bytes;
4216                 }
4217
4218                 if (info->intercept == x86_intercept_outs ||
4219                     info->intercept == x86_intercept_ins)
4220                         exit_info |= SVM_IOIO_STR_MASK;
4221
4222                 if (info->rep_prefix)
4223                         exit_info |= SVM_IOIO_REP_MASK;
4224
4225                 bytes = min(bytes, 4u);
4226
4227                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4228
4229                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4230
4231                 vmcb->control.exit_info_1 = exit_info;
4232                 vmcb->control.exit_info_2 = info->next_rip;
4233
4234                 break;
4235         }
4236         default:
4237                 break;
4238         }
4239
4240         vmcb->control.next_rip  = info->next_rip;
4241         vmcb->control.exit_code = icpt_info.exit_code;
4242         vmexit = nested_svm_exit_handled(svm);
4243
4244         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4245                                            : X86EMUL_CONTINUE;
4246
4247 out:
4248         return ret;
4249 }
4250
4251 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
4252 {
4253         local_irq_enable();
4254 }
4255
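/* The callbacks registered with the generic KVM x86 code by svm_init(). */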
4256 static struct kvm_x86_ops svm_x86_ops = {
4257         .cpu_has_kvm_support = has_svm,
4258         .disabled_by_bios = is_disabled,
4259         .hardware_setup = svm_hardware_setup,
4260         .hardware_unsetup = svm_hardware_unsetup,
4261         .check_processor_compatibility = svm_check_processor_compat,
4262         .hardware_enable = svm_hardware_enable,
4263         .hardware_disable = svm_hardware_disable,
4264         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
4265
4266         .vcpu_create = svm_create_vcpu,
4267         .vcpu_free = svm_free_vcpu,
4268         .vcpu_reset = svm_vcpu_reset,
4269
4270         .prepare_guest_switch = svm_prepare_guest_switch,
4271         .vcpu_load = svm_vcpu_load,
4272         .vcpu_put = svm_vcpu_put,
4273
4274         .update_db_bp_intercept = update_db_bp_intercept,
4275         .get_msr = svm_get_msr,
4276         .set_msr = svm_set_msr,
4277         .get_segment_base = svm_get_segment_base,
4278         .get_segment = svm_get_segment,
4279         .set_segment = svm_set_segment,
4280         .get_cpl = svm_get_cpl,
4281         .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
4282         .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
4283         .decache_cr3 = svm_decache_cr3,
4284         .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
4285         .set_cr0 = svm_set_cr0,
4286         .set_cr3 = svm_set_cr3,
4287         .set_cr4 = svm_set_cr4,
4288         .set_efer = svm_set_efer,
4289         .get_idt = svm_get_idt,
4290         .set_idt = svm_set_idt,
4291         .get_gdt = svm_get_gdt,
4292         .set_gdt = svm_set_gdt,
4293         .set_dr7 = svm_set_dr7,
4294         .cache_reg = svm_cache_reg,
4295         .get_rflags = svm_get_rflags,
4296         .set_rflags = svm_set_rflags,
4297         .fpu_activate = svm_fpu_activate,
4298         .fpu_deactivate = svm_fpu_deactivate,
4299
4300         .tlb_flush = svm_flush_tlb,
4301
4302         .run = svm_vcpu_run,
4303         .handle_exit = handle_exit,
4304         .skip_emulated_instruction = skip_emulated_instruction,
4305         .set_interrupt_shadow = svm_set_interrupt_shadow,
4306         .get_interrupt_shadow = svm_get_interrupt_shadow,
4307         .patch_hypercall = svm_patch_hypercall,
4308         .set_irq = svm_set_irq,
4309         .set_nmi = svm_inject_nmi,
4310         .queue_exception = svm_queue_exception,
4311         .cancel_injection = svm_cancel_injection,
4312         .interrupt_allowed = svm_interrupt_allowed,
4313         .nmi_allowed = svm_nmi_allowed,
4314         .get_nmi_mask = svm_get_nmi_mask,
4315         .set_nmi_mask = svm_set_nmi_mask,
4316         .enable_nmi_window = enable_nmi_window,
4317         .enable_irq_window = enable_irq_window,
4318         .update_cr8_intercept = update_cr8_intercept,
4319         .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
4320         .vm_has_apicv = svm_vm_has_apicv,
4321         .load_eoi_exitmap = svm_load_eoi_exitmap,
4322         .hwapic_isr_update = svm_hwapic_isr_update,
4323         .sync_pir_to_irr = svm_sync_pir_to_irr,
4324
4325         .set_tss_addr = svm_set_tss_addr,
4326         .get_tdp_level = get_npt_level,
4327         .get_mt_mask = svm_get_mt_mask,
4328
4329         .get_exit_info = svm_get_exit_info,
4330
4331         .get_lpage_level = svm_get_lpage_level,
4332
4333         .cpuid_update = svm_cpuid_update,
4334
4335         .rdtscp_supported = svm_rdtscp_supported,
4336         .invpcid_supported = svm_invpcid_supported,
4337
4338         .set_supported_cpuid = svm_set_supported_cpuid,
4339
4340         .has_wbinvd_exit = svm_has_wbinvd_exit,
4341
4342         .set_tsc_khz = svm_set_tsc_khz,
4343         .read_tsc_offset = svm_read_tsc_offset,
4344         .write_tsc_offset = svm_write_tsc_offset,
4345         .adjust_tsc_offset = svm_adjust_tsc_offset,
4346         .compute_tsc_offset = svm_compute_tsc_offset,
4347         .read_l1_tsc = svm_read_l1_tsc,
4348
4349         .set_tdp_cr3 = set_tdp_cr3,
4350
4351         .check_intercept = svm_check_intercept,
4352         .handle_external_intr = svm_handle_external_intr,
4353 };
4354
4355 static int __init svm_init(void)
4356 {
4357         return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
4358                         __alignof__(struct vcpu_svm), THIS_MODULE);
4359 }
4360
4361 static void __exit svm_exit(void)
4362 {
4363         kvm_exit();
4364 }
4365
4366 module_init(svm_init)
4367 module_exit(svm_exit)