KVM: MMU: cache mmio info on page fault path
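In short: kvm_handle_bad_page() learns which guest address faulted and with what access rights, so that an MMIO fault's details can be cached on the vcpu via vcpu_cache_mmio_info() and reused when the access is emulated, while mmu_sync_roots() invalidates that cache. Several small cleanups ride along: two helper predicates leave mmu.c, a redundant check in rmap_add() moves to its caller, and the shadow-walk termination logic is simplified.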
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index da0f3b0810765e04de2eda2cd167d693f9f35a55..d1986b7dcec7323f527cbec77abae373a28b18c6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -217,11 +217,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static bool is_write_protection(struct kvm_vcpu *vcpu)
-{
-       return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
 static int is_cpuid_PSE36(void)
 {
        return 1;
@@ -243,11 +238,6 @@ static int is_large_pte(u64 pte)
        return pte & PT_PAGE_SIZE_MASK;
 }
 
-static int is_writable_pte(unsigned long pte)
-{
-       return pte & PT_WRITABLE_MASK;
-}
-
 static int is_dirty_gpte(unsigned long pte)
 {
        return pte & PT_DIRTY_MASK;
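The two predicates deleted above, is_write_protection() and is_writable_pte(), are not dropped from the kernel; a single-file blobdiff simply cannot show their destination. Since this series needs them outside mmu.c, they presumably become inline helpers in a shared header such as arch/x86/kvm/mmu.h, roughly:

/*
 * Sketch of the assumed new home (arch/x86/kvm/mmu.h); the diff for
 * that file is not shown here.
 */
static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}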
@@ -743,9 +733,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
        struct kvm_mmu_page *sp;
        unsigned long *rmapp;
 
-       if (!is_rmap_spte(*spte))
-               return 0;
-
        sp = page_header(__pa(spte));
        kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
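The is_rmap_spte() check removed here is not lost: rmap_add()'s caller now performs an equivalent test before calling in, as the mmu_set_spte() hunk below shows.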
@@ -1517,10 +1504,6 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
        if (iterator->level < PT_PAGE_TABLE_LEVEL)
                return false;
 
-       if (iterator->level == PT_PAGE_TABLE_LEVEL)
-               if (is_large_pte(*iterator->sptep))
-                       return false;
-
        iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
        iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
        return true;
@@ -1528,6 +1511,11 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
 
 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
 {
+       if (is_last_spte(*iterator->sptep, iterator->level)) {
+               iterator->level = 0;
+               return;
+       }
+
        iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
        --iterator->level;
 }
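Taken together, the two shadow-walk hunks move the termination test: rather than shadow_walk_okay() special-casing a large pte at the last level, shadow_walk_next() now sets level to 0 once the walk has visited a last-level (or large) spte, so the next shadow_walk_okay() call fails its level < PT_PAGE_TABLE_LEVEL check. For context, the walk is driven by this macro, already present in mmu.c:

#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))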
@@ -2086,11 +2074,13 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        if (!was_rmapped && is_large_pte(*sptep))
                ++vcpu->kvm->stat.lpages;
 
-       page_header_update_slot(vcpu->kvm, sptep, gfn);
-       if (!was_rmapped) {
-               rmap_count = rmap_add(vcpu, sptep, gfn);
-               if (rmap_count > RMAP_RECYCLE_THRESHOLD)
-                       rmap_recycle(vcpu, sptep, gfn);
+       if (is_shadow_present_pte(*sptep)) {
+               page_header_update_slot(vcpu->kvm, sptep, gfn);
+               if (!was_rmapped) {
+                       rmap_count = rmap_add(vcpu, sptep, gfn);
+                       if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+                               rmap_recycle(vcpu, sptep, gfn);
+               }
        }
        kvm_release_pfn_clean(pfn);
        if (speculative) {
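This is where the check dropped from rmap_add() lands: the slot update and rmap bookkeeping are now skipped entirely unless the spte actually became present, instead of relying on rmap_add() to bail out internally.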
@@ -2247,15 +2237,17 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
        send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
+                              unsigned access, gfn_t gfn, pfn_t pfn)
 {
        kvm_release_pfn_clean(pfn);
        if (is_hwpoison_pfn(pfn)) {
-               kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
+               kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
                return 0;
        } else if (is_fault_pfn(pfn))
                return -EFAULT;
 
+       vcpu_cache_mmio_info(vcpu, gva, gfn, access);
        return 1;
 }
 
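kvm_handle_bad_page() now receives the vcpu plus the faulting address and access bits, so that when the pfn is neither hwpoisoned nor a hard fault (i.e. the gfn maps to no memslot and the access will be handled as MMIO), the fault details can be cached before returning. vcpu_cache_mmio_info() itself is added elsewhere in the series; a minimal sketch, assuming mmio_gva/access/mmio_gfn fields on kvm_vcpu_arch:

/* Sketch only: the field names are assumptions, not shown by this diff. */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;	/* keyed per page */
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
}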
@@ -2337,7 +2329,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 
        /* mmio */
        if (is_error_pfn(pfn))
-               return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+               return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
 
        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2564,6 +2556,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
 
+       vcpu_clear_mmio_info(vcpu, ~0ul);
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
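Once the shadow roots are resynced, the guest page tables may translate differently, so any cached gva-to-gfn MMIO mapping could be stale; ~0ul serves as a clear-everything wildcard. A matching sketch of the clear helper, under the same assumed field names:

/* Sketch only: pairs with the vcpu_cache_mmio_info() sketch above. */
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	/* Clear either unconditionally (gva == ~0ul) or on a gva match. */
	if (gva != ~0ul && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
	vcpu->arch.mmio_gfn = 0;
}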
@@ -2710,7 +2703,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 
        /* mmio */
        if (is_error_pfn(pfn))
-               return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+               return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;
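The two call sites differ deliberately: nonpaging_map() runs with guest paging disabled, where the faulting address v is effectively the guest physical address and no pte-level permissions apply, hence ACC_ALL; tdp_page_fault() handles a guest physical address directly and has no guest virtual address to record, so it passes 0 for both the gva and the access bits.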