diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7ce92f78f3376154762c3a4826ed6ad7f2a22ce6..2d4477c7147372c28ed84f5a513ca553dc46979c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -621,7 +621,7 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
        return NULL;
 }
 
-static void rmap_write_protect(struct kvm *kvm, u64 gfn)
+static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
        unsigned long *rmapp;
        u64 *spte;
@@ -667,8 +667,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
                spte = rmap_next(kvm, rmapp, spte);
        }
 
-       if (write_protected)
-               kvm_flush_remote_tlbs(kvm);
+       return write_protected;
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
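The two hunks above change rmap_write_protect() from flushing remote TLBs itself to returning whether any spte was write-protected, so callers can batch the flush. The caller-side pattern (used by the call sites updated later in this diff) is simply:

    /* not part of the patch: caller-side use of the new return value */
    if (rmap_write_protect(kvm, gfn))
            kvm_flush_remote_tlbs(kvm);
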
@@ -794,9 +793,11 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
        sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+       INIT_LIST_HEAD(&sp->oos_link);
        ASSERT(is_empty_shadow_page(sp->spt));
        bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
        sp->multimapped = 0;
+       sp->global = 1;
        sp->parent_pte = parent_pte;
        --vcpu->kvm->arch.n_free_mmu_pages;
        return sp;
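kvm_mmu_alloc_page() now initializes two members, oos_link and global, that this diff does not show being added to struct kvm_mmu_page. A rough sketch of the assumed companion change in arch/x86/include/asm/kvm_host.h (field types guessed from how they are used here):

    /* sketch only: assumed header-side additions, not shown in this diff */
    struct kvm_mmu_page {
            struct list_head link;
            struct hlist_node hash_link;
            struct list_head oos_link;      /* entry on kvm->arch.oos_global_pages while unsync */
            /* ... existing fields ... */
            bool unsync;
            bool global;                    /* shadow page backs a guest-global (PGE) mapping */
            /* ... existing fields ... */
    };
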
@@ -1006,7 +1007,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
        for_each_unsync_children(sp->unsync_child_bitmap, i) {
                u64 ent = sp->spt[i];
 
-               if (is_shadow_present_pte(ent)) {
+               if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
                        struct kvm_mmu_page *child;
                        child = page_header(ent & PT64_BASE_ADDR_MASK);
 
@@ -1067,10 +1068,18 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
        return NULL;
 }
 
+static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       list_del(&sp->oos_link);
+       --kvm->stat.mmu_unsync_global;
+}
+
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        WARN_ON(!sp->unsync);
        sp->unsync = 0;
+       if (sp->global)
+               kvm_unlink_unsync_global(kvm, sp);
        --kvm->stat.mmu_unsync;
 }
 
@@ -1083,7 +1092,8 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                return 1;
        }
 
-       rmap_write_protect(vcpu->kvm, sp->gfn);
+       if (rmap_write_protect(vcpu->kvm, sp->gfn))
+               kvm_flush_remote_tlbs(vcpu->kvm);
        kvm_unlink_unsync_page(vcpu->kvm, sp);
        if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
                kvm_mmu_zap_page(vcpu->kvm, sp);
@@ -1162,6 +1172,14 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 
        kvm_mmu_pages_init(parent, &parents, &pages);
        while (mmu_unsync_walk(parent, &pages)) {
+               int protected = 0;
+
+               for_each_sp(pages, sp, parents, i)
+                       protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
+
+               if (protected)
+                       kvm_flush_remote_tlbs(vcpu->kvm);
+
                for_each_sp(pages, sp, parents, i) {
                        kvm_sync_page(vcpu, sp);
                        mmu_pages_clear_parents(&parents);
@@ -1226,7 +1244,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        sp->role = role;
        hlist_add_head(&sp->hash_link, bucket);
        if (!metaphysical) {
-               rmap_write_protect(vcpu->kvm, gfn);
+               if (rmap_write_protect(vcpu->kvm, gfn))
+                       kvm_flush_remote_tlbs(vcpu->kvm);
                account_shadowed(vcpu->kvm, gfn);
        }
        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
@@ -1250,6 +1269,8 @@ static int walk_shadow(struct kvm_shadow_walk *walker,
        if (level == PT32E_ROOT_LEVEL) {
                shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
                shadow_addr &= PT64_BASE_ADDR_MASK;
+               if (!shadow_addr)
+                       return 1;
                --level;
        }
 
@@ -1606,9 +1627,15 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                if (s->role.word != sp->role.word)
                        return 1;
        }
-       kvm_mmu_mark_parents_unsync(vcpu, sp);
        ++vcpu->kvm->stat.mmu_unsync;
        sp->unsync = 1;
+
+       if (sp->global) {
+               list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
+               ++vcpu->kvm->stat.mmu_unsync_global;
+       } else
+               kvm_mmu_mark_parents_unsync(vcpu, sp);
+
        mmu_convert_notrap(sp);
        return 0;
 }
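Globally-mapped pages that go out of sync are now queued on a per-VM list instead of having their parents marked unsync immediately; they are resynced in bulk by mmu_sync_global() further down. The list and the mmu_unsync_global statistic referenced here are presumably declared and initialized outside mmu.c; a hedged sketch:

    /* sketch only: assumed companion changes, not shown in this diff */
    struct kvm_arch {
            /* ... existing fields ... */
            struct list_head oos_global_pages;      /* unsync shadow pages for global mappings */
    };

    /* somewhere in VM creation (x86.c) the list would need initializing: */
    INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
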
@@ -1634,12 +1661,23 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int largepage,
-                   gfn_t gfn, pfn_t pfn, bool speculative,
+                   int global, gfn_t gfn, pfn_t pfn, bool speculative,
                    bool can_unsync)
 {
        u64 spte;
        int ret = 0;
        u64 mt_mask = shadow_mt_mask;
+       struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
+
+       if (!(vcpu->arch.cr4 & X86_CR4_PGE))
+               global = 0;
+       if (!global && sp->global) {
+               sp->global = 0;
+               if (sp->unsync) {
+                       kvm_unlink_unsync_global(vcpu->kvm, sp);
+                       kvm_mmu_mark_parents_unsync(vcpu, sp);
+               }
+       }
 
        /*
         * We don't set the accessed bit, since we sometimes want to see
@@ -1660,8 +1698,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
        if (largepage)
                spte |= PT_PAGE_SIZE_MASK;
        if (mt_mask) {
-               mt_mask = get_memory_type(vcpu, gfn) <<
-                         kvm_x86_ops->get_mt_mask_shift();
+               if (!kvm_is_mmio_pfn(pfn)) {
+                       mt_mask = get_memory_type(vcpu, gfn) <<
+                               kvm_x86_ops->get_mt_mask_shift();
+                       mt_mask |= VMX_EPT_IGMT_BIT;
+               } else
+                       mt_mask = MTRR_TYPE_UNCACHABLE <<
+                               kvm_x86_ops->get_mt_mask_shift();
                spte |= mt_mask;
        }
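For EPT, non-MMIO pages now get the guest-derived memory type plus the ignore-guest-PAT bit, while MMIO pages are forced uncachable. The VMX definitions relied on here are assumed to look roughly like this (per the EPT entry format, memory type in bits 5:3 and ignore-PAT at bit 6):

    /* assumed definitions from the VMX side, not shown in this diff */
    #define VMX_EPT_IGMT_BIT                (1ull << 6)
    #define VMX_EPT_MT_EPTE_SHIFT           3
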
 
@@ -1708,8 +1751,8 @@ set_pte:
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
-                        int *ptwrite, int largepage, gfn_t gfn,
-                        pfn_t pfn, bool speculative)
+                        int *ptwrite, int largepage, int global,
+                        gfn_t gfn, pfn_t pfn, bool speculative)
 {
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*shadow_pte);
@@ -1742,7 +1785,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                }
        }
        if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-                     dirty, largepage, gfn, pfn, speculative, true)) {
+                     dirty, largepage, global, gfn, pfn, speculative, true)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_x86_ops->tlb_flush(vcpu);
@@ -1799,7 +1842,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk,
            || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
                mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
                             0, walk->write, 1, &walk->pt_write,
-                            walk->largepage, gfn, walk->pfn, false);
+                            walk->largepage, 0, gfn, walk->pfn, false);
                ++vcpu->stat.pf_fixed;
                return 1;
        }
@@ -1986,6 +2029,15 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        }
 }
 
+static void mmu_sync_global(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_mmu_page *sp, *n;
+
+       list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
+               kvm_sync_page(vcpu, sp);
+}
+
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
        spin_lock(&vcpu->kvm->mmu_lock);
@@ -1993,6 +2045,13 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
+void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
+{
+       spin_lock(&vcpu->kvm->mmu_lock);
+       mmu_sync_global(vcpu);
+       spin_unlock(&vcpu->kvm->mmu_lock);
+}
+
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
        return vaddr;
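kvm_mmu_sync_global() gives the rest of the code a way to resync every out-of-sync global shadow page at once; presumably it is called from the control-register write paths in x86.c, since that is when guest-global translations must be revalidated. A hedged sketch of such a call site:

    /* sketch of an assumed caller in x86.c, not shown in this diff */
    void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
    {
            kvm_mmu_sync_global(vcpu);      /* resync unsync global shadow pages */
            /* ... existing CR3 handling ... */
    }
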
@@ -2391,7 +2450,8 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *new, int bytes)
+                      const u8 *new, int bytes,
+                      bool guest_initiated)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *sp;
@@ -2417,15 +2477,17 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, "pre pte write");
-       if (gfn == vcpu->arch.last_pt_write_gfn
-           && !last_updated_pte_accessed(vcpu)) {
-               ++vcpu->arch.last_pt_write_count;
-               if (vcpu->arch.last_pt_write_count >= 3)
-                       flooded = 1;
-       } else {
-               vcpu->arch.last_pt_write_gfn = gfn;
-               vcpu->arch.last_pt_write_count = 1;
-               vcpu->arch.last_pte_updated = NULL;
+       if (guest_initiated) {
+               if (gfn == vcpu->arch.last_pt_write_gfn
+                   && !last_updated_pte_accessed(vcpu)) {
+                       ++vcpu->arch.last_pt_write_count;
+                       if (vcpu->arch.last_pt_write_count >= 3)
+                               flooded = 1;
+               } else {
+                       vcpu->arch.last_pt_write_gfn = gfn;
+                       vcpu->arch.last_pt_write_count = 1;
+                       vcpu->arch.last_pte_updated = NULL;
+               }
        }
        index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
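kvm_mmu_pte_write() now takes a guest_initiated flag so that only writes the guest actually performed feed the write-flood detector; hypervisor-driven updates no longer skew last_pt_write_count. Callers in x86.c would be adjusted along these lines (a sketch, not shown in this diff):

    /* emulated guest write to a shadowed page table: keep the flood heuristic */
    kvm_mmu_pte_write(vcpu, gpa, val, bytes, true);

    /* hypervisor-initiated update (e.g. a PV MMU write): bypass the heuristic */
    kvm_mmu_pte_write(vcpu, gpa, val, bytes, false);
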
@@ -2565,9 +2627,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       spin_lock(&vcpu->kvm->mmu_lock);
        vcpu->arch.mmu.invlpg(vcpu, gva);
-       spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_mmu_flush_tlb(vcpu);
        ++vcpu->stat.invlpg;
 }