KVM: MMU: Use slab caches for shadow pages and their headers
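For context, the pattern this patch adopts is the standard Linux slab interface: create a named kmem_cache once at module init, allocate fixed-size objects from it on demand, and return them to the cache when done, instead of kzalloc()/kfree() or whole pages per object. The sketch below is illustrative only and is not part of the patch; the struct and function names are made up, and it uses the six-argument kmem_cache_create() of this kernel era, matching the calls in kvm_mmu_module_init() further down.

    #include <linux/slab.h>

    struct example_obj {                    /* illustrative fixed-size object */
            unsigned long payload[4];
    };

    static struct kmem_cache *example_cache;

    static int example_cache_init(void)
    {
            /* One cache per object type; the slab reuses freed objects
             * rather than returning to the page allocator each time. */
            example_cache = kmem_cache_create("example_obj",
                                              sizeof(struct example_obj),
                                              0, 0, NULL, NULL);
            return example_cache ? 0 : -ENOMEM;
    }

    static struct example_obj *example_obj_alloc(gfp_t gfp)
    {
            /* Zeroed allocation, mirroring kmem_cache_zalloc() in the patch. */
            return kmem_cache_zalloc(example_cache, gfp);
    }

    static void example_obj_free(struct example_obj *obj)
    {
            kmem_cache_free(example_cache, obj);
    }

    static void example_cache_exit(void)
    {
            if (example_cache)
                    kmem_cache_destroy(example_cache);
    }

The patch follows the same shape: dedicated caches for pte chains, rmap descriptors, shadow page tables (PAGE_SIZE-aligned), and shadow page headers, created in kvm_mmu_module_init() and torn down in kvm_mmu_module_exit().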
index a1a93368f314d4e89226c3ab1b78dd9934135d42..46491b4cd859158e9ffbc81d2fa8f28889a71254 100644
@@ -52,11 +52,15 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 static int dbg = 1;
 #endif
 
+#ifndef MMU_DEBUG
+#define ASSERT(x) do { } while (0)
+#else
 #define ASSERT(x)                                                      \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
+#endif
 
 #define PT64_PT_BITS 9
 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -131,7 +135,7 @@ static int dbg = 1;
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
 
 
-#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
+#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
 #define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
 
@@ -159,6 +163,11 @@ struct kvm_rmap_desc {
        struct kvm_rmap_desc *more;
 };
 
+static struct kmem_cache *pte_chain_cache;
+static struct kmem_cache *rmap_desc_cache;
+static struct kmem_cache *mmu_page_cache;
+static struct kmem_cache *mmu_page_header_cache;
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
        return vcpu->cr0 & CR0_WP_MASK;
@@ -196,14 +205,15 @@ static int is_rmap_pte(u64 pte)
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-                                 size_t objsize, int min)
+                                 struct kmem_cache *base_cache, int min,
+                                 gfp_t gfp_flags)
 {
        void *obj;
 
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               obj = kzalloc(objsize, GFP_NOWAIT);
+               obj = kmem_cache_zalloc(base_cache, gfp_flags);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
@@ -217,24 +227,49 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
                kfree(mc->objects[--mc->nobjs]);
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
 {
        int r;
 
        r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
-                                  sizeof(struct kvm_pte_chain), 4);
+                                  pte_chain_cache, 4, gfp_flags);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
-                                  sizeof(struct kvm_rmap_desc), 1);
+                                  rmap_desc_cache, 1, gfp_flags);
+       if (r)
+               goto out;
+       r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
+                                  mmu_page_cache, 4, gfp_flags);
+       if (r)
+               goto out;
+       r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+                                  mmu_page_header_cache, 4, gfp_flags);
 out:
        return r;
 }
 
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+{
+       int r;
+
+       r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
+       if (r < 0) {
+               spin_unlock(&vcpu->kvm->lock);
+               kvm_arch_ops->vcpu_put(vcpu);
+               r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
+               kvm_arch_ops->vcpu_load(vcpu);
+               spin_lock(&vcpu->kvm->lock);
+       }
+       return r;
+}
+
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
        mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
        mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+       mmu_free_memory_cache(&vcpu->mmu_page_cache);
+       mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
 }
 
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -390,13 +425,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
        struct kvm *kvm = vcpu->kvm;
        struct page *page;
-       struct kvm_memory_slot *slot;
        struct kvm_rmap_desc *desc;
        u64 *spte;
 
-       slot = gfn_to_memslot(kvm, gfn);
-       BUG_ON(!slot);
-       page = gfn_to_page(slot, gfn);
+       page = gfn_to_page(kvm, gfn);
+       BUG_ON(!page);
 
        while (page_private(page)) {
                if (!(page_private(page) & 1))
@@ -406,8 +439,8 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
                        spte = desc->shadow_ptes[0];
                }
                BUG_ON(!spte);
-               BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
-                      page_to_pfn(page) << PAGE_SHIFT);
+               BUG_ON((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT
+                      != page_to_pfn(page));
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                BUG_ON(!(*spte & PT_WRITABLE_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
@@ -417,13 +450,13 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
        }
 }
 
-static int is_empty_shadow_page(hpa_t page_hpa)
+#ifdef MMU_DEBUG
+static int is_empty_shadow_page(u64 *spt)
 {
        u64 *pos;
        u64 *end;
 
-       for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
-                     pos != end; pos++)
+       for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
                if (*pos != 0) {
                        printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
                               pos, *pos);
@@ -431,15 +464,15 @@ static int is_empty_shadow_page(hpa_t page_hpa)
                }
        return 1;
 }
+#endif
 
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
+static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+                             struct kvm_mmu_page *page_head)
 {
-       struct kvm_mmu_page *page_head = page_header(page_hpa);
-
-       ASSERT(is_empty_shadow_page(page_hpa));
+       ASSERT(is_empty_shadow_page(page_head->spt));
        list_del(&page_head->link);
-       page_head->page_hpa = page_hpa;
-       list_add(&page_head->link, &vcpu->free_pages);
+       mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
+       mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
        ++vcpu->kvm->n_free_mmu_pages;
 }
 
@@ -453,15 +486,16 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 {
        struct kvm_mmu_page *page;
 
-       if (list_empty(&vcpu->free_pages))
+       if (!vcpu->kvm->n_free_mmu_pages)
                return NULL;
 
-       page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
-       list_del(&page->link);
+       page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
+                                     sizeof *page);
+       page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+       set_page_private(virt_to_page(page->spt), (unsigned long)page);
        list_add(&page->link, &vcpu->kvm->active_mmu_pages);
-       ASSERT(is_empty_shadow_page(page->page_hpa));
+       ASSERT(is_empty_shadow_page(page->spt));
        page->slot_bitmap = 0;
-       page->global = 1;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
        --vcpu->kvm->n_free_mmu_pages;
@@ -569,6 +603,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
+                                            unsigned hugepage_access,
                                             u64 *parent_pte)
 {
        union kvm_mmu_page_role role;
@@ -582,6 +617,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        role.glevels = vcpu->mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
+       role.hugepage_access = hugepage_access;
        if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
@@ -616,7 +652,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
        u64 *pt;
        u64 ent;
 
-       pt = __va(page->page_hpa);
+       pt = page->spt;
 
        if (page->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -668,11 +704,9 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
        kvm_mmu_page_unlink_children(vcpu, page);
        if (!page->root_count) {
                hlist_del(&page->hash_link);
-               kvm_mmu_free_page(vcpu, page->page_hpa);
-       } else {
-               list_del(&page->link);
-               list_add(&page->link, &vcpu->kvm->active_mmu_pages);
-       }
+               kvm_mmu_free_page(vcpu, page);
+       } else
+               list_move(&page->link, &vcpu->kvm->active_mmu_pages);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -714,14 +748,12 @@ hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
-       struct kvm_memory_slot *slot;
        struct page *page;
 
        ASSERT((gpa & HPA_ERR_MASK) == 0);
-       slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
-       if (!slot)
+       page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+       if (!page)
                return gpa | HPA_ERR_MASK;
-       page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
                | (gpa & (PAGE_SIZE-1));
 }
@@ -735,6 +767,15 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
        return gpa_to_hpa(vcpu, gpa);
 }
 
+struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
+{
+       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+       if (gpa == UNMAPPED_GVA)
+               return NULL;
+       return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
+}
+
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
@@ -772,13 +813,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
-                                                    1, &table[index]);
+                                                    1, 0, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
                        }
 
-                       table[index] = new_table->page_hpa | PT_PRESENT_MASK
+                       table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
                                | PT_WRITABLE_MASK | PT_USER_MASK;
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
@@ -804,10 +845,12 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];
 
-               ASSERT(VALID_PAGE(root));
-               root &= PT64_BASE_ADDR_MASK;
-               page = page_header(root);
-               --page->root_count;
+               if (root) {
+                       ASSERT(VALID_PAGE(root));
+                       root &= PT64_BASE_ADDR_MASK;
+                       page = page_header(root);
+                       --page->root_count;
+               }
                vcpu->mmu.pae_root[i] = INVALID_PAGE;
        }
        vcpu->mmu.root_hpa = INVALID_PAGE;
@@ -827,8 +870,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
                ASSERT(!VALID_PAGE(root));
                page = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                       PT64_ROOT_LEVEL, 0, NULL);
-               root = page->page_hpa;
+                                       PT64_ROOT_LEVEL, 0, 0, NULL);
+               root = __pa(page->spt);
                ++page->root_count;
                vcpu->mmu.root_hpa = root;
                return;
@@ -838,14 +881,18 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                hpa_t root = vcpu->mmu.pae_root[i];
 
                ASSERT(!VALID_PAGE(root));
-               if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
+               if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
+                       if (!is_present_pte(vcpu->pdptrs[i])) {
+                               vcpu->mmu.pae_root[i] = 0;
+                               continue;
+                       }
                        root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
-               else if (vcpu->mmu.root_level == 0)
+               } else if (vcpu->mmu.root_level == 0)
                        root_gfn = 0;
                page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                        PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                       NULL);
-               root = page->page_hpa;
+                                       0, NULL);
+               root = __pa(page->spt);
                ++page->root_count;
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
@@ -903,7 +950,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 
 static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-       ++kvm_stat.tlb_flush;
+       ++vcpu->stat.tlb_flush;
        kvm_arch_ops->tlb_flush(vcpu);
 }
 
@@ -918,11 +965,6 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
        kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
 }
 
-static void mark_pagetable_nonglobal(void *shadow_pte)
-{
-       page_header(__pa(shadow_pte))->global = 0;
-}
-
 static inline void set_pte_common(struct kvm_vcpu *vcpu,
                             u64 *shadow_pte,
                             gpa_t gaddr,
@@ -940,9 +982,6 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
 
        *shadow_pte |= access_bits;
 
-       if (!(*shadow_pte & PT_GLOBAL_MASK))
-               mark_pagetable_nonglobal(shadow_pte);
-
        if (is_error_hpa(paddr)) {
                *shadow_pte |= gaddr;
                *shadow_pte |= PT_SHADOW_IO_MARK;
@@ -1061,6 +1100,7 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
 
+       mmu_topup_memory_caches(vcpu);
        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
        else if (is_long_mode(vcpu))
@@ -1093,22 +1133,56 @@ out:
        return r;
 }
 
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu_page *page,
+                                 u64 *spte)
+{
+       u64 pte;
+       struct kvm_mmu_page *child;
+
+       pte = *spte;
+       if (is_present_pte(pte)) {
+               if (page->role.level == PT_PAGE_TABLE_LEVEL)
+                       rmap_remove(vcpu, spte);
+               else {
+                       child = page_header(pte & PT64_BASE_ADDR_MASK);
+                       mmu_page_remove_parent_pte(vcpu, child, spte);
+               }
+       }
+       *spte = 0;
+}
+
+static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu_page *page,
+                                 u64 *spte,
+                                 const void *new, int bytes)
+{
+       if (page->role.level != PT_PAGE_TABLE_LEVEL)
+               return;
+
+       if (page->role.glevels == PT32_ROOT_LEVEL)
+               paging32_update_pte(vcpu, page, spte, new, bytes);
+       else
+               paging64_update_pte(vcpu, page, spte, new, bytes);
+}
+
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+                      const u8 *old, const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *page;
-       struct kvm_mmu_page *child;
        struct hlist_node *node, *n;
        struct hlist_head *bucket;
        unsigned index;
        u64 *spte;
-       u64 pte;
        unsigned offset = offset_in_page(gpa);
        unsigned pte_size;
        unsigned page_offset;
        unsigned misaligned;
+       unsigned quadrant;
        int level;
        int flooded = 0;
+       int npte;
 
        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
        if (gfn == vcpu->last_pt_write_gfn) {
@@ -1126,6 +1200,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
                        continue;
                pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+               misaligned |= bytes < 4;
                if (misaligned || flooded) {
                        /*
                         * Misaligned accesses are too much trouble to fix
@@ -1144,29 +1219,33 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
                }
                page_offset = offset;
                level = page->role.level;
+               npte = 1;
                if (page->role.glevels == PT32_ROOT_LEVEL) {
-                       page_offset <<= 1;          /* 32->64 */
+                       page_offset <<= 1;      /* 32->64 */
+                       /*
+                        * A 32-bit pde maps 4MB while the shadow pdes map
+                        * only 2MB.  So we need to double the offset again
+                        * and zap two pdes instead of one.
+                        */
+                       if (level == PT32_ROOT_LEVEL) {
+                               page_offset &= ~7; /* kill rounding error */
+                               page_offset <<= 1;
+                               npte = 2;
+                       }
+                       quadrant = page_offset >> PAGE_SHIFT;
                        page_offset &= ~PAGE_MASK;
+                       if (quadrant != page->role.quadrant)
+                               continue;
                }
-               spte = __va(page->page_hpa);
-               spte += page_offset / sizeof(*spte);
-               pte = *spte;
-               if (is_present_pte(pte)) {
-                       if (level == PT_PAGE_TABLE_LEVEL)
-                               rmap_remove(vcpu, spte);
-                       else {
-                               child = page_header(pte & PT64_BASE_ADDR_MASK);
-                               mmu_page_remove_parent_pte(vcpu, child, spte);
-                       }
+               spte = &page->spt[page_offset / sizeof(*spte)];
+               while (npte--) {
+                       mmu_pte_write_zap_pte(vcpu, page, spte);
+                       mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
+                       ++spte;
                }
-               *spte = 0;
        }
 }
 
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
-{
-}
-
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
@@ -1195,13 +1274,6 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu, page);
        }
-       while (!list_empty(&vcpu->free_pages)) {
-               page = list_entry(vcpu->free_pages.next,
-                                 struct kvm_mmu_page, link);
-               list_del(&page->link);
-               __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
-               page->page_hpa = INVALID_PAGE;
-       }
        free_page((unsigned long)vcpu->mmu.pae_root);
 }
 
@@ -1212,18 +1284,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
        ASSERT(vcpu);
 
-       for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
-               struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
-
-               INIT_LIST_HEAD(&page_header->link);
-               if ((page = alloc_page(GFP_KERNEL)) == NULL)
-                       goto error_1;
-               set_page_private(page, (unsigned long)page_header);
-               page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
-               memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
-               list_add(&page_header->link, &vcpu->free_pages);
-               ++vcpu->kvm->n_free_mmu_pages;
-       }
+       vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
 
        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
@@ -1248,7 +1309,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
-       ASSERT(list_empty(&vcpu->free_pages));
 
        return alloc_mmu_pages(vcpu);
 }
@@ -1257,7 +1317,6 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
-       ASSERT(!list_empty(&vcpu->free_pages));
 
        return init_kvm_mmu(vcpu);
 }
@@ -1283,7 +1342,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
                if (!test_bit(slot, &page->slot_bitmap))
                        continue;
 
-               pt = __va(page->page_hpa);
+               pt = page->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (pt[i] & PT_WRITABLE_MASK) {
@@ -1293,6 +1352,67 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
        }
 }
 
+void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
+{
+       destroy_kvm_mmu(vcpu);
+
+       while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
+               struct kvm_mmu_page *page;
+
+               page = container_of(vcpu->kvm->active_mmu_pages.next,
+                                   struct kvm_mmu_page, link);
+               kvm_mmu_zap_page(vcpu, page);
+       }
+
+       mmu_free_memory_caches(vcpu);
+       kvm_arch_ops->tlb_flush(vcpu);
+       init_kvm_mmu(vcpu);
+}
+
+void kvm_mmu_module_exit(void)
+{
+       if (pte_chain_cache)
+               kmem_cache_destroy(pte_chain_cache);
+       if (rmap_desc_cache)
+               kmem_cache_destroy(rmap_desc_cache);
+       if (mmu_page_cache)
+               kmem_cache_destroy(mmu_page_cache);
+       if (mmu_page_header_cache)
+               kmem_cache_destroy(mmu_page_header_cache);
+}
+
+int kvm_mmu_module_init(void)
+{
+       pte_chain_cache = kmem_cache_create("kvm_pte_chain",
+                                           sizeof(struct kvm_pte_chain),
+                                           0, 0, NULL, NULL);
+       if (!pte_chain_cache)
+               goto nomem;
+       rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
+                                           sizeof(struct kvm_rmap_desc),
+                                           0, 0, NULL, NULL);
+       if (!rmap_desc_cache)
+               goto nomem;
+
+       mmu_page_cache = kmem_cache_create("kvm_mmu_page",
+                                          PAGE_SIZE,
+                                          PAGE_SIZE, 0, NULL, NULL);
+       if (!mmu_page_cache)
+               goto nomem;
+
+       mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
+                                                 sizeof(struct kvm_mmu_page),
+                                                 0, 0, NULL, NULL);
+       if (!mmu_page_header_cache)
+               goto nomem;
+
+       return 0;
+
+nomem:
+       kvm_mmu_module_exit();
+       return -ENOMEM;
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
@@ -1315,7 +1435,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
                u64 ent = pt[i];
 
-               if (!ent & PT_PRESENT_MASK)
+               if (!(ent & PT_PRESENT_MASK))
                        continue;
 
                va = canonicalize(va);
@@ -1337,7 +1457,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 
 static void audit_mappings(struct kvm_vcpu *vcpu)
 {
-       int i;
+       unsigned i;
 
        if (vcpu->mmu.root_level == 4)
                audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
@@ -1389,7 +1509,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
        int i;
 
        list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-               u64 *pt = __va(page->page_hpa);
+               u64 *pt = page->spt;
 
                if (page->role.level != PT_PAGE_TABLE_LEVEL)
                        continue;