KVM: MMU: Use slab caches for shadow pages and their headers
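This patch converts the MMU's fixed per-vcpu page pool into slab caches: pte chains, rmap descriptors, shadow page tables, and shadow page headers each get their own kmem_cache, and the per-vcpu object caches are topped up from those slabs before they are needed. Below is a minimal, self-contained sketch of that slab pattern (create at module init, zeroed allocation on demand, destroy at module exit). The example_* names and struct are illustrative stand-ins, not identifiers from the patch; the six-argument kmem_cache_create() with NULL constructor/destructor matches the kernel era this patch targets (later kernels use a different signature).

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>

/* Illustrative stand-in for a header object backed by its own slab. */
struct example_header {
	struct list_head link;
	u64 *spt;
};

static struct kmem_cache *example_cache;

static int example_module_init(void)
{
	/* One slab sized exactly for the header object. */
	example_cache = kmem_cache_create("example_header",
					  sizeof(struct example_header),
					  0, 0, NULL, NULL);
	if (!example_cache)
		return -ENOMEM;
	return 0;
}

static struct example_header *example_alloc(gfp_t gfp_flags)
{
	/* Zeroed allocation, mirroring kmem_cache_zalloc() in the topup path. */
	return kmem_cache_zalloc(example_cache, gfp_flags);
}

static void example_free(struct example_header *h)
{
	kmem_cache_free(example_cache, h);
}

static void example_module_exit(void)
{
	if (example_cache)
		kmem_cache_destroy(example_cache);
}

Note that mmu_page_cache is created with PAGE_SIZE as both object size and alignment, so each shadow page table lands on a naturally aligned page; that is what allows __pa(page->spt) to be installed directly as a page-table base address.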
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index d81b9cd3465fcf80b9bf290d3aef907ea55098ac..46491b4cd859158e9ffbc81d2fa8f28889a71254 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -52,11 +52,15 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 static int dbg = 1;
 #endif
 
+#ifndef MMU_DEBUG
+#define ASSERT(x) do { } while (0)
+#else
 #define ASSERT(x)                                                      \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
+#endif
 
 #define PT64_PT_BITS 9
 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -159,6 +163,11 @@ struct kvm_rmap_desc {
        struct kvm_rmap_desc *more;
 };
 
+static struct kmem_cache *pte_chain_cache;
+static struct kmem_cache *rmap_desc_cache;
+static struct kmem_cache *mmu_page_cache;
+static struct kmem_cache *mmu_page_header_cache;
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
        return vcpu->cr0 & CR0_WP_MASK;
@@ -196,14 +205,15 @@ static int is_rmap_pte(u64 pte)
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-                                 size_t objsize, int min)
+                                 struct kmem_cache *base_cache, int min,
+                                 gfp_t gfp_flags)
 {
        void *obj;
 
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               obj = kzalloc(objsize, GFP_NOWAIT);
+               obj = kmem_cache_zalloc(base_cache, gfp_flags);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
@@ -217,24 +227,49 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
                kfree(mc->objects[--mc->nobjs]);
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
 {
        int r;
 
        r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
-                                  sizeof(struct kvm_pte_chain), 4);
+                                  pte_chain_cache, 4, gfp_flags);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
-                                  sizeof(struct kvm_rmap_desc), 1);
+                                  rmap_desc_cache, 1, gfp_flags);
+       if (r)
+               goto out;
+       r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
+                                  mmu_page_cache, 4, gfp_flags);
+       if (r)
+               goto out;
+       r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+                                  mmu_page_header_cache, 4, gfp_flags);
 out:
        return r;
 }
 
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+{
+       int r;
+
+       r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
+       if (r < 0) {
+               spin_unlock(&vcpu->kvm->lock);
+               kvm_arch_ops->vcpu_put(vcpu);
+               r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
+               kvm_arch_ops->vcpu_load(vcpu);
+               spin_lock(&vcpu->kvm->lock);
+       }
+       return r;
+}
+
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
        mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
        mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+       mmu_free_memory_cache(&vcpu->mmu_page_cache);
+       mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
 }
 
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -390,13 +425,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
        struct kvm *kvm = vcpu->kvm;
        struct page *page;
-       struct kvm_memory_slot *slot;
        struct kvm_rmap_desc *desc;
        u64 *spte;
 
-       slot = gfn_to_memslot(kvm, gfn);
-       BUG_ON(!slot);
-       page = gfn_to_page(slot, gfn);
+       page = gfn_to_page(kvm, gfn);
+       BUG_ON(!page);
 
        while (page_private(page)) {
                if (!(page_private(page) & 1))
@@ -417,13 +450,13 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
        }
 }
 
-static int is_empty_shadow_page(hpa_t page_hpa)
+#ifdef MMU_DEBUG
+static int is_empty_shadow_page(u64 *spt)
 {
        u64 *pos;
        u64 *end;
 
-       for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
-                     pos != end; pos++)
+       for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
                if (*pos != 0) {
                        printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
                               pos, *pos);
@@ -431,14 +464,15 @@ static int is_empty_shadow_page(hpa_t page_hpa)
                }
        return 1;
 }
+#endif
 
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
+static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+                             struct kvm_mmu_page *page_head)
 {
-       struct kvm_mmu_page *page_head = page_header(page_hpa);
-
-       ASSERT(is_empty_shadow_page(page_hpa));
-       page_head->page_hpa = page_hpa;
-       list_move(&page_head->link, &vcpu->free_pages);
+       ASSERT(is_empty_shadow_page(page_head->spt));
+       list_del(&page_head->link);
+       mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
+       mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
        ++vcpu->kvm->n_free_mmu_pages;
 }
 
@@ -452,12 +486,15 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 {
        struct kvm_mmu_page *page;
 
-       if (list_empty(&vcpu->free_pages))
+       if (!vcpu->kvm->n_free_mmu_pages)
                return NULL;
 
-       page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
-       list_move(&page->link, &vcpu->kvm->active_mmu_pages);
-       ASSERT(is_empty_shadow_page(page->page_hpa));
+       page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
+                                     sizeof *page);
+       page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+       set_page_private(virt_to_page(page->spt), (unsigned long)page);
+       list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+       ASSERT(is_empty_shadow_page(page->spt));
        page->slot_bitmap = 0;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
@@ -615,7 +652,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
        u64 *pt;
        u64 ent;
 
-       pt = __va(page->page_hpa);
+       pt = page->spt;
 
        if (page->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -667,7 +704,7 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
        kvm_mmu_page_unlink_children(vcpu, page);
        if (!page->root_count) {
                hlist_del(&page->hash_link);
-               kvm_mmu_free_page(vcpu, page->page_hpa);
+               kvm_mmu_free_page(vcpu, page);
        } else
                list_move(&page->link, &vcpu->kvm->active_mmu_pages);
 }
@@ -711,14 +748,12 @@ hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
-       struct kvm_memory_slot *slot;
        struct page *page;
 
        ASSERT((gpa & HPA_ERR_MASK) == 0);
-       slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
-       if (!slot)
+       page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+       if (!page)
                return gpa | HPA_ERR_MASK;
-       page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
                | (gpa & (PAGE_SIZE-1));
 }
@@ -784,7 +819,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                return -ENOMEM;
                        }
 
-                       table[index] = new_table->page_hpa | PT_PRESENT_MASK
+                       table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
                                | PT_WRITABLE_MASK | PT_USER_MASK;
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
@@ -810,10 +845,12 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];
 
-               ASSERT(VALID_PAGE(root));
-               root &= PT64_BASE_ADDR_MASK;
-               page = page_header(root);
-               --page->root_count;
+               if (root) {
+                       ASSERT(VALID_PAGE(root));
+                       root &= PT64_BASE_ADDR_MASK;
+                       page = page_header(root);
+                       --page->root_count;
+               }
                vcpu->mmu.pae_root[i] = INVALID_PAGE;
        }
        vcpu->mmu.root_hpa = INVALID_PAGE;
@@ -834,7 +871,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                ASSERT(!VALID_PAGE(root));
                page = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                        PT64_ROOT_LEVEL, 0, 0, NULL);
-               root = page->page_hpa;
+               root = __pa(page->spt);
                ++page->root_count;
                vcpu->mmu.root_hpa = root;
                return;
@@ -844,14 +881,18 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                hpa_t root = vcpu->mmu.pae_root[i];
 
                ASSERT(!VALID_PAGE(root));
-               if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
+               if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
+                       if (!is_present_pte(vcpu->pdptrs[i])) {
+                               vcpu->mmu.pae_root[i] = 0;
+                               continue;
+                       }
                        root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
-               else if (vcpu->mmu.root_level == 0)
+               } else if (vcpu->mmu.root_level == 0)
                        root_gfn = 0;
                page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                        PT32_ROOT_LEVEL, !is_paging(vcpu),
                                        0, NULL);
-               root = page->page_hpa;
+               root = __pa(page->spt);
                ++page->root_count;
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
@@ -909,7 +950,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 
 static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-       ++kvm_stat.tlb_flush;
+       ++vcpu->stat.tlb_flush;
        kvm_arch_ops->tlb_flush(vcpu);
 }
 
@@ -1059,6 +1100,7 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
 
+       mmu_topup_memory_caches(vcpu);
        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
        else if (is_long_mode(vcpu))
@@ -1091,7 +1133,7 @@ out:
        return r;
 }
 
-static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *page,
                                  u64 *spte)
 {
@@ -1110,7 +1152,22 @@ static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
        *spte = 0;
 }
 
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu_page *page,
+                                 u64 *spte,
+                                 const void *new, int bytes)
+{
+       if (page->role.level != PT_PAGE_TABLE_LEVEL)
+               return;
+
+       if (page->role.glevels == PT32_ROOT_LEVEL)
+               paging32_update_pte(vcpu, page, spte, new, bytes);
+       else
+               paging64_update_pte(vcpu, page, spte, new, bytes);
+}
+
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+                      const u8 *old, const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *page;
@@ -1122,6 +1179,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
        unsigned pte_size;
        unsigned page_offset;
        unsigned misaligned;
+       unsigned quadrant;
        int level;
        int flooded = 0;
        int npte;
@@ -1142,6 +1200,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
                        continue;
                pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+               misaligned |= bytes < 4;
                if (misaligned || flooded) {
                        /*
                         * Misaligned accesses are too much trouble to fix
@@ -1173,21 +1232,20 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
                                page_offset <<= 1;
                                npte = 2;
                        }
+                       quadrant = page_offset >> PAGE_SHIFT;
                        page_offset &= ~PAGE_MASK;
+                       if (quadrant != page->role.quadrant)
+                               continue;
                }
-               spte = __va(page->page_hpa);
-               spte += page_offset / sizeof(*spte);
+               spte = &page->spt[page_offset / sizeof(*spte)];
                while (npte--) {
-                       mmu_pre_write_zap_pte(vcpu, page, spte);
+                       mmu_pte_write_zap_pte(vcpu, page, spte);
+                       mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
                        ++spte;
                }
        }
 }
 
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
-{
-}
-
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
@@ -1216,13 +1274,6 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu, page);
        }
-       while (!list_empty(&vcpu->free_pages)) {
-               page = list_entry(vcpu->free_pages.next,
-                                 struct kvm_mmu_page, link);
-               list_del(&page->link);
-               __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
-               page->page_hpa = INVALID_PAGE;
-       }
        free_page((unsigned long)vcpu->mmu.pae_root);
 }
 
@@ -1233,18 +1284,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
        ASSERT(vcpu);
 
-       for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
-               struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
-
-               INIT_LIST_HEAD(&page_header->link);
-               if ((page = alloc_page(GFP_KERNEL)) == NULL)
-                       goto error_1;
-               set_page_private(page, (unsigned long)page_header);
-               page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
-               memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
-               list_add(&page_header->link, &vcpu->free_pages);
-               ++vcpu->kvm->n_free_mmu_pages;
-       }
+       vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
 
        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
@@ -1269,7 +1309,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
-       ASSERT(list_empty(&vcpu->free_pages));
 
        return alloc_mmu_pages(vcpu);
 }
@@ -1278,7 +1317,6 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
-       ASSERT(!list_empty(&vcpu->free_pages));
 
        return init_kvm_mmu(vcpu);
 }
@@ -1304,7 +1342,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
                if (!test_bit(slot, &page->slot_bitmap))
                        continue;
 
-               pt = __va(page->page_hpa);
+               pt = page->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (pt[i] & PT_WRITABLE_MASK) {
@@ -1314,6 +1352,67 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
        }
 }
 
+void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
+{
+       destroy_kvm_mmu(vcpu);
+
+       while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
+               struct kvm_mmu_page *page;
+
+               page = container_of(vcpu->kvm->active_mmu_pages.next,
+                                   struct kvm_mmu_page, link);
+               kvm_mmu_zap_page(vcpu, page);
+       }
+
+       mmu_free_memory_caches(vcpu);
+       kvm_arch_ops->tlb_flush(vcpu);
+       init_kvm_mmu(vcpu);
+}
+
+void kvm_mmu_module_exit(void)
+{
+       if (pte_chain_cache)
+               kmem_cache_destroy(pte_chain_cache);
+       if (rmap_desc_cache)
+               kmem_cache_destroy(rmap_desc_cache);
+       if (mmu_page_cache)
+               kmem_cache_destroy(mmu_page_cache);
+       if (mmu_page_header_cache)
+               kmem_cache_destroy(mmu_page_header_cache);
+}
+
+int kvm_mmu_module_init(void)
+{
+       pte_chain_cache = kmem_cache_create("kvm_pte_chain",
+                                           sizeof(struct kvm_pte_chain),
+                                           0, 0, NULL, NULL);
+       if (!pte_chain_cache)
+               goto nomem;
+       rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
+                                           sizeof(struct kvm_rmap_desc),
+                                           0, 0, NULL, NULL);
+       if (!rmap_desc_cache)
+               goto nomem;
+
+       mmu_page_cache = kmem_cache_create("kvm_mmu_page",
+                                          PAGE_SIZE,
+                                          PAGE_SIZE, 0, NULL, NULL);
+       if (!mmu_page_cache)
+               goto nomem;
+
+       mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
+                                                 sizeof(struct kvm_mmu_page),
+                                                 0, 0, NULL, NULL);
+       if (!mmu_page_header_cache)
+               goto nomem;
+
+       return 0;
+
+nomem:
+       kvm_mmu_module_exit();
+       return -ENOMEM;
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
@@ -1336,7 +1435,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
                u64 ent = pt[i];
 
-               if (!ent & PT_PRESENT_MASK)
+               if (!(ent & PT_PRESENT_MASK))
                        continue;
 
                va = canonicalize(va);
@@ -1410,7 +1509,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
        int i;
 
        list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-               u64 *pt = __va(page->page_hpa);
+               u64 *pt = page->spt;
 
                if (page->role.level != PT_PAGE_TABLE_LEVEL)
                        continue;