static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
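+/* slab caches for shadow page table pages and their struct kvm_mmu_page headers */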
+static struct kmem_cache *mmu_page_cache;
+static struct kmem_cache *mmu_page_header_cache;
static int is_write_protection(struct kvm_vcpu *vcpu)
{
goto out;
r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
rmap_desc_cache, 1, gfp_flags);
+ if (r)
+ goto out;
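+ /* keep a few shadow pages and headers in reserve so the fault path never calls the allocator directly */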
+ r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
+ mmu_page_cache, 4, gfp_flags);
+ if (r)
+ goto out;
+ r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+ mmu_page_header_cache, 4, gfp_flags);
out:
return r;
}
{
mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+ mmu_free_memory_cache(&vcpu->mmu_page_cache);
+ mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
}
#ifdef MMU_DEBUG
-static int is_empty_shadow_page(hpa_t page_hpa)
+static int is_empty_shadow_page(u64 *spt)
{
u64 *pos;
u64 *end;
- for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
- pos != end; pos++)
+ for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
if (*pos != 0) {
printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
pos, *pos);
}
#endif
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
+static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *page_head)
{
- struct kvm_mmu_page *page_head = page_header(page_hpa);
-
- ASSERT(is_empty_shadow_page(page_hpa));
- page_head->page_hpa = page_hpa;
- list_move(&page_head->link, &vcpu->free_pages);
+ ASSERT(is_empty_shadow_page(page_head->spt));
+ list_del(&page_head->link);
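+ /* hand the page and its header back to the per-vcpu object caches */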
+ mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
+ mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
++vcpu->kvm->n_free_mmu_pages;
}
{
struct kvm_mmu_page *page;
- if (list_empty(&vcpu->free_pages))
+ if (!vcpu->kvm->n_free_mmu_pages)
return NULL;
- page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
- list_move(&page->link, &vcpu->kvm->active_mmu_pages);
- ASSERT(is_empty_shadow_page(page->page_hpa));
+ page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
+ sizeof *page);
+ page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
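+ /* record the header in the struct page backing spt so it can be recovered from the page later */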
+ set_page_private(virt_to_page(page->spt), (unsigned long)page);
+ list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+ ASSERT(is_empty_shadow_page(page->spt));
page->slot_bitmap = 0;
page->multimapped = 0;
page->parent_pte = parent_pte;
u64 *pt;
u64 ent;
- pt = __va(page->page_hpa);
+ pt = page->spt;
if (page->role.level == PT_PAGE_TABLE_LEVEL) {
for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
kvm_mmu_page_unlink_children(vcpu, page);
if (!page->root_count) {
hlist_del(&page->hash_link);
- kvm_mmu_free_page(vcpu, page->page_hpa);
+ kvm_mmu_free_page(vcpu, page);
} else
list_move(&page->link, &vcpu->kvm->active_mmu_pages);
}
return -ENOMEM;
}
- table[index] = new_table->page_hpa | PT_PRESENT_MASK
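+ /* spt is a kernel virtual address now, so translate it before building the shadow pte */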
+ table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
}
table_addr = table[index] & PT64_BASE_ADDR_MASK;
ASSERT(!VALID_PAGE(root));
page = kvm_mmu_get_page(vcpu, root_gfn, 0,
PT64_ROOT_LEVEL, 0, 0, NULL);
- root = page->page_hpa;
+ root = __pa(page->spt);
++page->root_count;
vcpu->mmu.root_hpa = root;
return;
page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, !is_paging(vcpu),
0, NULL);
- root = page->page_hpa;
+ root = __pa(page->spt);
++page->root_count;
vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
}
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+ mmu_topup_memory_caches(vcpu);
if (!is_paging(vcpu))
return nonpaging_init_context(vcpu);
else if (is_long_mode(vcpu))
return r;
}
-static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *page,
u64 *spte)
{
*spte = 0;
}
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
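+/* propagate the newly written guest pte into the shadow pte instead of just zapping it; only leaf shadow pages are updated */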
+static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *page,
+ u64 *spte,
+ const void *new, int bytes)
+{
+ if (page->role.level != PT_PAGE_TABLE_LEVEL)
+ return;
+
+ if (page->role.glevels == PT32_ROOT_LEVEL)
+ paging32_update_pte(vcpu, page, spte, new, bytes);
+ else
+ paging64_update_pte(vcpu, page, spte, new, bytes);
+}
+
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *old, const u8 *new, int bytes)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *page;
unsigned pte_size;
unsigned page_offset;
unsigned misaligned;
+ unsigned quadrant;
int level;
int flooded = 0;
int npte;
continue;
pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
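+ /* a write narrower than 4 bytes cannot be a whole guest pte; treat it like a misaligned write */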
+ misaligned |= bytes < 4;
if (misaligned || flooded) {
/*
* Misaligned accesses are too much trouble to fix
page_offset <<= 1;
npte = 2;
}
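+ /* a 32-bit guest page table is shadowed by more than one shadow page; only touch the one covering this quadrant */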
+ quadrant = page_offset >> PAGE_SHIFT;
page_offset &= ~PAGE_MASK;
+ if (quadrant != page->role.quadrant)
+ continue;
}
- spte = __va(page->page_hpa);
- spte += page_offset / sizeof(*spte);
+ spte = &page->spt[page_offset / sizeof(*spte)];
while (npte--) {
- mmu_pre_write_zap_pte(vcpu, page, spte);
+ mmu_pte_write_zap_pte(vcpu, page, spte);
+ mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
++spte;
}
}
}
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
-{
-}
-
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
struct kvm_mmu_page, link);
kvm_mmu_zap_page(vcpu, page);
}
- while (!list_empty(&vcpu->free_pages)) {
- page = list_entry(vcpu->free_pages.next,
- struct kvm_mmu_page, link);
- list_del(&page->link);
- __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
- page->page_hpa = INVALID_PAGE;
- }
free_page((unsigned long)vcpu->mmu.pae_root);
}
ASSERT(vcpu);
- for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
- struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
-
- INIT_LIST_HEAD(&page_header->link);
- if ((page = alloc_page(GFP_KERNEL)) == NULL)
- goto error_1;
- set_page_private(page, (unsigned long)page_header);
- page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
- memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
- list_add(&page_header->link, &vcpu->free_pages);
- ++vcpu->kvm->n_free_mmu_pages;
- }
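+ /* shadow pages now come from the slab caches on demand; just set the free-page budget */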
+ vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
/*
* When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
- ASSERT(list_empty(&vcpu->free_pages));
return alloc_mmu_pages(vcpu);
}
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
- ASSERT(!list_empty(&vcpu->free_pages));
return init_kvm_mmu(vcpu);
}
if (!test_bit(slot, &page->slot_bitmap))
continue;
- pt = __va(page->page_hpa);
+ pt = page->spt;
for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
/* avoid RMW */
if (pt[i] & PT_WRITABLE_MASK) {
kmem_cache_destroy(pte_chain_cache);
if (rmap_desc_cache)
kmem_cache_destroy(rmap_desc_cache);
+ if (mmu_page_cache)
+ kmem_cache_destroy(mmu_page_cache);
+ if (mmu_page_header_cache)
+ kmem_cache_destroy(mmu_page_header_cache);
}
int kvm_mmu_module_init(void)
if (!rmap_desc_cache)
goto nomem;
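+ /* shadow page table pages must be page-sized and page-aligned */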
+ mmu_page_cache = kmem_cache_create("kvm_mmu_page",
+ PAGE_SIZE,
+ PAGE_SIZE, 0, NULL, NULL);
+ if (!mmu_page_cache)
+ goto nomem;
+
+ mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
+ sizeof(struct kvm_mmu_page),
+ 0, 0, NULL, NULL);
+ if (!mmu_page_header_cache)
+ goto nomem;
+
return 0;
nomem:
int i;
list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
- u64 *pt = __va(page->page_hpa);
+ u64 *pt = page->spt;
if (page->role.level != PT_PAGE_TABLE_LEVEL)
continue;