KVM: PPC: e500: MMU API: fix leak of shared_tlb_pages
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index a2b66717813dfef6c43e9bf23864f8766266f0a5..641f97847b95a847cce3f6d437f60f4ce359ad69 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -304,17 +304,13 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
        ref->flags = E500_TLB_VALID;
 
        if (tlbe_is_writable(gtlbe))
-               ref->flags |= E500_TLB_DIRTY;
+               kvm_set_pfn_dirty(pfn);
 }
 
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
        if (ref->flags & E500_TLB_VALID) {
-               if (ref->flags & E500_TLB_DIRTY)
-                       kvm_release_pfn_dirty(ref->pfn);
-               else
-                       kvm_release_pfn_clean(ref->pfn);
-
+               trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
 }
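
With this hunk, dirty tracking moves from release time to map time: the pfn is marked dirty while the reference taken by gfn_to_pfn_memslot() is still held, so release no longer needs an E500_TLB_DIRTY flag or a kvm_release_pfn_*() call. A minimal sketch of the resulting setup path, with an illustrative function name and a plain unsigned long standing in for the pfn type:

/* Sketch only, not the driver code: dirty state is pushed to the page
 * at map time, while the gfn_to_pfn_memslot() reference is still held,
 * so kvmppc_e500_ref_release() is left with pure bookkeeping.
 */
static void ref_setup_sketch(struct tlbe_ref *ref, unsigned long pfn,
                             int writable)
{
        ref->pfn = pfn;
        ref->flags = E500_TLB_VALID;

        if (writable)
                kvm_set_pfn_dirty(pfn);  /* immediately, not at release */
}
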
@@ -357,6 +353,13 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
        clear_tlb_privs(vcpu_e500);
 }
 
+void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+       clear_tlb_refs(vcpu_e500);
+       clear_tlb1_bitmap(vcpu_e500);
+}
+
 static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                unsigned int eaddr, int as)
 {
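
kvmppc_core_flush_tlb() drops every host shadow mapping at once: clear_tlb_refs() invalidates the per-entry refs and clear_tlb1_bitmap() forgets which host TLB1 slots back guest entries, so everything refaults. A hedged sketch of how it is presumably reached, assuming a KVM_REQ_TLB_FLUSH request raised by kvm_flush_remote_tlbs() is serviced before the next guest entry (the hook name here is illustrative):

/* Assumed call-path sketch: kvm_flush_remote_tlbs() raises
 * KVM_REQ_TLB_FLUSH on every vcpu; a check-requests hook like this
 * would service it before reentering the guest.
 */
static void service_requests_sketch(struct kvm_vcpu *vcpu)
{
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_core_flush_tlb(vcpu);  /* drop all shadow state */
}
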
@@ -520,11 +523,10 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
        if (likely(!pfnmap)) {
                unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
-               pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
+               pfn = gfn_to_pfn_memslot(slot, gfn);
                if (is_error_pfn(pfn)) {
                        printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                                        (long)gfn);
-                       kvm_release_pfn_clean(pfn);
                        return;
                }
 
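
Two fixes in one hunk: gfn_to_pfn_memslot() lost its struct kvm argument in the generic code, and the error path no longer releases the pfn, since an error pfn is a sentinel with no page reference behind it. The corrected lookup pattern as a sketch, again with unsigned long standing in for the pfn type:

/* Error pfns are sentinels, not refcounted pages: releasing one was
 * the bug that the removed kvm_release_pfn_clean() line represents.
 */
static int lookup_sketch(struct kvm_memory_slot *slot, gfn_t gfn,
                         unsigned long *pfn_out)
{
        unsigned long pfn = gfn_to_pfn_memslot(slot, gfn);

        if (is_error_pfn(pfn))
                return -EFAULT;  /* nothing to release on this path */

        *pfn_out = pfn;
        return 0;
}
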
@@ -542,6 +544,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);
+
+       /* Drop refcount on page, so that mmu notifiers can clear it */
+       kvm_release_pfn_clean(pfn);
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
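
Dropping the reference right after the shadow entry is written is what makes the MMU notifiers below work: KVM no longer pins the page for the lifetime of the mapping, so the kernel may reclaim or migrate it, and the notifier flush (further down) tears the stale shadow entry out first. A sketch of the invariant, with an illustrative helper name:

/* Invariant sketch: after this tail runs, the shadow TLB may point at
 * a page KVM does not pin; kvm_unmap_hva() below must flush the entry
 * before the page is actually freed.
 */
static void shadow_map_tail_sketch(unsigned long pfn)
{
        kvmppc_mmu_flush_icache(pfn);  /* flush while still mapped */
        kvm_release_pfn_clean(pfn);    /* no long-term pin on the page */
}
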
@@ -1040,8 +1045,12 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                sesel = 0; /* unused */
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-               kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
-                                       &priv->ref, eaddr, &stlbe);
+               /* Only triggers after clear_tlb_refs */
+               if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
+                       kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+               else
+                       kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
+                                               &priv->ref, eaddr, &stlbe);
                break;
 
        case 1: {
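
The unlikely() branch connects this hunk to the flush machinery above: after clear_tlb_refs(), priv->ref has lost E500_TLB_VALID, so the cached translation cannot be reused. The assumed timeline, as a comment sketch over the same decision:

/* Assumed sequence behind the VALID check (hedged reconstruction):
 *   kvm_unmap_hva() -> kvm_flush_remote_tlbs()
 *     -> kvmppc_core_flush_tlb() -> clear_tlb_refs()  (VALID cleared)
 *   next guest access -> kvmppc_mmu_map() sees !VALID
 *     -> kvmppc_e500_tlb0_map() redoes the gfn->pfn translation
 */
if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);  /* re-fault */
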
@@ -1061,6 +1070,49 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
        write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
 }
 
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+       trace_kvm_unmap_hva(hva);
+
+       /*
+        * Flush all shadow TLB entries everywhere. This is slow, but
+        * we are 100% sure that we catch the page being unmapped.
+        */
+       kvm_flush_remote_tlbs(kvm);
+
+       return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+       /* kvm_unmap_hva() flushes everything anyway */
+       kvm_unmap_hva(kvm, start);
+
+       return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       /* XXX could be more clever ;) */
+       return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       /* XXX could be more clever ;) */
+       return 0;
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+       /* The page will get remapped properly on its next fault */
+       kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
 static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
        int i;
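
The notifier handlers above take the big hammer: any host-side unmap flushes every shadow TLB entry via kvm_flush_remote_tlbs(), and page aging is simply not reported. A simplified sketch of how the generic side would reach kvm_unmap_hva(), loosely modeled on the mmu-notifier glue in virt/kvm/kvm_main.c of this era (locking and sequence counters omitted):

/* Simplified, assumption-laden sketch of the generic notifier glue;
 * the real code also takes kvm->mmu_lock and bumps the notifier
 * sequence counters before calling the arch handler.
 */
static void invalidate_page_sketch(struct mmu_notifier *mn,
                                   struct mm_struct *mm,
                                   unsigned long address)
{
        struct kvm *kvm = container_of(mn, struct kvm, mmu_notifier);

        kvm_unmap_hva(kvm, address);  /* e500: flush all shadow TLBs */
}
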
@@ -1082,6 +1134,8 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
                }
 
                vcpu_e500->num_shared_tlb_pages = 0;
+
+               kfree(vcpu_e500->shared_tlb_pages);
                vcpu_e500->shared_tlb_pages = NULL;
        } else {
                kfree(vcpu_e500->gtlb_arch);
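
This kfree() is the leak named in the subject line: free_gtlb() put the shared pages themselves but never freed the pointer array that tracked them. The general pairing, as an illustrative sketch:

/* Illustrative pairing: the array that tracks the pages must itself be
 * freed after the page references it holds are dropped.
 */
static void free_page_array_sketch(struct page **pages, int num)
{
        int i;

        for (i = 0; i < num; i++)
                put_page(pages[i]);  /* drop each page reference */

        kfree(pages);                /* the array itself: the old leak */
}
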
@@ -1333,7 +1387,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
        if (!vcpu_e500->gtlb_priv[1])
                goto err;
 
-       vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
+       vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
                                          vcpu_e500->gtlb_params[1].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->g2h_tlb1_map)
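
The old sizeof(unsigned int) under-allocated the map: g2h_tlb1_map entries are u64 bitmaps (one bit per host TLB1 slot), so each entry got only 4 of its 8 bytes and accesses to the tail of the array ran out of bounds. Sizing from the pointee, as a defensive sketch of the same allocation:

/* Deriving the element size from the pointer keeps the allocation in
 * sync with the declaration even if the bitmap type changes again.
 */
vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(*vcpu_e500->g2h_tlb1_map) *
                                  vcpu_e500->gtlb_params[1].entries,
                                  GFP_KERNEL);

kcalloc(entries, sizeof(*map), GFP_KERNEL) would express the same thing with overflow checking on the multiply.
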