KVM: PPC: e500: fix allocation size error on g2h_tlb1_map
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c8f6c58267426309ff979a7ed2a0d473006adda1..a27d134eef3674f98ba1535931582c8106e4291c 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -304,17 +304,13 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
        ref->flags = E500_TLB_VALID;
 
        if (tlbe_is_writable(gtlbe))
-               ref->flags |= E500_TLB_DIRTY;
+               kvm_set_pfn_dirty(pfn);
 }
 
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
        if (ref->flags & E500_TLB_VALID) {
-               if (ref->flags & E500_TLB_DIRTY)
-                       kvm_release_pfn_dirty(ref->pfn);
-               else
-                       kvm_release_pfn_clean(ref->pfn);
-
+               trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
 }
@@ -322,11 +318,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
        if (vcpu_e500->g2h_tlb1_map)
-               memset(vcpu_e500->g2h_tlb1_map,
-                      sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0);
+               memset(vcpu_e500->g2h_tlb1_map, 0,
+                      sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
-               memset(vcpu_e500->h2g_tlb1_rmap,
-                      sizeof(unsigned int) * host_tlb_params[1].entries, 0);
+               memset(vcpu_e500->h2g_tlb1_rmap, 0,
+                      sizeof(unsigned int) * host_tlb_params[1].entries);
 }
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -357,6 +353,13 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
        clear_tlb_privs(vcpu_e500);
 }
 
+void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+       clear_tlb_refs(vcpu_e500);
+       clear_tlb1_bitmap(vcpu_e500);
+}
+
 static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                unsigned int eaddr, int as)
 {
@@ -524,7 +527,6 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                if (is_error_pfn(pfn)) {
                        printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                                        (long)gfn);
-                       kvm_release_pfn_clean(pfn);
                        return;
                }
 
@@ -539,6 +541,12 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);
+
+       /* Clear i-cache for new pages */
+       kvmppc_mmu_flush_icache(pfn);
+
+       /* Drop refcount on page, so that mmu notifiers can clear it */
+       kvm_release_pfn_clean(pfn);
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
@@ -1037,8 +1045,12 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                sesel = 0; /* unused */
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-               kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
-                                       &priv->ref, eaddr, &stlbe);
+               /* Only triggers after clear_tlb_refs */
+               if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
+                       kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+               else
+                       kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
+                                               &priv->ref, eaddr, &stlbe);
                break;
 
        case 1: {
@@ -1058,6 +1070,49 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
        write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
 }
 
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+       trace_kvm_unmap_hva(hva);
+
+       /*
+        * Flush all shadow tlb entries everywhere. This is slow, but
+        * we are 100% sure that we catch the page that is to be unmapped
+        */
+       kvm_flush_remote_tlbs(kvm);
+
+       return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+       /* kvm_unmap_hva flushes everything anyway */
+       kvm_unmap_hva(kvm, start);
+
+       return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       /* XXX could be more clever ;) */
+       return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       /* XXX could be more clever ;) */
+       return 0;
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+       /* The page will get remapped properly on its next fault */
+       kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
 static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
        int i;
@@ -1330,7 +1385,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
        if (!vcpu_e500->gtlb_priv[1])
                goto err;
 
-       vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
+       vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
                                          vcpu_e500->gtlb_params[1].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->g2h_tlb1_map)
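
Note on the final hunk, which is the fix named in the subject line: g2h_tlb1_map is
used as an array of u64 (clear_tlb1_bitmap above zeroes sizeof(u64) *
gtlb_params[1].entries bytes), but it was allocated with sizeof(unsigned int). That
allocates only half the required memory, so the u64-sized accesses can run past the
end of the buffer. Below is a minimal userspace sketch of the mismatch, not kernel
code; the entry count of 64 is illustrative, not taken from the kernel sources.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		size_t entries = 64;	/* stands in for gtlb_params[1].entries */

		/* Pre-fix allocation size: one unsigned int (4 bytes) per entry. */
		size_t allocated = sizeof(unsigned int) * entries;
		/* Actual usage: one u64 (8 bytes) per entry, as in clear_tlb1_bitmap(). */
		size_t used = sizeof(uint64_t) * entries;

		printf("allocated %zu bytes, but memset/indexing touches %zu bytes\n",
		       allocated, used);

		/* Post-fix: allocate with the same element size that the code uses. */
		uint64_t *g2h_tlb1_map = calloc(entries, sizeof(uint64_t));
		if (!g2h_tlb1_map)
			return 1;
		memset(g2h_tlb1_map, 0, sizeof(uint64_t) * entries);	/* now in bounds */
		free(g2h_tlb1_map);
		return 0;
	}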