Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 3292d76101d2eefa6808b0bf72dd81c3fed1eba5..76a64ce6a5b6c641207cfa40b6dd421299f788a0 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -58,14 +58,40 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline u32 sr_vsid(u32 sr_raw)
+{
+       return sr_raw & 0x0fffffff;
+}
+
+static inline bool sr_valid(u32 sr_raw)
+{
+       return (sr_raw & 0x80000000) ? false : true;
+}
+
+static inline bool sr_ks(u32 sr_raw)
+{
+       return (sr_raw & 0x40000000) ? true: false;
+}
+
+static inline bool sr_kp(u32 sr_raw)
+{
+       return (sr_raw & 0x20000000) ? true: false;
+}
+
+static inline bool sr_nx(u32 sr_raw)
+{
+       return (sr_raw & 0x10000000) ? true: false;
+}
+
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                         struct kvmppc_pte *pte, bool data);
+                                         struct kvmppc_pte *pte, bool data,
+                                         bool iswrite);
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid);
 
-static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
+static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-       return &vcpu_book3s->sr[(eaddr >> 28) & 0xf];
+       return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf];
 }
 
 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
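[Aside, not part of the patch: a minimal userspace sketch of what the sr_* helpers added in the hunk above pull out of a raw segment register word; the SR value is made up for illustration.]

/* Illustrative sketch only: the sr_* helpers restated for userspace
 * and applied to an arbitrary SR value. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t sr_vsid(uint32_t sr_raw)  { return sr_raw & 0x0fffffff; }
static bool sr_valid(uint32_t sr_raw)     { return !(sr_raw & 0x80000000); } /* T bit set: not a page-translation SR */
static bool sr_ks(uint32_t sr_raw)        { return !!(sr_raw & 0x40000000); } /* supervisor-state key */
static bool sr_kp(uint32_t sr_raw)        { return !!(sr_raw & 0x20000000); } /* problem-state key */
static bool sr_nx(uint32_t sr_raw)        { return !!(sr_raw & 0x10000000); } /* no-execute */

int main(void)
{
        uint32_t sr = 0x2000abcd; /* arbitrary example: Kp set, VSID 0xabcd */

        printf("vsid=0x%x valid=%d Ks=%d Kp=%d Nx=%d\n",
               sr_vsid(sr), sr_valid(sr), sr_ks(sr), sr_kp(sr), sr_nx(sr));
        return 0;
}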
@@ -74,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
        u64 vsid;
        struct kvmppc_pte pte;
 
-       if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
+       if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
                return pte.vpage;
 
        kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -86,17 +112,18 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
        kvmppc_set_msr(vcpu, 0);
 }
 
-static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
-                                     struct kvmppc_sr *sre, gva_t eaddr,
+static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
+                                     u32 sre, gva_t eaddr,
                                      bool primary)
 {
+       struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u32 page, hash, pteg, htabmask;
        hva_t r;
 
        page = (eaddr & 0x0FFFFFFF) >> 12;
        htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0;
 
-       hash = ((sre->vsid ^ page) << 6);
+       hash = ((sr_vsid(sre) ^ page) << 6);
        if (!primary)
                hash = ~hash;
        hash &= htabmask;
@@ -104,24 +131,24 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
        pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash;
 
        dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n",
-               vcpu_book3s->vcpu.arch.pc, eaddr, vcpu_book3s->sdr1, pteg,
-               sre->vsid);
+               kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
+               sr_vsid(sre));
 
-       r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+       r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
        if (kvm_is_error_hva(r))
                return r;
        return r | (pteg & ~PAGE_MASK);
 }
 
-static u32 kvmppc_mmu_book3s_32_get_ptem(struct kvmppc_sr *sre, gva_t eaddr,
-                                   bool primary)
+static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
 {
-       return ((eaddr & 0x0fffffff) >> 22) | (sre->vsid << 7) |
+       return ((eaddr & 0x0fffffff) >> 22) | (sr_vsid(sre) << 7) |
               (primary ? 0 : 0x40) | 0x80000000;
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                         struct kvmppc_pte *pte, bool data)
+                                         struct kvmppc_pte *pte, bool data,
+                                         bool iswrite)
 {
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_bat *bat;
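[Aside, not part of the patch: before the BAT path continues below, a hedged userspace restatement of the PTEG address and compare-word arithmetic done by kvmppc_mmu_book3s_32_get_pteg() and kvmppc_mmu_book3s_32_get_ptem() in the hunk above; the SDR1, VSID and effective-address inputs are invented.]

/* Illustrative sketch only: the classic 32-bit hashed-page-table math
 * mirrored from the hunk above, with made-up input values. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t pteg_addr(uint32_t sdr1, uint32_t vsid, uint32_t eaddr, bool primary)
{
        uint32_t page     = (eaddr & 0x0FFFFFFF) >> 12;      /* page index within the 256 MB segment */
        uint32_t htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0; /* mask built from SDR1's HTABMASK field */
        uint32_t hash     = (vsid ^ page) << 6;              /* primary hash, scaled to 64-byte PTEGs */

        if (!primary)
                hash = ~hash;                                /* secondary hash is the one's complement */
        hash &= htabmask;

        return (sdr1 & 0xffff0000) | hash;                   /* HTABORG | hashed PTEG offset */
}

static uint32_t ptem(uint32_t vsid, uint32_t eaddr, bool primary)
{
        /* compare word: valid bit | VSID | H bit | abbreviated page index */
        return ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) |
               (primary ? 0 : 0x40) | 0x80000000;
}

int main(void)
{
        uint32_t sdr1 = 0x00300003, vsid = 0xabcd, ea = 0x10345678; /* arbitrary examples */

        printf("primary PTEG   = 0x%08x, ptem = 0x%08x\n",
               pteg_addr(sdr1, vsid, ea, true),  ptem(vsid, ea, true));
        printf("secondary PTEG = 0x%08x, ptem = 0x%08x\n",
               pteg_addr(sdr1, vsid, ea, false), ptem(vsid, ea, false));
        return 0;
}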
@@ -133,7 +160,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
                else
                        bat = &vcpu_book3s->ibat[i];
 
-               if (vcpu->arch.msr & MSR_PR) {
+               if (vcpu->arch.shared->msr & MSR_PR) {
                        if (!bat->vp)
                                continue;
                } else {
@@ -162,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
                                printk(KERN_INFO "BAT is not readable!\n");
                                continue;
                        }
-                       if (!pte->may_write) {
-                               /* let's treat r/o BATs as not-readable for now */
+                       if (iswrite && !pte->may_write) {
                                dprintk_pte("BAT is read-only!\n");
                                continue;
                        }
@@ -177,24 +203,23 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                                     struct kvmppc_pte *pte, bool data,
-                                    bool primary)
+                                    bool iswrite, bool primary)
 {
-       struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
-       struct kvmppc_sr *sre;
+       u32 sre;
        hva_t ptegp;
        u32 pteg[16];
        u32 ptem = 0;
        int i;
        int found = 0;
 
-       sre = find_sr(vcpu_book3s, eaddr);
+       sre = find_sr(vcpu, eaddr);
 
        dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28,
-                   sre->vsid, sre->raw);
+                   sr_vsid(sre), sre);
 
        pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
 
-       ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary);
+       ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
        if (kvm_is_error_hva(ptegp)) {
                printk(KERN_INFO "KVM: Invalid PTEG!\n");
                goto no_page_found;
@@ -214,8 +239,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                        pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
                        pp = pteg[i+1] & 3;
 
-                       if ((sre->Kp &&  (vcpu->arch.msr & MSR_PR)) ||
-                           (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
+                       if ((sr_kp(sre) &&  (vcpu->arch.shared->msr & MSR_PR)) ||
+                           (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR)))
                                pp |= 4;
 
                        pte->may_write = false;
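[Aside, not part of the patch: the pp |= 4 above folds the segment key (Kp or Ks, chosen by MSR_PR) into the PP bits so one value can index the architectural protection table. The switch that consumes it sits in unchanged context outside these hunks, so the sketch below is a hedged restatement of the standard 32-bit PowerPC key/PP decode, not the patch itself.]

/* Illustrative sketch only: key/PP protection decode fed by the
 * combined value computed above (key in bit 2, PP in bits 1:0). */
#include <stdio.h>
#include <stdbool.h>

static void decode_pp(unsigned int pp, bool *may_read, bool *may_write)
{
        *may_read = false;
        *may_write = false;

        switch (pp) {
        case 0: case 1: case 2:      /* key = 0: PP 0-2 are read/write */
        case 6:                      /* key = 1: PP 2 is read/write */
                *may_write = true;
                /* fall through */
        case 3:                      /* key = 0: PP 3 is read-only */
        case 5: case 7:              /* key = 1: PP 1 and 3 are read-only */
                *may_read = true;
                break;
                                     /* case 4 (key = 1, PP 0): no access */
        }
}

int main(void)
{
        for (unsigned int pp = 0; pp < 8; pp++) {
                bool r, w;

                decode_pp(pp, &r, &w);
                printf("key=%u pp=%u -> read=%d write=%d\n", pp >> 2, pp & 3, r, w);
        }
        return 0;
}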
@@ -234,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                                        break;
                        }
 
-                       if ( !pte->may_read )
-                               continue;
-
                        dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
                                    pteg[i], pteg[i+1], pp);
                        found = 1;
@@ -247,19 +269,23 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
        /* Update PTE C and A bits, so the guest's swapper knows we used the
           page */
        if (found) {
-               u32 oldpte = pteg[i+1];
-
-               if (pte->may_read)
-                       pteg[i+1] |= PTEG_FLAG_ACCESSED;
-               if (pte->may_write)
-                       pteg[i+1] |= PTEG_FLAG_DIRTY;
-               else
-                       dprintk_pte("KVM: Mapping read-only page!\n");
-
-               /* Write back into the PTEG */
-               if (pteg[i+1] != oldpte)
-                       copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-
+               u32 pte_r = pteg[i+1];
+               char __user *addr = (char __user *) &pteg[i+1];
+
+               /*
+                * Use single-byte writes to update the HPTE, to
+                * conform to what real hardware does.
+                */
+               if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
+                       pte_r |= PTEG_FLAG_ACCESSED;
+                       put_user(pte_r >> 8, addr + 2);
+               }
+               if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
+                       pte_r |= PTEG_FLAG_DIRTY;
+                       put_user(pte_r, addr + 3);
+               }
+               if (!pte->may_read || (iswrite && !pte->may_write))
+                       return -EPERM;
                return 0;
        }
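[Aside, not part of the patch: the low PTE word lives big-endian in the guest hash table, so the R (accessed) and C (changed) bits sit in bytes 2 and 3 of that word; the 0x100/0x80 values below are assumptions mirroring PTEG_FLAG_ACCESSED and PTEG_FLAG_DIRTY. A userspace sketch of the byte arithmetic behind the two put_user() calls above:]

/* Illustrative sketch only: why put_user(pte_r >> 8, addr + 2) and
 * put_user(pte_r, addr + 3) each touch exactly the byte carrying the
 * R or C bit of the big-endian low PTE word. */
#include <stdio.h>
#include <stdint.h>

#define FLAG_ACCESSED 0x00000100u /* HPTE R bit (assumed value) */
#define FLAG_DIRTY    0x00000080u /* HPTE C bit (assumed value) */

int main(void)
{
        uint32_t pte_r = 0x12345003;   /* arbitrary low PTE word */
        uint8_t be[4];                 /* its big-endian byte image in the guest HTAB */

        pte_r |= FLAG_ACCESSED | FLAG_DIRTY;

        be[0] = pte_r >> 24;
        be[1] = pte_r >> 16;
        be[2] = pte_r >> 8;            /* bits 15..8: carries R; written via addr + 2 */
        be[3] = pte_r;                 /* bits  7..0: carries C; written via addr + 3 */

        printf("byte 2 = 0x%02x (R set: %d), byte 3 = 0x%02x (C set: %d)\n",
               be[2], !!(be[2] & (FLAG_ACCESSED >> 8)), be[3], !!(be[3] & FLAG_DIRTY));
        return 0;
}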
 
@@ -269,7 +295,7 @@ no_page_found:
                dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n",
                            to_book3s(vcpu)->sdr1, ptegp);
                for (i=0; i<16; i+=2) {
-                       dprintk_pte("   %02d: 0x%x - 0x%x (0x%llx)\n",
+                       dprintk_pte("   %02d: 0x%x - 0x%x (0x%x)\n",
                                    i, pteg[i], pteg[i+1], ptem);
                }
        }
@@ -278,16 +304,36 @@ no_page_found:
 }
 
 static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                     struct kvmppc_pte *pte, bool data)
+                                     struct kvmppc_pte *pte, bool data,
+                                     bool iswrite)
 {
        int r;
+       ulong mp_ea = vcpu->arch.magic_page_ea;
 
        pte->eaddr = eaddr;
-       r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
+       pte->page_size = MMU_PAGE_4K;
+
+       /* Magic page override */
+       if (unlikely(mp_ea) &&
+           unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+           !(vcpu->arch.shared->msr & MSR_PR)) {
+               pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
+               pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
+               pte->raddr &= KVM_PAM;
+               pte->may_execute = true;
+               pte->may_read = true;
+               pte->may_write = true;
+
+               return 0;
+       }
+
+       r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
        if (r < 0)
-              r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+               r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+                                                  data, iswrite, true);
        if (r < 0)
-              r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
+               r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+                                                  data, iswrite, false);
 
        return r;
 }
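[Aside, not part of the patch: a minimal control-flow sketch of the fallback order the rewritten kvmppc_mmu_book3s_32_xlate() follows; the stub helpers are hypothetical stand-ins for the real kernel functions, not their signatures.]

/* Illustrative sketch only: magic page first, then BATs, then the
 * primary and secondary PTEGs. Each stub returns 0 on a hit. */
#include <stdio.h>
#include <stdbool.h>

static int magic_page_hit(void)   { return -1; }
static int bat_hit(void)          { return -1; }
static int pteg_hit(bool primary) { return primary ? -1 : 0; }

static int xlate_order(void)
{
        if (magic_page_hit() == 0)   /* supervisor-only magic page override */
                return 0;
        if (bat_hit() == 0)          /* block address translation */
                return 0;
        if (pteg_hit(true) == 0)     /* primary PTEG */
                return 0;
        return pteg_hit(false);      /* secondary PTEG, last resort */
}

int main(void)
{
        printf("translation %s\n", xlate_order() == 0 ? "hit" : "missed");
        return 0;
}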
@@ -295,55 +341,43 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
 {
-       return to_book3s(vcpu)->sr[srnum].raw;
+       return vcpu->arch.shared->sr[srnum];
 }
 
 static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
                                        ulong value)
 {
-       struct kvmppc_sr *sre;
-
-       sre = &to_book3s(vcpu)->sr[srnum];
-
-       /* Flush any left-over shadows from the previous SR */
-
-       /* XXX Not necessary? */
-       /* kvmppc_mmu_pte_flush(vcpu, ((u64)sre->vsid) << 28, 0xf0000000ULL); */
-
-       /* And then put in the new SR */
-       sre->raw = value;
-       sre->vsid = (value & 0x0fffffff);
-       sre->valid = (value & 0x80000000) ? false : true;
-       sre->Ks = (value & 0x40000000) ? true : false;
-       sre->Kp = (value & 0x20000000) ? true : false;
-       sre->nx = (value & 0x10000000) ? true : false;
-
-       /* Map the new segment */
+       vcpu->arch.shared->sr[srnum] = value;
        kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
 }
 
 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
 {
-       kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
+       int i;
+       struct kvm_vcpu *v;
+
+       /* flush this VA on all cpus */
+       kvm_for_each_vcpu(i, v, vcpu->kvm)
+               kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
 }
 
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
 {
        ulong ea = esid << SID_SHIFT;
-       struct kvmppc_sr *sr;
+       u32 sr;
        u64 gvsid = esid;
 
-       if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-               sr = find_sr(to_book3s(vcpu), ea);
-               if (sr->valid)
-                       gvsid = sr->vsid;
+       if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+               sr = find_sr(vcpu, ea);
+               if (sr_valid(sr))
+                       gvsid = sr_vsid(sr);
        }
 
        /* In case we only have one of MSR_IR or MSR_DR set, let's put
           that in the real-mode context (and hope RM doesn't access
           high memory) */
-       switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+       switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                *vsid = VSID_REAL | esid;
                break;
@@ -354,8 +388,8 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                *vsid = VSID_REAL_DR | gvsid;
                break;
        case MSR_DR|MSR_IR:
-               if (sr->valid)
-                       *vsid = sr->vsid;
+               if (sr_valid(sr))
+                       *vsid = sr_vsid(sr);
                else
                        *vsid = VSID_BAT | gvsid;
                break;
@@ -363,7 +397,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                BUG();
        }
 
-       if (vcpu->arch.msr & MSR_PR)
+       if (vcpu->arch.shared->msr & MSR_PR)
                *vsid |= VSID_PR;
 
        return 0;