/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
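/*
 * Real-mode handlers for the HPT-manipulation hypercalls (H_ENTER,
 * H_REMOVE, H_BULK_REMOVE, H_PROTECT, H_READ) and for HPTE-not-found
 * faults, plus helpers shared with the virtual-mode MMU code.  Since
 * this code can run in real mode, vmalloc'd structures are accessed
 * through real_vmalloc_addr() rather than directly.
 */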
/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
						gfn_t gfn)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		    gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	return NULL;
}
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}
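/*
 * Each guest real page has a reverse-mapping chain: a circular
 * doubly-linked list of the HPTEs that map it, threaded through the
 * forw/back indices of the revmap_entry array.  The rmap word in the
 * memslot holds the index of one list element together with the
 * PRESENT and REFERENCED flags, and also serves as the chain lock.
 */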
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		i = pte_index;
	}
	smp_wmb();
	*rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				unsigned long hpte_v)
{
	struct revmap_entry *rev, *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	ptel = rev->guest_rpte;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return;

	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	unlock_rmap(rmap);
}

/* Look up (and update) the Linux PTE backing a host virtual address */
static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
			      int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
	if (!ptep)
		return __pte(0);
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	if (!pte_present(*ptep))
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep, writing);
}
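/*
 * Handle the H_ENTER hypercall in real mode: validate the guest's
 * proposed HPTE, translate the guest real address to a host address
 * (or mark the entry absent for paged-out or emulated-MMIO pages),
 * find and lock a slot in the requested HPTE group, link the entry
 * into the reverse-map chain and finally write it to the HPT.
 */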
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel = ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned int writing;
	unsigned long mmu_seq;
	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
		if (pte_present(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
		}
	}
	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG bits */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev)
		rev->guest_rpte = g_ptel;

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(vcpu, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
		}
	}
	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");
	vcpu->arch.gpr[4] = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);
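/*
 * Global tlbie operations must be serialised, so the functions below
 * take kvm->arch.tlbie_lock (a simple word lock acquired with
 * try_lock_tlbie(), using the paca lock token as the owner value)
 * around each tlbie/eieio/tlbsync sequence.
 */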
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

/* Handle the H_REMOVE hypercall in real mode */
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	if (v & HPTE_V_VALID)
		remove_revmap_chain(kvm, pte_index, v);
	smp_wmb();
	hpte[0] = 0;
	if (!(v & HPTE_V_VALID))
		return H_SUCCESS;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}
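/*
 * Handle the H_BULK_REMOVE hypercall in real mode: process up to four
 * remove requests passed in GPR4-GPR11, two registers per request, and
 * batch the resulting TLB invalidations.
 */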
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)		/* no more requests */
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= HPT_NPTE) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		if (hp[0] & HPTE_V_VALID) {
			tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
			remove_revmap_chain(kvm, pte_index, hp[0]);
		}
		smp_wmb();
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}
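/*
 * Handle the H_PROTECT hypercall in real mode: update the protection
 * bits (pp0, pp, N and key) of an existing HPTE and of the guest's
 * saved view of it, invalidating the old translation if it was valid.
 */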
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		if (!(flags & H_LOCAL)) {
			while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
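/*
 * Handle the H_READ hypercall in real mode: return one HPTE (or four
 * with H_READ_4) in GPR4 onwards, substituting the guest's view of the
 * second doubleword when H_R_XLATE is set.
 */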
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	if (flags & H_R_XLATE)
		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			if (rev)
				r = rev[i].guest_rpte;
			else
				r = hpte[1] | HPTE_R_RPN;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
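/*
 * Clear the valid bit of an HPTE and flush the corresponding TLB entry
 * with a global tlbie.  No HPTE lock is taken here, so the caller is
 * expected to already hold it.
 */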
void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~HPTE_V_VALID;
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
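/*
 * Base page shift selected by the SLB LP bits (when L = 1), used by
 * kvmppc_hv_find_lock_hpte() below when searching the HPT for an entry
 * matching a given effective address and SLB entry.
 */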
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;
	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));
		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;
			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;
			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];
			/*
			 * Check the HPTE again, including large page size.
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);
			/* Unlock and move on */
			hpte[i] = v;
		}
		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ HPT_HASH_MASK;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault
 * should be passed on to the guest,
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hpte[0] = v;

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}