KVM: PPC: Call SLB patching code in interrupt safe manner
author     Alexander Graf <agraf@suse.de>
           Fri, 8 Jan 2010 01:58:06 +0000 (02:58 +0100)
committer  Marcelo Tosatti <mtosatti@redhat.com>
           Mon, 1 Mar 2010 15:35:49 +0000 (12:35 -0300)
Currently we're racy when doing the transition from IR=1 to IR=0, from
the module memory entry code to the real mode SLB switching code: a page
fault taken after SRR0/SRR1 are loaded but before the RFI would clobber
them.

To work around that I took a look at the RTAS entry code, which faces a
similar problem, and did the same thing:

  A small helper in linearly mapped memory that does mtmsr with IR=0
  and then RFIs into the actual handler.

Thanks to that trick we can safely take page faults in the entry code
and only need to be really careful from the SLB switching code onwards.
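
For illustration, a minimal C-level sketch of the fixed entry flow
follows. kvmppc_rmcall and its two-argument prototype come straight from
this patch; the wrapper function, its name and the field accesses are
assumptions made only for the sketch (the real entry code additionally
hands the guest CTR value to the helper in r5, outside the C calling
convention):

  /* Sketch: enter the real mode SLB switching code through the helper.
   * kvmppc_rmcall sits in linearly mapped memory and turns translation
   * off with mtmsr first, so no page fault can clobber SRR0/SRR1
   * between loading them and the RFI. */
  static void kvmppc_enter_guest_sketch(struct kvm_vcpu *vcpu)
  {
          ulong dest = vcpu->arch.trampoline_enter;     /* SRR0: SLB patching code */
          ulong msr  = MSR_KERNEL & ~(MSR_IR | MSR_DR); /* SRR1: IR=DR=0 */

          kvmppc_rmcall(dest, msr);                     /* mtmsr; mtsrr0/1; RFI */
  }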

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_64_asm.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_64_exports.c
arch/powerpc/kvm/book3s_64_interrupts.S
arch/powerpc/kvm/book3s_64_rmhandlers.S
arch/powerpc/kvm/book3s_64_slb.S

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f192017d799d20c2ede3bffbdffc76de5e8982b2..c91be0ff0232f624aed6cd94b5944dbbff51c2d9 100644
@@ -121,6 +121,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
+extern void kvmppc_rmcall(ulong srr0, ulong srr1);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
index fca9404c1a7dc5ab2567765cfa0293f1e196839d..183461b484076e25fd5ca714cce4426b82b0e1e6 100644
@@ -69,7 +69,6 @@ struct kvmppc_book3s_shadow_vcpu {
        ulong scratch0;
        ulong scratch1;
        ulong vmhandler;
-       ulong rmhandler;
 };
 
 #endif /*__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d615fa8a1412043229899a32457e77c8fc19af6f..f7215e622dfd8c0e386a4084dbc5107de35d550b 100644
@@ -167,6 +167,7 @@ struct kvm_vcpu_arch {
        ulong trampoline_lowmem;
        ulong trampoline_enter;
        ulong highmem_handler;
+       ulong rmcall;
        ulong host_paca_phys;
        struct kvmppc_mmu mmu;
 #endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 1501e77c980cc38a0086fe9a3b7edc11e4da4c5e..ee9935442f0eedf593ae7d01d9020a2f14d4e9cb 100644
@@ -214,8 +214,6 @@ int main(void)
        DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
        DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
                                            shadow_vcpu.vmhandler));
-       DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct,
-                                           shadow_vcpu.rmhandler));
        DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
                                           shadow_vcpu.scratch0));
        DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
@@ -438,6 +436,7 @@ int main(void)
        DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
        DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
        DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+       DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
        DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
 #else
        DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e06eae3f2c88ceb0e8aa9dbe350d2339d9c3059..13173922b6786d4a53fe80e76aec06f2970edfba 100644
@@ -919,6 +919,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
        vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
        vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+       vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
 
        vcpu->arch.shadow_msr = MSR_USER64;
 
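
A note on the rmcall initialization above: on 64-bit PowerPC with the
ELFv1 ABI, a function symbol such as kvmppc_rmcall names a function
descriptor whose first doubleword is the real entry-point address, so
the dereference strips the descriptor and leaves a raw text address the
assembly entry code can feed straight to mtctr. A one-line sketch of the
same idea (the variable name is illustrative only):

  /* ELFv1: *descriptor = entry point, usable with mtctr/bctr */
  ulong rmcall_entry = *(ulong *)kvmppc_rmcall;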
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c
index 5b2db38ed86cce3c1d69309308f0a49c63806ee4..99b07125c52964c1d94b2c4257ae5414acfa0f90 100644
@@ -22,3 +22,4 @@
 
 EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
 EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+EXPORT_SYMBOL_GPL(kvmppc_rmcall);
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index 3c0ba5513077f4906c6965db4b878099a3a02dac..33aef5345f6b5cf2eaf5e0227127324930df4be3 100644
@@ -95,17 +95,14 @@ kvm_start_entry:
        ld      r3, VCPU_HIGHMEM_HANDLER(r4)
        std     r3, PACA_KVM_VMHANDLER(r13)
 
-       ld      r3, VCPU_TRAMPOLINE_ENTER(r4)
-       std     r3, PACA_KVM_RMHANDLER(r13)
-
 kvm_start_lightweight:
 
        ld      r9, VCPU_PC(r4)                 /* r9 = vcpu->arch.pc */
        ld      r10, VCPU_SHADOW_MSR(r4)        /* r10 = vcpu->arch.shadow_msr */
 
        /* Load some guest state in the respective registers */
-       ld      r3, VCPU_CTR(r4)        /* r3 = vcpu->arch.ctr */
-       mtctr   r3                      /* CTR = r3 */
+       ld      r5, VCPU_CTR(r4)        /* r5 = vcpu->arch.ctr */
+                                       /* will be swapped in by rmcall */
 
        ld      r3, VCPU_LR(r4)         /* r3 = vcpu->arch.lr */
        mtlr    r3                      /* LR = r3 */
@@ -131,22 +128,14 @@ kvm_start_lightweight:
 
 no_dcbz32_on:
 
-       /* This sets the Magic value for the trampoline */
-
-       /* XXX this needs to move into a safe function, so we can
-          be sure we don't get any interrupts */
-
-       li      r11, 1
-       stb     r11, PACA_KVM_IN_GUEST(r13)
-
-       ld      r3, PACA_KVM_RMHANDLER(r13)
-       mtsrr0  r3
+       ld      r6, VCPU_RMCALL(r4)
+       mtctr   r6
 
-       LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-       mtsrr1  r3
+       ld      r3, VCPU_TRAMPOLINE_ENTER(r4)
+       LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
 
        /* Jump to SLB patching handler and into our guest */
-       RFI
+       bctr
 
 /*
  * This is the handler in module memory. It gets jumped at from the
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
index 9ad1c2645d6f7c2c6b1d96012aa47671127c87c7..e7091c9459a8b599a5c8c96e61e25ac0d20087ea 100644
@@ -140,6 +140,24 @@ kvmppc_handler_lowmem_trampoline:
        blr
 kvmppc_handler_lowmem_trampoline_end:
 
+/*
+ * Call a function in real mode
+ *
+ * Input Registers:
+ *
+ * R3 = function
+ * R4 = MSR
+ * R5 = CTR
+ *
+ */
+_GLOBAL(kvmppc_rmcall)
+       mtmsr   r4              /* Disable relocation, so mtsrr
+                                  doesn't get interrupted */
+       mtctr   r5
+       mtsrr0  r3
+       mtsrr1  r4
+       RFI
+
 .global kvmppc_trampoline_lowmem
 kvmppc_trampoline_lowmem:
        .long kvmppc_handler_lowmem_trampoline - _stext
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index d07b88617b2cf6915c3c6c0e077540736ad85d91..35b762722187ca884bd950bcbb894c4dfae09b32 100644
@@ -63,6 +63,10 @@ kvmppc_handler_trampoline_enter:
        mtsrr0  r9
        mtsrr1  r10
 
+       /* Activate guest mode, so faults get handled by KVM */
+       li      r11, KVM_GUEST_MODE_GUEST
+       stb     r11, PACA_KVM_IN_GUEST(r13)
+
        /* Remove LPAR shadow entries */
 
 #if SLB_NUM_BOLTED == 3