Merge tag 'kvm-arm-for-3.14' of git://git.linaro.org/people/christoffer.dall/linux...
author     Paolo Bonzini <pbonzini@redhat.com>
           Wed, 15 Jan 2014 11:14:29 +0000 (12:14 +0100)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 15 Jan 2014 11:14:29 +0000 (12:14 +0100)
Documentation/virtual/kvm/api.txt
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/arm.c
arch/arm/kvm/handle_exit.c
arch/arm/kvm/mmu.c
arch/arm/kvm/psci.c
arch/arm64/include/asm/kvm_mmu.h

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index a8cdc7b72281f915e835d9050235ae23264e9789..366bf4b47ef48bb56a6d0dd96d6951a65c8e2129 100644
@@ -2327,7 +2327,7 @@ current state.  "addr" is ignored.
 Capability: basic
 Architectures: arm, arm64
 Type: vcpu ioctl
-Parameters: struct struct kvm_vcpu_init (in)
+Parameters: struct kvm_vcpu_init (in)
 Returns: 0 on success; -1 on error
 Errors:
   EINVAL:    the target is unknown, or the combination of features is invalid.
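
The hunk above fixes the duplicated "struct" in the KVM_ARM_VCPU_INIT documentation. As a rough illustration of the ioctl being documented (not part of this commit), here is a minimal userspace sketch that issues KVM_ARM_VCPU_INIT with the KVM_ARM_VCPU_POWER_OFF feature, the case the arch/arm/kvm/arm.c change below now handles by marking the VCPU as paused; vcpu_fd is assumed to come from an earlier KVM_CREATE_VCPU call and the CPU target is purely illustrative.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: initialise a VCPU so that it starts in the powered-off state. */
static int vcpu_init_powered_off(int vcpu_fd)
{
        struct kvm_vcpu_init init;

        memset(&init, 0, sizeof(init));
        init.target = KVM_ARM_TARGET_CORTEX_A15;        /* illustrative target */
        init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF; /* "start in power-off" */

        /* Returns 0 on success, -1 on error (EINVAL for a bad target/features). */
        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}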
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 77de4a41cc5045bd3b778f98396a454d23767467..2d122adcdb22108984a46f8483b5d2ed6b37b811 100644
@@ -140,6 +140,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
 }
 
 #define kvm_flush_dcache_to_poc(a,l)   __cpuc_flush_dcache_area((a), (l))
+#define kvm_virt_to_phys(x)            virt_to_idmap((unsigned long)(x))
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index b92ff6d3e34b755f0caf47f2f9497a4eba29386f..2d4b4a8068c82ca843d69c6a07b712cbb8d21226 100644
@@ -489,15 +489,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
                        return ret;
        }
 
-       /*
-        * Handle the "start in power-off" case by calling into the
-        * PSCI code.
-        */
-       if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
-               *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
-               kvm_psci_call(vcpu);
-       }
-
        return 0;
 }
 
@@ -711,6 +702,24 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
        return -EINVAL;
 }
 
+static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+                                        struct kvm_vcpu_init *init)
+{
+       int ret;
+
+       ret = kvm_vcpu_set_target(vcpu, init);
+       if (ret)
+               return ret;
+
+       /*
+        * Handle the "start in power-off" case by marking the VCPU as paused.
+        */
+       if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+               vcpu->arch.pause = true;
+
+       return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {
@@ -724,8 +733,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                if (copy_from_user(&init, argp, sizeof(init)))
                        return -EFAULT;
 
-               return kvm_vcpu_set_target(vcpu, &init);
-
+               return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
        }
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index a92079011a836974a9188ab8db1fa75f2bc5403a..0de91fc6de0ff06e671357430c0544581127ea80 100644
@@ -26,8 +26,6 @@
 
 #include "trace.h"
 
-#include "trace.h"
-
 typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
 
 static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 580906989db1091eb034ada5cc60570505de8cd1..7789857d147034b8cbfd1e44d599f81db085215b 100644
@@ -667,14 +667,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
        } else {
                /*
-                * Pages belonging to VMAs not aligned to the PMD mapping
-                * granularity cannot be mapped using block descriptors even
-                * if the pages belong to a THP for the process, because the
-                * stage-2 block descriptor will cover more than a single THP
-                * and we loose atomicity for unmapping, updates, and splits
-                * of the THP or other pages in the stage-2 block range.
+                * Pages belonging to memslots that don't have the same
+                * alignment for userspace and IPA cannot be mapped using
+                * block descriptors even if the pages belong to a THP for
+                * the process, because the stage-2 block descriptor will
+                * cover more than a single THP and we loose atomicity for
+                * unmapping, updates, and splits of the THP or other pages
+                * in the stage-2 block range.
                 */
-               if (vma->vm_start & ~PMD_MASK)
+               if ((memslot->userspace_addr & ~PMD_MASK) !=
+                   ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
                        force_pte = true;
        }
        up_read(&current->mm->mmap_sem);
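
The rewritten check above no longer looks at vma->vm_start: a THP-backed region may only be mapped with a stage-2 block descriptor when the memslot's userspace address and guest IPA share the same offset within a PMD-sized block, otherwise force_pte is set. A standalone sketch of that congruence test (assuming 4K pages and 2MB PMD blocks; the helper name and sample addresses are made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_SHIFT       21
#define PMD_MASK        (~((UINT64_C(1) << PMD_SHIFT) - 1))

/* Block mappings are only safe when hva and IPA are congruent modulo 2MB. */
static bool can_use_block_mapping(uint64_t userspace_addr, uint64_t base_gfn)
{
        return (userspace_addr & ~PMD_MASK) ==
               ((base_gfn << PAGE_SHIFT) & ~PMD_MASK);
}

int main(void)
{
        /* hva and IPA both 2MB-aligned: block descriptors are allowed (1). */
        printf("%d\n", can_use_block_mapping(0x40200000, 0x80000000 >> PAGE_SHIFT));
        /* hva shifted by one page relative to the IPA: must force PTEs (0). */
        printf("%d\n", can_use_block_mapping(0x40201000, 0x80000000 >> PAGE_SHIFT));
        return 0;
}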
@@ -916,9 +918,9 @@ int kvm_mmu_init(void)
 {
        int err;
 
-       hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
-       hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
-       hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);
+       hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
+       hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
+       hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
 
        if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
                /*
@@ -945,7 +947,7 @@ int kvm_mmu_init(void)
                 */
                kvm_flush_dcache_to_poc(init_bounce_page, len);
 
-               phys_base = virt_to_phys(init_bounce_page);
+               phys_base = kvm_virt_to_phys(init_bounce_page);
                hyp_idmap_vector += phys_base - hyp_idmap_start;
                hyp_idmap_start = phys_base;
                hyp_idmap_end = phys_base + len;
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 0881bf169fbce5cf09db6da3ff0996df6cf989cb..448f60e8d23ca0c9886baa72ea437087b4fea41e 100644
@@ -54,15 +54,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
                }
        }
 
-       if (!vcpu)
+       /*
+        * Make sure the caller requested a valid CPU and that the CPU is
+        * turned off.
+        */
+       if (!vcpu || !vcpu->arch.pause)
                return KVM_PSCI_RET_INVAL;
 
        target_pc = *vcpu_reg(source_vcpu, 2);
 
-       wq = kvm_arch_vcpu_wq(vcpu);
-       if (!waitqueue_active(wq))
-               return KVM_PSCI_RET_INVAL;
-
        kvm_reset_vcpu(vcpu);
 
        /* Gracefully handle Thumb2 entry point */
@@ -79,6 +79,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
        vcpu->arch.pause = false;
        smp_mb();               /* Make sure the above is visible */
 
+       wq = kvm_arch_vcpu_wq(vcpu);
        wake_up_interruptible(wq);
 
        return KVM_PSCI_RET_SUCCESS;
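
With this change the pause flag, not waitqueue_active(), is what decides whether the target CPU counts as turned off, and the waitqueue is only fetched after the VCPU has been reset and un-paused. A rough standalone sketch of the resulting CPU_ON ordering, using stand-in types rather than the real KVM structures:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the target VCPU: only the state this flow cares about. */
struct toy_vcpu {
        bool pause;         /* true while the CPU is "turned off" */
        unsigned long pc;   /* requested entry point */
};

static int toy_psci_vcpu_on(struct toy_vcpu *target, unsigned long entry_pc)
{
        /* Caller must name a valid CPU, and that CPU must be off (paused). */
        if (!target || !target->pause)
                return -1;              /* KVM_PSCI_RET_INVAL in the real code */

        target->pc = entry_pc;          /* stands in for kvm_reset_vcpu() + entry setup */
        target->pause = false;          /* real code follows this with smp_mb() ... */

        /* ... and only then looks up the waitqueue and wakes the target. */
        printf("vcpu woken at 0x%lx\n", target->pc);
        return 0;                       /* KVM_PSCI_RET_SUCCESS */
}

int main(void)
{
        struct toy_vcpu v = { .pause = true };

        printf("first CPU_ON:  %d\n", toy_psci_vcpu_on(&v, 0x80080000UL)); /* 0: was off */
        printf("second CPU_ON: %d\n", toy_psci_vcpu_on(&v, 0x80080000UL)); /* -1: already on */
        return 0;
}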
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 680f74e674971ab5062047369d6994edddc64e8e..7f1f9408ff66e897e8988d0b844ab9a3e8f903a0 100644
@@ -136,6 +136,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
 }
 
 #define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+#define kvm_virt_to_phys(x)            __virt_to_phys((unsigned long)(x))
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */