2 * s390host.c -- hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008,2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/system.h>
/*
 * VCPU_STAT(x) expands to the (offset, type) pair that struct
 * kvm_stats_debugfs_item expects for a per-vcpu counter in kvm_vcpu.stat.
 */
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/*
 * Statistics exported via the kvm debugfs interface: each entry maps a
 * debugfs file name to a counter in the per-vcpu stat structure.
 * (Array terminator/closing brace not visible in this view of the file.)
 */
36 struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
38 { "exit_null", VCPU_STAT(exit_null) },
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
75 { "diagnose_10", VCPU_STAT(diagnose_10) },
76 { "diagnose_44", VCPU_STAT(diagnose_44) },
/*
 * Facility bitmap advertised to guests; allocated in kvm_s390_init() and
 * masked there to the subset of facilities known to work under KVM.
 */
80 static unsigned long long *facilities;
82 /* Section: not file related */
/*
 * Hardware enable/disable/setup hooks required by common KVM code.
 * They are effectively no-ops on s390 (see the comment below).
 */
83 int kvm_arch_hardware_enable(void *garbage)
85 /* every s390 is virtualization enabled ;-) */
89 void kvm_arch_hardware_disable(void *garbage)
93 int kvm_arch_hardware_setup(void)
98 void kvm_arch_hardware_unsetup(void)
102 void kvm_arch_check_processor_compat(void *rtn)
106 int kvm_arch_init(void *opaque)
111 void kvm_arch_exit(void)
115 /* Section: device related */
/*
 * /dev/kvm ioctl handler: only KVM_S390_ENABLE_SIE is handled here,
 * delegated to s390_enable_sie() (defined elsewhere).
 */
116 long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
/*
 * Report which optional KVM capabilities this architecture supports;
 * the visible cases advertise PSW access, GMAP and SYNC_MMU support.
 */
124 int kvm_dev_ioctl_check_extension(long ext)
129 case KVM_CAP_S390_PSW:
130 case KVM_CAP_S390_GMAP:
131 case KVM_CAP_SYNC_MMU:
140 /* Section: vm related */
142 * Get (and clear) the dirty memory log for a memory slot.
144 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
145 struct kvm_dirty_log *log)
/*
 * VM-scope ioctl handler. The visible case, KVM_S390_INTERRUPT, copies a
 * struct kvm_s390_interrupt from userspace and injects it as a floating
 * (VM-wide) interrupt via kvm_s390_inject_vm().
 */
150 long kvm_arch_vm_ioctl(struct file *filp,
151 unsigned int ioctl, unsigned long arg)
153 struct kvm *kvm = filp->private_data;
154 void __user *argp = (void __user *)arg;
158 case KVM_S390_INTERRUPT: {
159 struct kvm_s390_interrupt s390int;
162 if (copy_from_user(&s390int, argp, sizeof(s390int)))
164 r = kvm_s390_inject_vm(kvm, &s390int);
/*
 * Create the arch-specific part of a VM: enable SIE for the process,
 * allocate the SCA page and the s390 debug feature, initialize floating
 * interrupt state, and set up the guest address space (gmap) — unless
 * the VM is user-controlled, in which case no kernel gmap is created.
 */
174 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
180 #ifdef CONFIG_KVM_S390_UCONTROL
/* user-controlled VMs: reject unknown type bits; require CAP_SYS_ADMIN */
181 if (type & ~KVM_VM_S390_UCONTROL)
183 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
190 rc = s390_enable_sie();
196 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
/* one debug feature area per VM, named after the creating pid */
200 sprintf(debug_name, "kvm-%u", current->pid);
202 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
206 spin_lock_init(&kvm->arch.float_int.lock);
207 INIT_LIST_HEAD(&kvm->arch.float_int.list);
209 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
210 VM_EVENT(kvm, 3, "%s", "vm created");
212 if (type & KVM_VM_S390_UCONTROL) {
213 kvm->arch.gmap = NULL;
215 kvm->arch.gmap = gmap_alloc(current->mm);
/* error unwind (labels not visible here): release dbf, then the SCA page */
221 debug_unregister(kvm->arch.dbf);
223 free_page((unsigned long)(kvm->arch.sca));
/*
 * Tear down one vcpu: for regular VMs, detach it from the SCA (undoing
 * the set_bit(63 - id, ...) / sda assignment done at creation); for
 * user-controlled VMs, free the vcpu's private gmap. Finally release
 * the SIE control block page and the common vcpu state.
 */
228 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
230 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
231 if (!kvm_is_ucontrol(vcpu->kvm)) {
232 clear_bit(63 - vcpu->vcpu_id,
233 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
/* only clear the sda slot if it still points at *this* sie block */
234 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
235 (__u64) vcpu->arch.sie_block)
236 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
240 if (kvm_is_ucontrol(vcpu->kvm))
241 gmap_free(vcpu->arch.gmap);
243 free_page((unsigned long)(vcpu->arch.sie_block));
244 kvm_vcpu_uninit(vcpu);
/*
 * Destroy every vcpu of a VM, then clear the vcpu array and the online
 * counter under kvm->lock.
 */
248 static void kvm_free_vcpus(struct kvm *kvm)
251 struct kvm_vcpu *vcpu;
253 kvm_for_each_vcpu(i, vcpu, kvm)
254 kvm_arch_vcpu_destroy(vcpu);
256 mutex_lock(&kvm->lock);
257 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
258 kvm->vcpus[i] = NULL;
260 atomic_set(&kvm->online_vcpus, 0);
261 mutex_unlock(&kvm->lock);
264 void kvm_arch_sync_events(struct kvm *kvm)
/*
 * Release VM-wide arch state: SCA page, debug feature area and — for
 * non-ucontrol VMs — the VM-wide gmap.
 */
268 void kvm_arch_destroy_vm(struct kvm *kvm)
271 free_page((unsigned long)(kvm->arch.sca));
272 debug_unregister(kvm->arch.dbf);
273 if (!kvm_is_ucontrol(kvm))
274 gmap_free(kvm->arch.gmap);
277 /* Section: vcpu related */
/*
 * Per-vcpu init: user-controlled vcpus allocate their own gmap; regular
 * vcpus share the VM-wide gmap set up in kvm_arch_init_vm().
 */
278 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
280 if (kvm_is_ucontrol(vcpu->kvm)) {
281 vcpu->arch.gmap = gmap_alloc(current->mm);
282 if (!vcpu->arch.gmap)
287 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
291 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
/*
 * Scheduled-in hook: save the host FP/access registers, install the
 * guest's, enable the guest address space and flag the vcpu as running.
 */
296 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
298 save_fp_regs(&vcpu->arch.host_fpregs);
299 save_access_regs(vcpu->arch.host_acrs);
/* strip invalid FPC bits before loading guest FP state */
300 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
301 restore_fp_regs(&vcpu->arch.guest_fpregs);
302 restore_access_regs(vcpu->arch.guest_acrs);
303 gmap_enable(vcpu->arch.gmap);
304 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
/* Scheduled-out hook: exact inverse of kvm_arch_vcpu_load(). */
307 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
309 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
310 gmap_disable(vcpu->arch.gmap);
311 save_fp_regs(&vcpu->arch.guest_fpregs);
312 save_access_regs(vcpu->arch.guest_acrs);
313 restore_fp_regs(&vcpu->arch.host_fpregs);
314 restore_access_regs(vcpu->arch.host_acrs);
/*
 * Reset a vcpu to its architected initial state: clear PSW, prefix,
 * timers and control registers, then set the documented non-zero reset
 * values for CR0/CR14 and the FP control register.
 */
317 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
319 /* this equals initial cpu reset in pop, but we don't switch to ESA */
320 vcpu->arch.sie_block->gpsw.mask = 0UL;
321 vcpu->arch.sie_block->gpsw.addr = 0UL;
322 vcpu->arch.sie_block->prefix = 0UL;
323 vcpu->arch.sie_block->ihcpu = 0xffff;
324 vcpu->arch.sie_block->cputm = 0UL;
325 vcpu->arch.sie_block->ckc = 0UL;
326 vcpu->arch.sie_block->todpr = 0;
327 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
/* architected reset values for control registers 0 and 14 */
328 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
329 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
330 vcpu->arch.guest_fpregs.fpc = 0;
/* also load the cleared FPC into the real FP control register */
331 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
332 vcpu->arch.sie_block->gbea = 1;
/*
 * One-time vcpu setup: initialize the SIE control block (cpu flags,
 * execution controls, facility list pointer), the clock-comparator
 * hrtimer with its wakeup tasklet, and the reported CPU id.
 */
335 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
337 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
340 vcpu->arch.sie_block->ecb = 6;
341 vcpu->arch.sie_block->eca = 0xC1002001U;
/* SIE wants a 31-bit pointer to the facility list page */
342 vcpu->arch.sie_block->fac = (int) (long) facilities;
343 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
344 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
345 (unsigned long) vcpu);
346 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
347 get_cpu_id(&vcpu->arch.cpu_id);
/* tell the guest it runs under a hypervisor-style CPU version */
348 vcpu->arch.cpu_id.version = 0xff;
/*
 * Allocate and wire up a new vcpu: the kvm_vcpu structure, its SIE
 * control block page, the SCA linkage (non-ucontrol VMs only) and the
 * per-vcpu local interrupt state registered with the VM's floating
 * interrupt structure.
 */
352 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
355 struct kvm_vcpu *vcpu;
358 if (id >= KVM_MAX_VCPUS)
363 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
/* the SIE block must live in its own zeroed page */
367 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
368 get_zeroed_page(GFP_KERNEL);
370 if (!vcpu->arch.sie_block)
373 vcpu->arch.sie_block->icpua = id;
374 if (!kvm_is_ucontrol(kvm)) {
375 if (!kvm->arch.sca) {
/* publish this vcpu's SIE block in the SCA and set its cpu mask bit */
379 if (!kvm->arch.sca->cpu[id].sda)
380 kvm->arch.sca->cpu[id].sda =
381 (__u64) vcpu->arch.sie_block;
382 vcpu->arch.sie_block->scaoh =
383 (__u32)(((__u64)kvm->arch.sca) >> 32);
384 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
385 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
388 spin_lock_init(&vcpu->arch.local_int.lock);
389 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
390 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
/* register the local interrupt state under the floating-int lock */
391 spin_lock(&kvm->arch.float_int.lock);
392 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
393 init_waitqueue_head(&vcpu->arch.local_int.wq);
394 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
395 spin_unlock(&kvm->arch.float_int.lock);
397 rc = kvm_vcpu_init(vcpu, kvm, id);
399 goto out_free_sie_block;
400 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
401 vcpu->arch.sie_block);
/* error unwind: release the SIE block page */
405 free_page((unsigned long)(vcpu->arch.sie_block));
412 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
414 /* kvm common code refers to this, but never calls it */
/* KVM_S390_INITIAL_RESET ioctl backend: just perform the initial reset. */
419 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
421 kvm_s390_vcpu_initial_reset(vcpu);
425 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
427 memcpy(&vcpu->arch.guest_gprs, ®s->gprs, sizeof(regs->gprs));
431 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
433 memcpy(®s->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
/*
 * KVM_SET_SREGS backend: install access and control registers; access
 * registers are loaded into the hardware immediately.
 */
437 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
438 struct kvm_sregs *sregs)
440 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
441 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
442 restore_access_regs(vcpu->arch.guest_acrs);
/* KVM_GET_SREGS backend: read back access and control registers. */
446 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
447 struct kvm_sregs *sregs)
449 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
450 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
/* KVM_SET_FPU backend: install FP registers/FPC and load them. */
454 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
456 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
457 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
458 restore_fp_regs(&vcpu->arch.guest_fpregs);
/* KVM_GET_FPU backend: read back FP registers and FPC. */
462 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
464 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
465 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
/*
 * Set the initial guest PSW; only allowed while the vcpu is stopped.
 * The PSW is staged in the shared kvm_run area (the visible flow writes
 * psw_mask/psw_addr there, not directly into the SIE block).
 */
469 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
473 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
476 vcpu->run->psw_mask = psw.mask;
477 vcpu->run->psw_addr = psw.addr;
/* The following four ioctl backends are unimplemented on s390. */
482 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
483 struct kvm_translation *tr)
485 return -EINVAL; /* not implemented yet */
488 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
489 struct kvm_guest_debug *dbg)
491 return -EINVAL; /* not implemented yet */
494 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
495 struct kvm_mp_state *mp_state)
497 return -EINVAL; /* not implemented yet */
500 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
501 struct kvm_mp_state *mp_state)
503 return -EINVAL; /* not implemented yet */
/*
 * Run the guest once via the SIE instruction: stage gprs 14/15 into the
 * SIE block, deliver pending interrupts (non-ucontrol VMs), enter SIE,
 * and translate a SIE fault into either a ucontrol exit or an
 * addressing program interrupt for the guest.
 */
506 static int __vcpu_run(struct kvm_vcpu *vcpu)
510 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
515 if (test_thread_flag(TIF_MCCK_PENDING))
518 if (!kvm_is_ucontrol(vcpu->kvm))
519 kvm_s390_deliver_pending_interrupts(vcpu);
521 vcpu->arch.sie_block->icptcode = 0;
525 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
526 atomic_read(&vcpu->arch.sie_block->cpuflags));
527 rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
/* SIE failed: ucontrol guests report to userspace, others get PGM */
529 if (kvm_is_ucontrol(vcpu->kvm)) {
530 rc = SIE_INTERCEPT_UCONTROL;
532 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
533 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
537 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
538 vcpu->arch.sie_block->icptcode);
/* copy gprs 14/15 back out of the SIE block */
543 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
/*
 * KVM_RUN backend: install the guest PSW from kvm_run, loop entering
 * SIE and handling intercepts until an intercept must be completed in
 * userspace or a signal arrives, then fill kvm_run with the exit reason
 * and the current PSW.
 */
547 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* temporarily switch to the vcpu's signal mask while running */
553 if (vcpu->sigset_active)
554 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
556 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
558 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
560 switch (kvm_run->exit_reason) {
561 case KVM_EXIT_S390_SIEIC:
562 case KVM_EXIT_UNKNOWN:
564 case KVM_EXIT_S390_RESET:
565 case KVM_EXIT_S390_UCONTROL:
571 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
572 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
/* main run loop: enter SIE, then let the intercept handler decide */
577 rc = __vcpu_run(vcpu);
580 if (kvm_is_ucontrol(vcpu->kvm))
583 rc = kvm_handle_sie_intercept(vcpu);
584 } while (!signal_pending(current) && !rc);
586 if (rc == SIE_INTERCEPT_RERUNVCPU)
589 if (signal_pending(current) && !rc) {
590 kvm_run->exit_reason = KVM_EXIT_INTR;
594 #ifdef CONFIG_KVM_S390_UCONTROL
/* ucontrol intercept: report translation exception data to userspace */
595 if (rc == SIE_INTERCEPT_UCONTROL) {
596 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
597 kvm_run->s390_ucontrol.trans_exc_code =
598 current->thread.gmap_addr;
599 kvm_run->s390_ucontrol.pgm_code = 0x10;
604 if (rc == -EOPNOTSUPP) {
605 /* intercept cannot be handled in-kernel, prepare kvm-run */
606 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
607 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
608 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
609 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
613 if (rc == -EREMOTE) {
614 /* intercept was handled, but userspace support is needed
615 * kvm_run has been prepared by the handler */
/* export the final PSW so userspace sees the up-to-date state */
619 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
620 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
622 if (vcpu->sigset_active)
623 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
625 vcpu->stat.exit_userspace++;
/*
 * Copy n bytes into the guest at guestdest, using prefixed (virtual)
 * addressing when 'prefix' is set, absolute addressing otherwise.
 */
629 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
630 unsigned long n, int prefix)
633 return copy_to_guest(vcpu, guestdest, from, n);
635 return copy_to_guest_absolute(vcpu, guestdest, from, n);
639 * store status at address
640 * we have two special cases:
641 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
642 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
644 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
/* architecture mode byte (1 == z/Architecture) stored at absolute 163 */
646 unsigned char archmode = 1;
649 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
650 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
652 addr = SAVE_AREA_BASE;
654 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
655 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
657 addr = SAVE_AREA_BASE;
/* store each register group into its slot of struct save_area */
662 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
663 vcpu->arch.guest_fpregs.fprs, 128, prefix))
666 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
667 vcpu->arch.guest_gprs, 128, prefix))
670 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
671 &vcpu->arch.sie_block->gpsw, 16, prefix))
674 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
675 &vcpu->arch.sie_block->prefix, 4, prefix))
678 if (__guestcopy(vcpu,
679 addr + offsetof(struct save_area, fp_ctrl_reg),
680 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
683 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
684 &vcpu->arch.sie_block->todpr, 4, prefix))
687 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
688 &vcpu->arch.sie_block->cputm, 8, prefix))
691 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
692 &vcpu->arch.sie_block->ckc, 8, prefix))
695 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
696 &vcpu->arch.guest_acrs, 64, prefix))
699 if (__guestcopy(vcpu,
700 addr + offsetof(struct save_area, ctrl_regs),
701 &vcpu->arch.sie_block->gcr, 128, prefix))
/*
 * Per-vcpu ioctl dispatcher: interrupt injection, store-status,
 * initial PSW/reset, and (for user-controlled VMs) the UCAS address
 * space map/unmap and fault-in operations.
 */
706 long kvm_arch_vcpu_ioctl(struct file *filp,
707 unsigned int ioctl, unsigned long arg)
709 struct kvm_vcpu *vcpu = filp->private_data;
710 void __user *argp = (void __user *)arg;
714 case KVM_S390_INTERRUPT: {
715 struct kvm_s390_interrupt s390int;
718 if (copy_from_user(&s390int, argp, sizeof(s390int)))
720 r = kvm_s390_inject_vcpu(vcpu, &s390int);
723 case KVM_S390_STORE_STATUS:
724 r = kvm_s390_vcpu_store_status(vcpu, arg);
726 case KVM_S390_SET_INITIAL_PSW: {
730 if (copy_from_user(&psw, argp, sizeof(psw)))
732 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
735 case KVM_S390_INITIAL_RESET:
736 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
738 #ifdef CONFIG_KVM_S390_UCONTROL
/* UCAS operations are only valid on user-controlled VMs */
739 case KVM_S390_UCAS_MAP: {
740 struct kvm_s390_ucas_mapping ucasmap;
742 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
747 if (!kvm_is_ucontrol(vcpu->kvm)) {
752 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
753 ucasmap.vcpu_addr, ucasmap.length);
756 case KVM_S390_UCAS_UNMAP: {
757 struct kvm_s390_ucas_mapping ucasmap;
759 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
764 if (!kvm_is_ucontrol(vcpu->kvm)) {
769 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
774 case KVM_S390_VCPU_FAULT: {
775 r = gmap_fault(arg, vcpu->arch.gmap);
776 if (!IS_ERR_VALUE(r))
/*
 * mmap fault handler for the vcpu fd: user-controlled VMs may map the
 * SIE control block page at KVM_S390_SIE_PAGE_OFFSET; everything else
 * gets SIGBUS.
 */
786 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
788 #ifdef CONFIG_KVM_S390_UCONTROL
789 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
790 && (kvm_is_ucontrol(vcpu->kvm))) {
791 vmf->page = virt_to_page(vcpu->arch.sie_block);
796 return VM_FAULT_SIGBUS;
799 /* Section: memory related */
/*
 * Validate a userspace memory slot: guest physical base must be zero
 * and both the userspace address and size must be 1MB-segment aligned
 * (the 0xfffff masks below).
 */
800 int kvm_arch_prepare_memory_region(struct kvm *kvm,
801 struct kvm_memory_slot *memslot,
802 struct kvm_memory_slot old,
803 struct kvm_userspace_memory_region *mem,
806 /* A few sanity checks. We can have exactly one memory slot which has
807 to start at guest virtual zero and which has to be located at a
808 page boundary in userland and which has to end at a page boundary.
809 The memory in userland is ok to be fragmented into various different
810 vmas. It is okay to mmap() and munmap() stuff in this slot after
811 doing this call at any time */
816 if (mem->guest_phys_addr)
819 if (mem->userspace_addr & 0xffffful)
822 if (mem->memory_size & 0xffffful)
/*
 * Commit a validated memory slot by mapping the userspace range into
 * the VM's gmap; failure is only logged (callers cannot undo here).
 */
831 void kvm_arch_commit_memory_region(struct kvm *kvm,
832 struct kvm_userspace_memory_region *mem,
833 struct kvm_memory_slot old,
839 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
840 mem->guest_phys_addr, mem->memory_size);
842 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
846 void kvm_arch_flush_shadow(struct kvm *kvm)
/*
 * Module init: register with common KVM, then allocate and populate the
 * facility list page (DMA memory, since SIE reads it) and mask it down
 * to the facilities supported under KVM.
 */
850 static int __init kvm_s390_init(void)
853 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
858 * guests can ask for up to 255+1 double words, we need a full page
859 * to hold the maximum amount of facilities. On the other hand, we
860 * only set facilities that are known to work in KVM.
862 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
867 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
/* whitelist masks: keep only facilities validated for KVM guests */
868 facilities[0] &= 0xff00fff3f47c0000ULL;
869 facilities[1] &= 0x201c000000000000ULL;
/* Module exit: release the facility page (kvm_exit presumably follows
 * in lines not visible here — TODO confirm). */
873 static void __exit kvm_s390_exit(void)
875 free_page((unsigned long) facilities);
879 module_init(kvm_s390_init);
880 module_exit(kvm_s390_exit);