/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

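/*
 * VCPU_STAT() expands to the offset of a counter inside struct kvm_vcpu
 * plus the KVM_STAT_VCPU tag; generic KVM code uses the table below to
 * publish each counter under debugfs (typically /sys/kernel/debug/kvm).
 */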
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

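/*
 * One zeroed page holding the facility list (STFLE bits) advertised to
 * guests; it is allocated and masked down in kvm_s390_init() below.
 */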
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

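/*
 * VM-level ioctls. KVM_S390_INTERRUPT on the VM fd injects a "floating"
 * interrupt that may be delivered to any vcpu. A rough userspace sketch
 * (error handling omitted, field values are placeholders):
 *
 *      struct kvm_s390_interrupt s390int = {
 *              .type = KVM_S390_INT_VIRTIO,
 *              .parm = 0, .parm64 = 0,
 *      };
 *      ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */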
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

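/*
 * The type argument selects a user-controlled ("ucontrol") VM:
 * KVM_VM_S390_UCONTROL requires CAP_SYS_ADMIN and leaves guest address
 * space management to userspace (no kernel gmap is allocated).
 */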
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }
        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

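/*
 * Note on the SCA bit handling below: s390 numbers bits from the MSB
 * down, while the Linux bitops used here count from the LSB, hence the
 * "63 - vcpu_id" conversion for the mcn vcpu mask.
 */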
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

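/*
 * Lazy register switching: host floating point and access registers are
 * saved and the guest's restored when the vcpu is scheduled in; the
 * reverse happens in kvm_arch_vcpu_put() when it is scheduled out.
 */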
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

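/*
 * Clears the guest PSW, prefix, timers and control registers; gcr[0]
 * and gcr[14] keep their (non-zero) architected reset values for the
 * interruption subclass masks, per the comment below.
 */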
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in POP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

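/*
 * One-time setup of the SIE control block: cpuflags start the vcpu in
 * z/Architecture mode and in the stopped state, ecb/eca carry execution
 * control bits, and fac points at the masked facility list page. The
 * hrtimer/tasklet pair implements clock comparator wakeups for the
 * guest.
 */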
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

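/*
 * For non-ucontrol VMs each vcpu is registered in the SCA: cpu[id].sda
 * holds the sie_block address, scaoh/scaol are the high and low 32 bits
 * of the SCA address itself, and the vcpu's bit is set in the mcn mask.
 */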
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->arch.guest_acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

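/*
 * Note: the initial PSW is written to the kvm_run area, not directly
 * into the SIE block; kvm_arch_vcpu_ioctl_run() copies psw_mask and
 * psw_addr into the guest PSW on the next KVM_RUN.
 */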
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

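/*
 * One round trip through SIE. Guest gprs 14 and 15 live in the SIE
 * block across interpretive execution (the 16-byte memcpy covers both);
 * a nonzero return from sie64a() means the SIE instruction itself
 * faulted, which is reported to userspace for ucontrol VMs and is
 * otherwise reflected to the guest as an addressing exception.
 */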
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        rc = 0;
                }
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

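/*
 * Main run loop: keep re-entering SIE until an intercept needs
 * userspace (ucontrol VMs exit on every intercept), a signal is
 * pending, or an error occurs; kvm_run is then filled in with the
 * intercept or exit details for userspace.
 */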
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

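/*
 * Copy data into guest memory, either through the prefixed guest
 * address space or to guest absolute addresses, depending on the
 * store-status variant requested.
 */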
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        default:
                r = -EINVAL;
        }
        return r;
}

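/*
 * For ucontrol VMs, userspace may mmap() the vcpu fd at page offset
 * KVM_S390_SIE_PAGE_OFFSET to access the SIE control block directly,
 * roughly (sketch, assuming a 4K page size):
 *
 *      sie = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
 */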
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We support exactly one memory slot, which
           has to start at guest physical address zero and has to begin and
           end on a page boundary in userland. The userland memory may be
           fragmented into various different vmas; it is fine to mmap() and
           munmap() in this slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words; we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
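        /*
         * The masks below whitelist the facility bits (0-63 and 64-127)
         * that KVM is known to handle correctly; everything else is
         * hidden from guests.
         */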
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);