/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

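/*
 * SET PREFIX (SPX, 0xb210): moves the guest's prefix area. The operand
 * is a word masked to an 8K boundary below 2G (0x7fffe000). With a
 * prefix of e.g. 0x54000, guest absolute addresses 0x0-0x1fff are
 * exchanged with 0x54000-0x55fff, which is why both backing 4K pages
 * are probed before the new prefix is installed.
 */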
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address = 0;
        u8 tmp;

        vcpu->stat.instruction_spx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* get the value */
        if (get_guest(vcpu, address, (u32 __user *) operand2))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        address = address & 0x7fffe000u;

        /* make sure that the new value is valid memory */
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
           (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_prefix(vcpu, address);

        VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
        return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;

        vcpu->stat.instruction_stpx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        address = vcpu->arch.sie_block->prefix;
        address = address & 0x7fffe000u;

        /* get the value */
        if (put_guest(vcpu, address, (u32 __user *)operand2))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
        return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u64 useraddr;

        vcpu->stat.instruction_stap++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        useraddr = kvm_s390_get_base_disp_s(vcpu);

        if (useraddr & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
        trace_kvm_s390_handle_stap(vcpu, useraddr);
        return 0;
}

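/*
 * ISKE, RRBE and SSKE (b2 opcodes 0x29/0x2a/0x2b) all end up here.
 * Nothing is emulated; the PSW is rewound by the 4-byte instruction
 * length so the guest simply retries the storage key operation
 * (presumably after the host has resolved whatever condition caused
 * the interception).
 */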
static int handle_skey(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_storage_key++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        vcpu->arch.sie_block->gpsw.addr =
                __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}

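/*
 * TEST PENDING INTERRUPTION (TPI, 0xb236): dequeues a pending I/O
 * interrupt matching the CR6 interruption subclass mask and stores the
 * interruption code either at the operand address (two words) or in
 * the lowcore (three words, operand address zero). cc 1 signals that
 * an interrupt was stored, cc 0 that none was pending.
 */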
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        u64 addr;
        int cc;

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        cc = 0;
        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
        if (!inti)
                goto no_interrupt;
        cc = 1;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
                    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
                    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
                put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
                put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
                put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
        }
        kfree(inti);
no_interrupt:
        /* Set condition code and we're done. */
        kvm_s390_set_psw_cc(vcpu, cc);
        return 0;
}

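/*
 * TEST SUBCHANNEL (TSCH, 0xb235) is only partially handled here: the
 * matching pending I/O interrupt, if any, is dequeued, and the
 * instruction itself is forwarded to userspace via KVM_EXIT_S390_TSCH
 * (-EREMOTE).
 */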
static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kvm_s390_get_io_int(vcpu->kvm, 0,
                                   vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

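/*
 * Dispatcher for the channel I/O instructions. With in-kernel css
 * support only TPI and TSCH get special treatment; everything else is
 * left to userspace. Without css support, cc 3 tells the guest that
 * no channel subsystem is operational.
 */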
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
}

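/*
 * STORE FACILITY LIST (STFL, 0xb2b1): copies the first four bytes of
 * the (host-filtered) facility list into the fixed lowcore location.
 */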
static int handle_stfl(struct kvm_vcpu *vcpu)
{
        int rc;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                           vfacilities, 4);
        if (rc)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        VCPU_EVENT(vcpu, 5, "store facility list value %x",
                   *(unsigned int *) vfacilities);
        trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
        return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
        /* Check whether the new psw is enabled for machine checks. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
                kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

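/*
 * Validity checks for a newly loaded PSW. The EA/BA bits select the
 * addressing mode and constrain the instruction address:
 *   EA=0 BA=0 -> 24-bit (addr <= 0x00ffffff)
 *   EA=0 BA=1 -> 31-bit (addr <= 0x7fffffff)
 *   EA=1 BA=1 -> 64-bit
 *   EA=1 BA=0 -> invalid
 * Any unassigned mask bit set also makes the PSW invalid.
 */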
static int is_valid_psw(psw_t *psw)
{
        if (psw->mask & PSW_MASK_UNASSIGNED)
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
                if (psw->addr & ~PSW_ADDR_31)
                        return 0;
        }
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
                return 0;
        return 1;
}

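/*
 * LOAD PSW (LPSW, opcode 0x82) loads an 8-byte ESA/390-format PSW and
 * expands it to the 16-byte z/Architecture format: the compat mask
 * moves to the upper half of the mask, and the AMODE bit of the compat
 * address becomes the BA bit. handle_lpswe() below is LPSWE (0xb2b2),
 * which loads the full 16-byte PSW directly.
 */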
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        psw_compat_t new_psw;
        u64 addr;

        if (gpsw->mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
        if (!is_valid_psw(gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        handle_new_psw(vcpu);
        return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        psw_t new_psw;
        u64 addr;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        handle_new_psw(vcpu);
        return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 operand2;

        vcpu->stat.instruction_stidp++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
        return 0;
}

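/*
 * Build the function-code 3 response (current-level hypervisor data)
 * for STSI 3.2.2: shift any entries reported by a hypervisor above us
 * down by one slot and insert KVM itself as entry 0, reporting the
 * online VCPU count and a capability adjustment factor of 1000.
 */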
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int cpus = 0;
        int n;

        spin_lock(&fi->lock);
        for (n = 0; n < KVM_MAX_VCPUS; n++)
                if (fi->local_int[n])
                        cpus++;
        spin_unlock(&fi->lock);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

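/*
 * STORE SYSTEM INFORMATION (STSI, 0xb27d): function code in bits 32-35
 * of gr0, selectors in gr0 and gr1. fc 0 returns the highest supported
 * function code (3) in gr0; fc 1 and 2 are satisfied from the host's
 * own stsi() data; fc 3 is synthesized above for sel1 = sel2 = 2 only;
 * anything higher returns cc 3.
 */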
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        unsigned long mem = 0;
        u64 operand2;
        int rc = 0;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (fc > 3) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }

        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (fc == 0) {
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 0xfff)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_no_data;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_no_data;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        }

        if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out_exception;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
out_no_data:
        kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
        free_page(mem);
        return rc;
}

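/*
 * Handlers for the 0xb2xx opcode space, indexed by the low opcode byte:
 * STIDP (0x02), SPX (0x10), STPX (0x11), STAP (0x12), the storage key
 * instructions (0x29-0x2b), the channel I/O instructions (0x30-0x3c,
 * plus 0x5f CHSC, 0x74 SIGA, 0x76 XSCH), STSI (0x7d), STFL (0xb1) and
 * LPSWE (0xb2). Unlisted opcodes fall through to userspace.
 */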
static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. Here we check for
         * the privileged ones that we can handle in the kernel.
         * Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

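/*
 * EXTRACT PSW (EPSW, 0xb98d): returns the upper half of the PSW mask
 * in r1 and, if a second register is specified, the lower half in r2;
 * only the low 32 bits of each register are written.
 */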
static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

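/*
 * PERFORM FRAME MANAGEMENT FUNCTION (PFMF, 0xb9af): r1 holds the
 * request bits defined above, r2 the start address. Clearing and/or
 * key setting is applied to every 4K frame of the addressed unit
 * (a single page, or a 1M block with EDAT-1); EDAT-2 (2G blocks) and
 * the conditional-SSKE bits are not emulated. For a non-zero frame
 * size code, r2 is advanced past the processed range on completion.
 */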
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;
        unsigned long start, end;

        vcpu->stat.instruction_pfmf++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        if (!MACHINE_HAS_PFMF)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide non-quiescing support if the host supports it */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* No support for conditional-SSKE */
        if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
                end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
                break;
        case 0x00001000:
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                break;
        /* We don't support EDAT2
        case 0x00002000:
                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
                break;*/
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }
        while (start < end) {
                unsigned long useraddr;

                useraddr = gmap_translate(start, vcpu->arch.gmap);
                if (IS_ERR((void *)useraddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                        if (clear_user((void __user *)useraddr, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
                        if (set_guest_storage_key(current->mm, useraddr,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
                vcpu->run->s.regs.gprs[reg2] = end;
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8d] = handle_epsw,
        [0x9c] = handle_io_inst,
        [0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

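/*
 * LOAD CONTROL (LCTL, opcode 0xb7) loads the low 32 bits of control
 * registers r1 through r3 (wrapping from 15 to 0) from consecutive
 * words at the operand address; handle_lctlg() below is the 64-bit
 * LCTLG (0xeb2f) counterpart.
 */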
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        u64 useraddr;
        u32 val = 0;
        int reg, rc;

        vcpu->stat.instruction_lctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        useraddr = kvm_s390_get_base_disp_rs(vcpu);

        if (useraddr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
                   useraddr);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

        reg = reg1;
        do {
                rc = get_guest(vcpu, val, (u32 __user *) useraddr);
                if (rc)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= val;
                useraddr += 4;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);

        return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        u64 useraddr;
        int reg, rc;

        vcpu->stat.instruction_lctlg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        useraddr = kvm_s390_get_base_disp_rsy(vcpu);

        if (useraddr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        reg = reg1;

        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
                   useraddr);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

        do {
                rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
                               (u64 __user *) useraddr);
                if (rc)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                useraddr += 8;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);

        return 0;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x2f] = handle_lctlg,
        [0x8a] = handle_io_inst,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

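/*
 * TEST PROTECTION (TPROT, 0xe501): only the access-key-0, DAT-off case
 * used by Linux memory detection is handled here. The result is a
 * condition code (PSW mask bits 18-19) derived from the permissions of
 * the backing host VMA: cc 0 writable, cc 1 read-only, cc 2 no access.
 */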
static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        struct vm_area_struct *vma;
        unsigned long user_address;

        vcpu->stat.instruction_tprot++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

        /*
         * We only handle the Linux memory detection case:
         * access key == 0
         * guest DAT == off
         * Everything else goes to userspace.
         */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                return -EOPNOTSUPP;

        down_read(&current->mm->mmap_sem);
        user_address = __gmap_translate(address1, vcpu->arch.gmap);
        if (IS_ERR_VALUE(user_address))
                goto out_inject;
        vma = find_vma(current->mm, user_address);
        if (!vma)
                goto out_inject;
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

        up_read(&current->mm->mmap_sem);
        return 0;

out_inject:
        up_read(&current->mm->mmap_sem);
        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

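/*
 * SET CLOCK PROGRAMMABLE FIELD (SCKPF, 0x0107): loads bits 48-63 of
 * gr0 into the TOD programmable register; a set bit in gr0 bits 32-47
 * raises a specification exception.
 */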
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}