/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

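/* The decrementer is always considered enabled on Book3S; BookE gates it
 * with TCR[DIE]. */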
#ifdef CONFIG_PPC_BOOK3S
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return 1;
}
#else
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.tcr & TCR_DIE;
}
#endif

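/* Reprogram the host hrtimer that models the guest decrementer; called when
 * the guest writes SPRN_DEC. */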
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec;

        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC_BOOK3S
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);

        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                kvmppc_core_queue_dec(vcpu);
                return;
        }
#endif
        if (kvmppc_dec_enabled(vcpu)) {
                /* The decrementer ticks at the same rate as the timebase, so
                 * that's how we convert the guest DEC value to the number of
                 * host ticks. */

                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
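                /* nsec = dec (timebase ticks) * 1000 ns/usec / tb_ticks_per_usec */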
                dec_nsec = vcpu->arch.dec;
                dec_nsec *= 1000;
                dec_nsec /= tb_ticks_per_usec;
                hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
                              HRTIMER_MODE_REL);
                vcpu->arch.dec_jiffies = get_tb();
        } else {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
        }
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
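/* Decode and emulate the instruction behind the most recent guest exit.
 * Loads and stores are forwarded to the MMIO handlers, unknown opcodes fall
 * through to the core-specific emulation hook, and on success the guest PC
 * is normally advanced past the emulated instruction. */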
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = kvmppc_get_last_inst(vcpu);
        u32 ea;
        int ra;
        int rb;
        int rs;
        int rt;
        int sprn;
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

                case OP_31_XOP_LWZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case OP_31_XOP_LBZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

                case OP_31_XOP_LBZUX:
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

                case OP_31_XOP_STWX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;

                case OP_31_XOP_STBX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;

                case OP_31_XOP_STBUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        /* stbux writes the effective address back to rA */
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

                case OP_31_XOP_LHAX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZUX:
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

                case OP_31_XOP_MFSPR:
                        sprn = get_sprn(inst);
                        rt = get_rt(inst);

                        switch (sprn) {
                        case SPRN_SRR0:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
                                break;
                        case SPRN_SRR1:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
                                break;
                        case SPRN_PVR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
                        case SPRN_PIR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
                        case SPRN_MSSSR0:
                                kvmppc_set_gpr(vcpu, rt, 0); break;

                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyways.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
                                kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
                        case SPRN_TBWU:
                                kvmppc_set_gpr(vcpu, rt, get_tb()); break;

                        case SPRN_SPRG0:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
                                break;
                        case SPRN_SPRG1:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
                                break;
                        case SPRN_SPRG2:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
                                break;
                        case SPRN_SPRG3:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
                                break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */

                        case SPRN_DEC:
                        {
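                                /* DEC counts down at the timebase rate, so the
                                 * current value is the last value written minus
                                 * the timebase ticks elapsed since that mtDEC
                                 * (dec_jiffies holds a timebase snapshot). */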
                                u64 jd = get_tb() - vcpu->arch.dec_jiffies;
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
                                pr_debug("mfDEC: %x - %llx = %lx\n",
                                         vcpu->arch.dec, jd,
                                         kvmppc_get_gpr(vcpu, rt));
                                break;
                        }
                        default:
                                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
                                if (emulated == EMULATE_FAIL) {
                                        printk("mfspr: unknown spr %x\n", sprn);
                                        kvmppc_set_gpr(vcpu, rt, 0);
                                }
                                break;
                        }
                        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
                        break;

                case OP_31_XOP_STHX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;

                case OP_31_XOP_STHUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

                case OP_31_XOP_MTSPR:
                        sprn = get_sprn(inst);
                        rs = get_rs(inst);
                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SRR1:
                                vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
                                break;

                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
                        case SPRN_TBWL: break;
                        case SPRN_TBWU: break;

                        case SPRN_MSSSR0: break;

                        case SPRN_DEC:
                                vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
                                kvmppc_emulate_dec(vcpu);
                                break;

                        case SPRN_SPRG0:
                                vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SPRG1:
                                vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SPRG2:
                                vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SPRG3:
                                vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
                                break;

                        default:
                                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
                                if (emulated == EMULATE_FAIL)
                                        printk("mtspr: unknown spr %x\n", sprn);
                                break;
                        }
                        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
                        break;

                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

                case OP_31_XOP_LWBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case OP_31_XOP_TLBSYNC:
                        break;

                case OP_31_XOP_STWBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;

                case OP_31_XOP_LHBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case OP_31_XOP_STHBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

        case OP_LWZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

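        /* For the update forms below, the accessed address (as recorded in
         * paddr_accessed) is written back to rA after the access. */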
        case OP_LWZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LBZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case OP_LBZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STW:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;

        case OP_STWU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STB:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;

        case OP_STBU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LHZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case OP_LHZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LHA:
                rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;

        case OP_LHAU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STH:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;

        case OP_STHU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        default:
                emulated = EMULATE_FAIL;
        }

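        /* Anything not handled above gets one more chance from the
         * core-specific (Book3S/BookE) hook before we give up and send the
         * guest a program interrupt. */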
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_AGAIN) {
                        advance = 0;
                } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                        kvmppc_core_queue_program(vcpu, 0);
                }
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}