/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifdef __LITTLE_ENDIAN__
#error Need to fix SLB shadow accesses in little endian mode
#endif

#define SHADOW_SLB_ESID(num)    (SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)    (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
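
/*
 * Each shadow save-area slot is 16 bytes: ESID at offset 0, VSID at
 * offset 8, as the two offsets above encode. In UNBOLT_SLB_ENTRY below,
 * the "rldicl." rotates the saved ESID left by 37 bits; assuming
 * SLB_ESID_V sits at bit 27 (which the rotate count and the xoris with
 * SLB_ESID_V@h imply), that lands the valid bit in bit 0 and sets CR0,
 * so invalid slots are skipped and valid ones get SLB_ESID_V cleared
 * in the shadow, un-bolting the entry.
 */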
#define UNBOLT_SLB_ENTRY(num) \
        ld      r9, SHADOW_SLB_ESID(num)(r12); \
        /* Invalid? Skip. */; \
        rldicl. r0, r9, 37, 63; \
        beq     slb_entry_skip_ ## num; \
        xoris   r9, r9, SLB_ESID_V@h; \
        std     r9, SHADOW_SLB_ESID(num)(r12); \
  slb_entry_skip_ ## num:

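/*
 * REBOLT_SLB_ENTRY reverses the above on the way out of the guest: a slot
 * whose saved ESID is all zero was never populated and is skipped; for the
 * rest, the valid bit is set again, the entry is reinstalled in the
 * hardware SLB with slbmte, and the shadow copy is updated to match.
 */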
#define REBOLT_SLB_ENTRY(num) \
        ld      r10, SHADOW_SLB_ESID(num)(r11); \
        cmpdi   r10, 0; \
        beq     slb_exit_skip_ ## num; \
        oris    r10, r10, SLB_ESID_V@h; \
        ld      r9, SHADOW_SLB_VSID(num)(r11); \
        slbmte  r9, r10; \
        std     r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

        /* Required state:
         *
         * MSR = ~IR|DR
         * R13 = PACA
         * R1 = host R1
         * R2 = host R2
         * R3 = shadow vcpu
         * all other volatile GPRS = free except R4, R6
         * SVCPU[CR]  = guest CR
         * SVCPU[XER] = guest XER
         * SVCPU[CTR] = guest CTR
         * SVCPU[LR]  = guest LR
         */

        /* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

        ld      r12, PACA_SLBSHADOWPTR(r13)

        /* Remove bolted entries */
        UNBOLT_SLB_ENTRY(0)
        UNBOLT_SLB_ENTRY(1)
        UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

        /* Flush SLB */

        li      r10, 0
        slbmte  r10, r10
        slbia
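
        /* Note: the zeroed slbmte above clears SLB entry 0 explicitly,
         * on the assumption that slbia does not invalidate entry 0;
         * together they leave an empty SLB for the guest entries
         * installed below.
         */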

        /* Fill SLB with our shadow */

        lbz     r12, SVCPU_SLB_MAX(r3)
        mulli   r12, r12, 16
        addi    r12, r12, SVCPU_SLB
        add     r12, r12, r3
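
        /* r12 = &svcpu->slb[slb_max], i.e. one past the last used guest
         * SLB slot (16 bytes per slot): the end bound of the copy loop.
         */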

        /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
        li      r11, SVCPU_SLB
        add     r11, r11, r3

slb_loop_enter:

        ld      r10, 0(r11)             /* guest ESID */

        rldicl. r0, r10, 37, 63         /* valid bit set? */
        beq     slb_loop_enter_skip

        ld      r9, 8(r11)              /* guest VSID */
        slbmte  r9, r10

slb_loop_enter_skip:
        addi    r11, r11, 16
        cmpd    cr0, r11, r12
        blt     slb_loop_enter

slb_do_enter:

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

        /* Register usage at this point:
         *
         * R1         = host R1
         * R2         = host R2
         * R12        = exit handler id
         * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
         * SVCPU.*    = guest *
         * SVCPU[CR]  = guest CR
         * SVCPU[XER] = guest XER
         * SVCPU[CTR] = guest CTR
         * SVCPU[LR]  = guest LR
         *
         */

        /* Restore bolted entries from the shadow, fixing them up along the way */

        /* We don't store anything in entry 0, so we don't need to take care of it */
        slbia
        isync
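
        /* The slbia/isync pair discards the guest SLB contents and
         * synchronizes context before the host's bolted entries are
         * written back with slbmte by the macros below.
         */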

#if SLB_NUM_BOLTED == 3

        ld      r11, PACA_SLBSHADOWPTR(r13)

        REBOLT_SLB_ENTRY(0)
        REBOLT_SLB_ENTRY(1)
        REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

slb_do_exit:

.endm