/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
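
/*
 * Guest BAT register state, referenced by the ibat[]/dbat[] arrays and
 * kvmppc_set_bat() below.  The layout here is a sketch of the usual
 * split of a BAT pair: raw register image, effective page index and
 * mask, real page number, WIMG/protection bits and the supervisor/user
 * valid bits.  Treat the exact member names as assumptions rather than
 * a definitive layout.
 */
struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};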

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS		9
#define SID_MAP_NUM		(1 << SID_MAP_BITS)
#define SID_MAP_MASK		(SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

/* Shadow HPTE tracking entry, kept on several hash lists for fast lookup */
struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
	struct rcu_head rcu_head;
	struct kvmppc_pte pte;
};

struct kvmppc_vcpu_book3s {
	/* Embedded vcpu must come first so to_book3s() can use container_of() */
	struct kvm_vcpu vcpu;
	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hior;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	/* Hash lists of shadow HPTEs, hashed by effective and virtual page */
	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
	int hpte_cache_count;
};

#define CONTEXT_HOST		0
#define CONTEXT_GUEST		1
#define CONTEXT_GUEST_END	2
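
/*
 * Magic VSID values used by the shadow MMU when a translation does not
 * come from an ordinary guest segment: real-mode and BAT mappings get
 * pseudo-VSIDs of their own, and the high bits mark 1T segments,
 * instruction/data relocation being off, and problem state.  This note
 * describes how the values are combined by the book3s MMU host code and
 * is meant as orientation, not a normative definition.
 */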
#define VSID_REAL	0x0fffffffffc00000ULL
#define VSID_BAT	0x0fffffffffb00000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu, unsigned long addr,
			unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
}

extern void kvm_return_point(void);

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

#ifdef CONFIG_KVM_BOOK3S_PR

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return to_book3s(vcpu)->hior;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (pending_now)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	if (num < 14) {
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		svcpu->gpr[num] = val;
		svcpu_put(svcpu);
		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
	} else
		vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	if (num < 14) {
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong r = svcpu->gpr[num];
		svcpu_put(svcpu);
		return r;
	}
	return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->cr = val;
	to_book3s(vcpu)->shadow_vcpu->cr = val;
	svcpu_put(svcpu);
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r = svcpu->cr;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->xer = val;
	to_book3s(vcpu)->shadow_vcpu->xer = val;
	svcpu_put(svcpu);
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r = svcpu->xer;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->ctr = val;
	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r = svcpu->ctr;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->lr = val;
	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r = svcpu->lr;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->pc = val;
	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r = svcpu->pc;
	svcpu_put(svcpu);
	return r;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

	r = svcpu->last_inst;
	svcpu_put(svcpu);
	return r;
}

/*
 * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
 * Because the sc instruction sets SRR0 to point to the following
 * instruction, we have to fetch from pc - 4.
 */
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu) - 4;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

	r = svcpu->last_inst;
	svcpu_put(svcpu);
	return r;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;

	r = svcpu->fault_dar;
	svcpu_put(svcpu);
	return r;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	return crit;
}

#else /* CONFIG_KVM_BOOK3S_PR */

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.xer = val;
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.lr = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.lr;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.pc = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
}

/*
 * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
 * Because the sc instruction sets SRR0 to point to the following
 * instruction, we have to fetch from pc - 4.
 */
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu) - 4;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B
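
/*
 * Instruction images used when scanning or patching guest code: INS_DCBZ
 * is the dcbz opcode (dcbz needs special handling when the guest assumes
 * a 32-byte cache line), and INS_TW below encodes "tw 31,0,0", an
 * unconditional trap.  The dcbz rationale is an assumption about the
 * emulation path rather than a statement of the exact mechanism.
 */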
#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS			(LPID_RSVD + 1)

#endif /* __ASM_KVM_BOOK3S_H__ */