#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}
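
/*
 * Illustrative sketch (not part of this header): callers typically
 * branch on paravirt_enabled() to skip work that only makes sense on
 * bare metal, e.g. (do_native_only_setup() is a hypothetical helper):
 *
 *        if (!paravirt_enabled())
 *                do_native_only_setup();
 */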

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
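
/*
 * Illustrative sketch (not part of this header): the pointers are
 * both input and output, so a leaf 0 query looks like
 *
 *        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *
 *        __cpuid(&eax, &ebx, &ecx, &edx);
 *        (the vendor string is now in ebx:edx:ecx)
 */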

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
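
/*
 * Illustrative sketch (not part of this header): the wrappers above
 * are used like the native accessors, e.g. to save and restore %db7:
 *
 *        unsigned long dr7;
 *
 *        get_debugreg(dr7, 7);
 *        ...
 *        set_debugreg(dr7, 7);
 */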

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                  \
do {                                            \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                   \
({                                              \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
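
/*
 * Illustrative sketch (not part of this header): the _safe variants
 * report the fault instead of oopsing, so probing a possibly-absent
 * MSR looks like
 *
 *        u32 lo, hi;
 *
 *        if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
 *                return -EIO;
 *
 * A non-zero return value means the access faulted.
 */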

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long __val = paravirt_rdtscp(&__aux);  \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}
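
/*
 * Illustrative sketch (the real out*_p()/in*_p() helpers live in
 * <asm/io.h>): the "pausing" port accessors use slow_down_io() to
 * emulate the legacy ISA bus delay, roughly:
 *
 *        static inline void outb_p(unsigned char value, int port)
 *        {
 *                outb(value, port);
 *                slow_down_io();
 *        }
 */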

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long start,
                                    unsigned long end)
{
        PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp)
{
        PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pmd_t *pmdp)
{
        PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                    pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                    pgd.pgd);

        return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
{
        if (sizeof(pmdval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
                            native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                    pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                    pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud, (u64)pud.pud >> 32);
        else
                ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}
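
/*
 * For reference, a sketch of the plain (non-atomic) PAE store this
 * hook exists to replace, mirroring native_set_pte() in
 * <asm/pgtable-3level.h>: the pte is written as two 32-bit halves
 * with a barrier in between:
 *
 *        ptep->pte_high = pte.pte >> 32;
 *        smp_wmb();
 *        ptep->pte_low = pte.pte;
 */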

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/* Saving all of the caller-save registers would cost too much, so we
 * save only %rdi (the argument register) and declare the rest as
 * clobbers. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
        static void *__##func##__ __used = func;                        \
                                                                        \
        asm(".pushsection .text;"                                       \
            "__raw_callee_save_" #func ": "                             \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })
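
/*
 * Illustrative sketch (my_save_fl() is a hypothetical backend
 * function, not part of this header): a backend wraps its C
 * implementation with the thunk and registers the wrapped entry
 * point:
 *
 *        static unsigned long my_save_fl(void) { ... }
 *        PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *        pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */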

static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}
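
/*
 * Illustrative sketch (normally reached via the local_irq_save() /
 * local_irq_restore() wrappers in <linux/irqflags.h>): the usual
 * pairing of the helpers above is
 *
 *        unsigned long flags = arch_local_irq_save();
 *        ... critical section ...
 *        arch_local_irq_restore(flags);
 */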


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection


#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something equally
 * minimal.  Either way, we don't need to save any registers for it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RAX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */