/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

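/*
 * Per-cpu regions shared with the host: apf_reason carries the reason
 * for an injected async page fault, steal_time accumulates time stolen
 * by the hypervisor. Both are 64-byte aligned so each stays within a
 * single cache line.
 */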
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

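/*
 * Tasks blocked on a "page not present" async page fault are parked in
 * a small hash table, indexed by the hash of the token that the host
 * passed in and will echo back when the page is ready.
 */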
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        wait_queue_head_t wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

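/*
 * Sleep until the host signals that the page identified by @token has
 * been brought in. If the "page ready" wakeup already ran, a dummy
 * entry is found and we return immediately. Contexts that cannot
 * schedule halt with interrupts enabled instead of sleeping.
 */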
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);
        int cpu, idle;

        cpu = get_cpu();
        idle = idle_cpu(cpu);
        put_cpu();

        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = idle || preempt_count() > 1;
        init_waitqueue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (waitqueue_active(&n->wq))
                wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                spin_unlock(&b->lock);
        }
}

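/*
 * Handle a "page ready" notification for @token: wake the task sleeping
 * on it, or leave a dummy entry behind if the fault side has not run
 * yet. A token of ~0 broadcasts a wakeup to every waiter on this CPU.
 */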
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while other cpu
                         * handles async PF.
                         */
                        spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_waitqueue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

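/*
 * Fetch the reason the host stored in the per-cpu apf_reason area and
 * clear it so the next async page fault can be delivered.
 */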
u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__get_cpu_var(apf_reason).enabled) {
                reason = __get_cpu_var(apf_reason).reason;
                __get_cpu_var(apf_reason).reason = 0;
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

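/*
 * Replacement page fault handler. A reason of zero means a regular page
 * fault; otherwise CR2 holds the async PF token instead of a faulting
 * address.
 */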
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                kvm_async_pf_task_wait((u32)read_cr2());
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                exit_idle();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";
        pv_info.paravirt_enabled = 1;

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        memset(st, 0, sizeof(*st));

        wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
        printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
                cpu, __pa(st));
}

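/*
 * Per-cpu flag shared with the host: while KVM_PV_EOI_BIT is set, EOI
 * can be signalled by clearing the bit instead of the more expensive
 * APIC register write.
 */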
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
                return;
        apic_write(APIC_EOI, APIC_EOI_ACK);
}

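/*
 * Enable the per-cpu paravirt features for the calling CPU by writing
 * the physical addresses of the shared areas into the respective KVM
 * MSRs.
 */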
void __cpuinit kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = __pa(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __get_cpu_var(apf_reason).enabled = 1;
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __get_cpu_var(kvm_apic_eoi) = 0;
                pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__get_cpu_var(apf_reason).enabled)
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __get_cpu_var(apf_reason).enabled = 0;

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
         * New kernel can re-enable when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

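/*
 * Read the steal time published by the host for @cpu. The version field
 * works like a seqcount: it is odd while the host is updating the
 * record, so retry until an even, unchanged version is observed.
 */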
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                rmb();
                steal = src->steal;
                rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
#ifdef CONFIG_KVM_CLOCK
        WARN_ON(kvm_register_clock("primary cpu clock"));
#endif
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, &async_page_fault);
}

void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif
}

static bool __init kvm_detect(void)
{
        if (!kvm_para_available())
                return false;
        return true;
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);