/*
 * kernel/cpu/idle.c -- "idle: Implement generic idle function"
 * (file header reconstructed from repository-browser page residue)
 */
1 /*
2  * Generic entry point for the idle threads
3  */
4 #include <linux/sched.h>
5 #include <linux/cpu.h>
6 #include <linux/tick.h>
7 #include <linux/mm.h>
8
9 #include <asm/tlb.h>
10
11 #include <trace/events/power.h>
12
13 #ifndef CONFIG_GENERIC_IDLE_LOOP
/*
 * Fallback for architectures that have not been converted to the
 * generic idle loop: hand control straight to the arch's own
 * cpu_idle() implementation.  Never returns.
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	cpu_idle();
}
18 #else
19
20 static int __read_mostly cpu_idle_force_poll;
21
22 void cpu_idle_poll_ctrl(bool enable)
23 {
24         if (enable) {
25                 cpu_idle_force_poll++;
26         } else {
27                 cpu_idle_force_poll--;
28                 WARN_ON_ONCE(cpu_idle_force_poll < 0);
29         }
30 }
31
32 #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
/*
 * "nohlt" boot parameter: force the idle loop to busy-poll instead of
 * entering the arch low-power idle routine.
 */
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);
39
/*
 * "hlt" boot parameter: undo forced polling and allow the arch
 * low-power idle routine to be used again.
 */
static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
46 #endif
47
48 static inline int cpu_idle_poll(void)
49 {
50         trace_cpu_idle_rcuidle(0, smp_processor_id());
51         local_irq_enable();
52         while (!need_resched())
53                 cpu_relax();
54         trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
55         return 1;
56 }
57
/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
/*
 * Default arch_cpu_idle(): the architecture provides no low-power idle
 * routine, so permanently fall back to busy-polling by latching
 * cpu_idle_force_poll.
 */
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}
67
68 /*
69  * Generic idle loop implementation
70  */
/*
 * Generic idle loop implementation
 *
 * Runs forever on the idle task of each CPU.  The outer loop brackets
 * every idle period with nohz tick enter/exit and reschedules (with
 * preemption already disabled) once work is pending; the inner loop
 * repeatedly enters the low-power path until need_resched() is set.
 * The statement order in the inner loop is load-bearing -- do not
 * reorder without understanding the wakeup race described below.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			/* Order the need_resched() read vs. remote stores */
			rmb();

			/*
			 * Offline CPUs divert into the arch dead handler.
			 * NOTE(review): presumably arch_cpu_idle_dead() does
			 * not return on archs that implement it; the weak
			 * default above is a no-op -- confirm per-arch.
			 */
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * Forced polling ("nohlt" or no arch idle routine):
			 * spin with interrupts enabled instead of entering
			 * a low-power state.
			 */
			if (cpu_idle_force_poll) {
				cpu_idle_poll();
			} else {
				current_clr_polling();
				/*
				 * Re-check need_resched() after clearing the
				 * polling flag: a wakeup may have arrived in
				 * the window, and we must not sleep through
				 * it.  IRQs are still disabled here, so the
				 * check cannot race with a local interrupt.
				 */
				if (!need_resched()) {
					stop_critical_timings();
					rcu_idle_enter();
					/*
					 * arch_cpu_idle() is expected to
					 * re-enable interrupts before
					 * returning; warn if it did not.
					 */
					arch_cpu_idle();
					WARN_ON_ONCE(irqs_disabled());
					rcu_idle_exit();
					start_critical_timings();
				} else {
					local_irq_enable();
				}
				current_set_polling();
			}
			arch_cpu_idle_exit();
		}
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
108
/*
 * Entry point for a CPU entering idle (boot CPU and hotplugged CPUs):
 * mark the task as polling, let the architecture prepare, then fall
 * into the generic idle loop.  Never returns.
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	current_set_polling();
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}
115 #endif