/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after their parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

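/*
 * During a system sleep transition, devices migrate between the lists
 * below as each phase completes: dpm_prepare() moves them from dpm_list
 * to dpm_prepared_list, dpm_suspend() to dpm_suspended_list,
 * dpm_suspend_late() to dpm_late_early_list and dpm_suspend_noirq() to
 * dpm_noirq_list.  Resume walks the lists back in the opposite
 * direction, and dpm_complete() splices the devices onto dpm_list again.
 */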
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (pm_print_times_enabled) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

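/*
 * For illustration (the names here are hypothetical, not taken from any
 * real driver): a driver publishes its callbacks through a struct
 * dev_pm_ops, and pm_op() picks the slot matching the transition:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,	selected for PM_EVENT_SUSPEND
 *		.resume  = foo_resume,	selected for PM_EVENT_RESUME
 *		.freeze  = foo_freeze,	selected for PM_EVENT_FREEZE/QUIESCE
 *	};
 */
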
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned callback
 * is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

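/*
 * pm_op(), pm_late_early_op() and pm_noirq_op() are all driven by the
 * same lookup order used throughout this file: power domain callbacks
 * take precedence over device type callbacks, which take precedence
 * over class and then bus callbacks; the driver's own callbacks are
 * consulted only when none of the above provide one.
 */
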
static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq" resume callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

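                /*
                 * Pin the device with a reference and move it to the next
                 * phase's list before dropping dpm_list_mtx, so its callback
                 * can run without the list lock held.
                 */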
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

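        /*
         * Kick off the resume of all async-capable devices up front; the
         * loop below then resumes the remaining devices synchronously, in
         * dpm_suspended_list order.
         */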
        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put_sync(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq" suspend callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        __pm_runtime_disable(dev, false);

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        return error;
}

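/*
 * A failure stored in async_error above short-circuits the remaining
 * devices: __device_suspend() bails out early when it is set, and
 * dpm_suspend() adopts it as the overall result once all asynchronous
 * callbacks have drained.
 */
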
static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

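/*
 * Typical use (device names hypothetical): a driver whose device must not
 * resume before its controller would call
 * device_pm_wait_for_dev(dev, controller_dev) from its resume callback.
 * Note that dpm_wait() above only blocks when async suspend/resume is in
 * play for the devices involved.
 */
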
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
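
/*
 * Example usage (hypothetical helper, not part of this file): counting
 * the devices currently on dpm_list under the PM core's lock.
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int n = 0;
 *	dpm_for_each_dev(&n, count_dev);
 */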