/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/errno.h>
16 #include <linux/err.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/stat.h>
21 #include <linux/opp.h>
22 #include <linux/devfreq.h>
23 #include <linux/workqueue.h>
24 #include <linux/platform_device.h>
25 #include <linux/list.h>
26 #include <linux/printk.h>
27 #include <linux/hrtimer.h>
30 static struct class *devfreq_class;
/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
37 static struct workqueue_struct *devfreq_wq;
39 /* The list of all device-devfreq governors */
40 static LIST_HEAD(devfreq_governor_list);
41 /* The list of all device-devfreq */
42 static LIST_HEAD(devfreq_list);
43 static DEFINE_MUTEX(devfreq_list_lock);
46 * find_device_devfreq() - find devfreq struct using device pointer
47 * @dev: device pointer used to lookup device devfreq.
49 * Search the list of device devfreqs and return the matched device's
50 * devfreq info. devfreq_list_lock should be held by the caller.
52 static struct devfreq *find_device_devfreq(struct device *dev)
54 struct devfreq *tmp_devfreq;
56 if (unlikely(IS_ERR_OR_NULL(dev))) {
57 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
58 return ERR_PTR(-EINVAL);
60 WARN(!mutex_is_locked(&devfreq_list_lock),
61 "devfreq_list_lock must be locked.");
63 list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
64 if (tmp_devfreq->dev.parent == dev)
68 return ERR_PTR(-ENODEV);
72 * devfreq_get_freq_level() - Lookup freq_table for the frequency
73 * @devfreq: the devfreq instance
74 * @freq: the target frequency
76 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
80 for (lev = 0; lev < devfreq->profile->max_state; lev++)
81 if (freq == devfreq->profile->freq_table[lev])
88 * devfreq_update_status() - Update statistics of devfreq behavior
89 * @devfreq: the devfreq instance
90 * @freq: the update target frequency
92 static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
95 unsigned long cur_time;
97 lev = devfreq_get_freq_level(devfreq, freq);
102 devfreq->time_in_state[lev] +=
103 cur_time - devfreq->last_stat_updated;
104 if (freq != devfreq->previous_freq) {
105 prev_lev = devfreq_get_freq_level(devfreq,
106 devfreq->previous_freq);
107 devfreq->trans_table[(prev_lev *
108 devfreq->profile->max_state) + lev]++;
109 devfreq->total_trans++;
111 devfreq->last_stat_updated = cur_time;
117 * find_devfreq_governor() - find devfreq governor from name
118 * @name: name of the governor
120 * Search the list of devfreq governors and return the matched
121 * governor's pointer. devfreq_list_lock should be held by the caller.
123 static struct devfreq_governor *find_devfreq_governor(const char *name)
125 struct devfreq_governor *tmp_governor;
127 if (unlikely(IS_ERR_OR_NULL(name))) {
128 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
129 return ERR_PTR(-EINVAL);
131 WARN(!mutex_is_locked(&devfreq_list_lock),
132 "devfreq_list_lock must be locked.");
134 list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
135 if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
139 return ERR_PTR(-ENODEV);
/* Load monitoring helper functions for governors use */
145 * update_devfreq() - Reevaluate the device and configure frequency.
146 * @devfreq: the devfreq instance.
148 * Note: Lock devfreq->lock before calling update_devfreq
149 * This function is exported for governors.
151 int update_devfreq(struct devfreq *devfreq)
157 if (!mutex_is_locked(&devfreq->lock)) {
158 WARN(true, "devfreq->lock must be locked by the caller.\n");
162 if (!devfreq->governor)
165 /* Reevaluate the proper frequency */
166 err = devfreq->governor->get_target_freq(devfreq, &freq);
171 * Adjust the freuqency with user freq and QoS.
173 * List from the highest proiority
174 * max_freq (probably called by thermal when it's too hot)
178 if (devfreq->min_freq && freq < devfreq->min_freq) {
179 freq = devfreq->min_freq;
180 flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
182 if (devfreq->max_freq && freq > devfreq->max_freq) {
183 freq = devfreq->max_freq;
184 flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
187 err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
191 if (devfreq->profile->freq_table)
192 if (devfreq_update_status(devfreq, freq))
193 dev_err(&devfreq->dev,
194 "Couldn't update frequency transition information.\n");
196 devfreq->previous_freq = freq;
199 EXPORT_SYMBOL(update_devfreq);
202 * devfreq_monitor() - Periodically poll devfreq objects.
203 * @work: the work struct used to run devfreq_monitor periodically.
206 static void devfreq_monitor(struct work_struct *work)
209 struct devfreq *devfreq = container_of(work,
210 struct devfreq, work.work);
212 mutex_lock(&devfreq->lock);
213 err = update_devfreq(devfreq);
215 dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
217 queue_delayed_work(devfreq_wq, &devfreq->work,
218 msecs_to_jiffies(devfreq->profile->polling_ms));
219 mutex_unlock(&devfreq->lock);
223 * devfreq_monitor_start() - Start load monitoring of devfreq instance
224 * @devfreq: the devfreq instance.
226 * Helper function for starting devfreq device load monitoing. By
227 * default delayed work based monitoring is supported. Function
228 * to be called from governor in response to DEVFREQ_GOV_START
229 * event when device is added to devfreq framework.
231 void devfreq_monitor_start(struct devfreq *devfreq)
233 INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
234 if (devfreq->profile->polling_ms)
235 queue_delayed_work(devfreq_wq, &devfreq->work,
236 msecs_to_jiffies(devfreq->profile->polling_ms));
240 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
241 * @devfreq: the devfreq instance.
243 * Helper function to stop devfreq device load monitoing. Function
244 * to be called from governor in response to DEVFREQ_GOV_STOP
245 * event when device is removed from devfreq framework.
247 void devfreq_monitor_stop(struct devfreq *devfreq)
249 cancel_delayed_work_sync(&devfreq->work);
253 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
254 * @devfreq: the devfreq instance.
256 * Helper function to suspend devfreq device load monitoing. Function
257 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
258 * event or when polling interval is set to zero.
260 * Note: Though this function is same as devfreq_monitor_stop(),
261 * intentionally kept separate to provide hooks for collecting
262 * transition statistics.
264 void devfreq_monitor_suspend(struct devfreq *devfreq)
266 mutex_lock(&devfreq->lock);
267 if (devfreq->stop_polling) {
268 mutex_unlock(&devfreq->lock);
272 devfreq->stop_polling = true;
273 mutex_unlock(&devfreq->lock);
274 cancel_delayed_work_sync(&devfreq->work);
278 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
279 * @devfreq: the devfreq instance.
281 * Helper function to resume devfreq device load monitoing. Function
282 * to be called from governor in response to DEVFREQ_GOV_RESUME
283 * event or when polling interval is set to non-zero.
285 void devfreq_monitor_resume(struct devfreq *devfreq)
287 mutex_lock(&devfreq->lock);
288 if (!devfreq->stop_polling)
291 if (!delayed_work_pending(&devfreq->work) &&
292 devfreq->profile->polling_ms)
293 queue_delayed_work(devfreq_wq, &devfreq->work,
294 msecs_to_jiffies(devfreq->profile->polling_ms));
295 devfreq->stop_polling = false;
298 mutex_unlock(&devfreq->lock);
302 * devfreq_interval_update() - Update device devfreq monitoring interval
303 * @devfreq: the devfreq instance.
304 * @delay: new polling interval to be set.
306 * Helper function to set new load monitoring polling interval. Function
307 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
309 void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
311 unsigned int cur_delay = devfreq->profile->polling_ms;
312 unsigned int new_delay = *delay;
314 mutex_lock(&devfreq->lock);
315 devfreq->profile->polling_ms = new_delay;
317 if (devfreq->stop_polling)
320 /* if new delay is zero, stop polling */
322 mutex_unlock(&devfreq->lock);
323 cancel_delayed_work_sync(&devfreq->work);
327 /* if current delay is zero, start polling with new delay */
329 queue_delayed_work(devfreq_wq, &devfreq->work,
330 msecs_to_jiffies(devfreq->profile->polling_ms));
334 /* if current delay is greater than new delay, restart polling */
335 if (cur_delay > new_delay) {
336 mutex_unlock(&devfreq->lock);
337 cancel_delayed_work_sync(&devfreq->work);
338 mutex_lock(&devfreq->lock);
339 if (!devfreq->stop_polling)
340 queue_delayed_work(devfreq_wq, &devfreq->work,
341 msecs_to_jiffies(devfreq->profile->polling_ms));
344 mutex_unlock(&devfreq->lock);
348 * devfreq_notifier_call() - Notify that the device frequency requirements
349 * has been changed out of devfreq framework.
350 * @nb: the notifier_block (supposed to be devfreq->nb)
354 * Called by a notifier that uses devfreq->nb.
356 static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
359 struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
362 mutex_lock(&devfreq->lock);
363 ret = update_devfreq(devfreq);
364 mutex_unlock(&devfreq->lock);
370 * _remove_devfreq() - Remove devfreq from the list and release its resources.
371 * @devfreq: the devfreq struct
372 * @skip: skip calling device_unregister().
374 static void _remove_devfreq(struct devfreq *devfreq, bool skip)
376 mutex_lock(&devfreq_list_lock);
377 if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
378 mutex_unlock(&devfreq_list_lock);
379 dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
382 list_del(&devfreq->node);
383 mutex_unlock(&devfreq_list_lock);
385 if (devfreq->governor)
386 devfreq->governor->event_handler(devfreq,
387 DEVFREQ_GOV_STOP, NULL);
389 if (devfreq->profile->exit)
390 devfreq->profile->exit(devfreq->dev.parent);
392 if (!skip && get_device(&devfreq->dev)) {
393 device_unregister(&devfreq->dev);
394 put_device(&devfreq->dev);
397 mutex_destroy(&devfreq->lock);
402 * devfreq_dev_release() - Callback for struct device to release the device.
403 * @dev: the devfreq device
405 * This calls _remove_devfreq() if _remove_devfreq() is not called.
406 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
407 * well as by others unregistering the device.
409 static void devfreq_dev_release(struct device *dev)
411 struct devfreq *devfreq = to_devfreq(dev);
413 _remove_devfreq(devfreq, true);
417 * devfreq_add_device() - Add devfreq feature to the device
418 * @dev: the device to add devfreq feature.
419 * @profile: device-specific profile to run devfreq.
420 * @governor_name: name of the policy to choose frequency.
421 * @data: private data for the governor. The devfreq framework does not
424 struct devfreq *devfreq_add_device(struct device *dev,
425 struct devfreq_dev_profile *profile,
426 const char *governor_name,
429 struct devfreq *devfreq;
430 struct devfreq_governor *governor;
433 if (!dev || !profile || !governor_name) {
434 dev_err(dev, "%s: Invalid parameters.\n", __func__);
435 return ERR_PTR(-EINVAL);
438 mutex_lock(&devfreq_list_lock);
439 devfreq = find_device_devfreq(dev);
440 mutex_unlock(&devfreq_list_lock);
441 if (!IS_ERR(devfreq)) {
442 dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
447 devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
449 dev_err(dev, "%s: Unable to create devfreq for the device\n",
455 mutex_init(&devfreq->lock);
456 mutex_lock(&devfreq->lock);
457 devfreq->dev.parent = dev;
458 devfreq->dev.class = devfreq_class;
459 devfreq->dev.release = devfreq_dev_release;
460 devfreq->profile = profile;
461 strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
462 devfreq->previous_freq = profile->initial_freq;
463 devfreq->data = data;
464 devfreq->nb.notifier_call = devfreq_notifier_call;
466 devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
467 devfreq->profile->max_state *
468 devfreq->profile->max_state,
470 devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
471 devfreq->profile->max_state,
473 devfreq->last_stat_updated = jiffies;
475 dev_set_name(&devfreq->dev, dev_name(dev));
476 err = device_register(&devfreq->dev);
478 put_device(&devfreq->dev);
479 mutex_unlock(&devfreq->lock);
483 mutex_unlock(&devfreq->lock);
485 mutex_lock(&devfreq_list_lock);
486 list_add(&devfreq->node, &devfreq_list);
488 governor = find_devfreq_governor(devfreq->governor_name);
489 if (!IS_ERR(governor))
490 devfreq->governor = governor;
491 if (devfreq->governor)
492 err = devfreq->governor->event_handler(devfreq,
493 DEVFREQ_GOV_START, NULL);
494 mutex_unlock(&devfreq_list_lock);
496 dev_err(dev, "%s: Unable to start governor for the device\n",
504 list_del(&devfreq->node);
505 device_unregister(&devfreq->dev);
511 EXPORT_SYMBOL(devfreq_add_device);
514 * devfreq_remove_device() - Remove devfreq feature from a device.
515 * @devfreq: the devfreq instance to be removed
517 int devfreq_remove_device(struct devfreq *devfreq)
522 _remove_devfreq(devfreq, false);
526 EXPORT_SYMBOL(devfreq_remove_device);
529 * devfreq_suspend_device() - Suspend devfreq of a device.
530 * @devfreq: the devfreq instance to be suspended
532 int devfreq_suspend_device(struct devfreq *devfreq)
537 if (!devfreq->governor)
540 return devfreq->governor->event_handler(devfreq,
541 DEVFREQ_GOV_SUSPEND, NULL);
543 EXPORT_SYMBOL(devfreq_suspend_device);
546 * devfreq_resume_device() - Resume devfreq of a device.
547 * @devfreq: the devfreq instance to be resumed
549 int devfreq_resume_device(struct devfreq *devfreq)
554 if (!devfreq->governor)
557 return devfreq->governor->event_handler(devfreq,
558 DEVFREQ_GOV_RESUME, NULL);
560 EXPORT_SYMBOL(devfreq_resume_device);
563 * devfreq_add_governor() - Add devfreq governor
564 * @governor: the devfreq governor to be added
566 int devfreq_add_governor(struct devfreq_governor *governor)
568 struct devfreq_governor *g;
569 struct devfreq *devfreq;
573 pr_err("%s: Invalid parameters.\n", __func__);
577 mutex_lock(&devfreq_list_lock);
578 g = find_devfreq_governor(governor->name);
580 pr_err("%s: governor %s already registered\n", __func__,
586 list_add(&governor->node, &devfreq_governor_list);
588 list_for_each_entry(devfreq, &devfreq_list, node) {
590 struct device *dev = devfreq->dev.parent;
592 if (!strncmp(devfreq->governor_name, governor->name,
594 /* The following should never occur */
595 if (devfreq->governor) {
597 "%s: Governor %s already present\n",
598 __func__, devfreq->governor->name);
599 ret = devfreq->governor->event_handler(devfreq,
600 DEVFREQ_GOV_STOP, NULL);
603 "%s: Governor %s stop = %d\n",
605 devfreq->governor->name, ret);
609 devfreq->governor = governor;
610 ret = devfreq->governor->event_handler(devfreq,
611 DEVFREQ_GOV_START, NULL);
613 dev_warn(dev, "%s: Governor %s start=%d\n",
614 __func__, devfreq->governor->name,
621 mutex_unlock(&devfreq_list_lock);
625 EXPORT_SYMBOL(devfreq_add_governor);
628 * devfreq_remove_device() - Remove devfreq feature from a device.
629 * @governor: the devfreq governor to be removed
631 int devfreq_remove_governor(struct devfreq_governor *governor)
633 struct devfreq_governor *g;
634 struct devfreq *devfreq;
638 pr_err("%s: Invalid parameters.\n", __func__);
642 mutex_lock(&devfreq_list_lock);
643 g = find_devfreq_governor(governor->name);
645 pr_err("%s: governor %s not registered\n", __func__,
650 list_for_each_entry(devfreq, &devfreq_list, node) {
652 struct device *dev = devfreq->dev.parent;
654 if (!strncmp(devfreq->governor_name, governor->name,
656 /* we should have a devfreq governor! */
657 if (!devfreq->governor) {
658 dev_warn(dev, "%s: Governor %s NOT present\n",
659 __func__, governor->name);
663 ret = devfreq->governor->event_handler(devfreq,
664 DEVFREQ_GOV_STOP, NULL);
666 dev_warn(dev, "%s: Governor %s stop=%d\n",
667 __func__, devfreq->governor->name,
670 devfreq->governor = NULL;
674 list_del(&governor->node);
676 mutex_unlock(&devfreq_list_lock);
680 EXPORT_SYMBOL(devfreq_remove_governor);
682 static ssize_t show_governor(struct device *dev,
683 struct device_attribute *attr, char *buf)
685 if (!to_devfreq(dev)->governor)
688 return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
691 static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
692 const char *buf, size_t count)
694 struct devfreq *df = to_devfreq(dev);
696 char str_governor[DEVFREQ_NAME_LEN + 1];
697 struct devfreq_governor *governor;
699 ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
703 mutex_lock(&devfreq_list_lock);
704 governor = find_devfreq_governor(str_governor);
705 if (IS_ERR(governor)) {
706 ret = PTR_ERR(governor);
709 if (df->governor == governor)
713 ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
715 dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
716 __func__, df->governor->name, ret);
720 df->governor = governor;
721 strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
722 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
724 dev_warn(dev, "%s: Governor %s not started(%d)\n",
725 __func__, df->governor->name, ret);
727 mutex_unlock(&devfreq_list_lock);
734 static ssize_t show_freq(struct device *dev,
735 struct device_attribute *attr, char *buf)
738 struct devfreq *devfreq = to_devfreq(dev);
740 if (devfreq->profile->get_cur_freq &&
741 !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
742 return sprintf(buf, "%lu\n", freq);
744 return sprintf(buf, "%lu\n", devfreq->previous_freq);
747 static ssize_t show_target_freq(struct device *dev,
748 struct device_attribute *attr, char *buf)
750 return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
753 static ssize_t show_polling_interval(struct device *dev,
754 struct device_attribute *attr, char *buf)
756 return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
759 static ssize_t store_polling_interval(struct device *dev,
760 struct device_attribute *attr,
761 const char *buf, size_t count)
763 struct devfreq *df = to_devfreq(dev);
770 ret = sscanf(buf, "%u", &value);
774 df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
780 static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
781 const char *buf, size_t count)
783 struct devfreq *df = to_devfreq(dev);
788 ret = sscanf(buf, "%lu", &value);
792 mutex_lock(&df->lock);
794 if (value && max && value > max) {
799 df->min_freq = value;
803 mutex_unlock(&df->lock);
807 static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
810 return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
813 static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
814 const char *buf, size_t count)
816 struct devfreq *df = to_devfreq(dev);
821 ret = sscanf(buf, "%lu", &value);
825 mutex_lock(&df->lock);
827 if (value && min && value < min) {
832 df->max_freq = value;
836 mutex_unlock(&df->lock);
840 static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
843 return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
846 static ssize_t show_available_freqs(struct device *d,
847 struct device_attribute *attr,
850 struct devfreq *df = to_devfreq(d);
851 struct device *dev = df->dev.parent;
854 unsigned long freq = 0;
858 opp = opp_find_freq_ceil(dev, &freq);
862 count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
868 /* Truncate the trailing space */
872 count += sprintf(&buf[count], "\n");
877 static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
880 struct devfreq *devfreq = to_devfreq(dev);
883 unsigned int max_state = devfreq->profile->max_state;
885 err = devfreq_update_status(devfreq, devfreq->previous_freq);
889 len = sprintf(buf, " From : To\n");
890 len += sprintf(buf + len, " :");
891 for (i = 0; i < max_state; i++)
892 len += sprintf(buf + len, "%8u",
893 devfreq->profile->freq_table[i]);
895 len += sprintf(buf + len, " time(ms)\n");
897 for (i = 0; i < max_state; i++) {
898 if (devfreq->profile->freq_table[i]
899 == devfreq->previous_freq) {
900 len += sprintf(buf + len, "*");
902 len += sprintf(buf + len, " ");
904 len += sprintf(buf + len, "%8u:",
905 devfreq->profile->freq_table[i]);
906 for (j = 0; j < max_state; j++)
907 len += sprintf(buf + len, "%8u",
908 devfreq->trans_table[(i * max_state) + j]);
909 len += sprintf(buf + len, "%10u\n",
910 jiffies_to_msecs(devfreq->time_in_state[i]));
913 len += sprintf(buf + len, "Total transition : %u\n",
914 devfreq->total_trans);
918 static struct device_attribute devfreq_attrs[] = {
919 __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
920 __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
921 __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
922 __ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
923 __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
924 store_polling_interval),
925 __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
926 __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
927 __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
931 static int __init devfreq_init(void)
933 devfreq_class = class_create(THIS_MODULE, "devfreq");
934 if (IS_ERR(devfreq_class)) {
935 pr_err("%s: couldn't create class\n", __FILE__);
936 return PTR_ERR(devfreq_class);
939 devfreq_wq = create_freezable_workqueue("devfreq_wq");
940 if (IS_ERR(devfreq_wq)) {
941 class_destroy(devfreq_class);
942 pr_err("%s: couldn't create workqueue\n", __FILE__);
943 return PTR_ERR(devfreq_wq);
945 devfreq_class->dev_attrs = devfreq_attrs;
949 subsys_initcall(devfreq_init);
951 static void __exit devfreq_exit(void)
953 class_destroy(devfreq_class);
954 destroy_workqueue(devfreq_wq);
956 module_exit(devfreq_exit);
/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */
964 * devfreq_recommended_opp() - Helper function to get proper OPP for the
965 * freq value given to target callback.
966 * @dev: The devfreq user device. (parent of devfreq)
967 * @freq: The frequency given to target function
968 * @flags: Flags handed from devfreq framework.
971 struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
976 if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
977 /* The freq is an upper bound. opp should be lower */
978 opp = opp_find_freq_floor(dev, freq);
980 /* If not available, use the closest opp */
981 if (opp == ERR_PTR(-ENODEV))
982 opp = opp_find_freq_ceil(dev, freq);
984 /* The freq is an lower bound. opp should be higher */
985 opp = opp_find_freq_ceil(dev, freq);
987 /* If not available, use the closest opp */
988 if (opp == ERR_PTR(-ENODEV))
989 opp = opp_find_freq_floor(dev, freq);
996 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
997 * for any changes in the OPP availability
999 * @dev: The devfreq user device. (parent of devfreq)
1000 * @devfreq: The devfreq object.
1002 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1004 struct srcu_notifier_head *nh = opp_get_notifier(dev);
1008 return srcu_notifier_chain_register(nh, &devfreq->nb);
1012 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1013 * notified for any changes in the OPP
1014 * availability changes anymore.
1015 * @dev: The devfreq user device. (parent of devfreq)
1016 * @devfreq: The devfreq object.
1018 * At exit() callback of devfreq_dev_profile, this must be included if
1019 * devfreq_recommended_opp is used.
1021 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1023 struct srcu_notifier_head *nh = opp_get_notifier(dev);
1027 return srcu_notifier_chain_unregister(nh, &devfreq->nb);
1030 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
1031 MODULE_DESCRIPTION("devfreq class support");
1032 MODULE_LICENSE("GPL");