/*
 *  linux/drivers/thermal/cpu_cooling.c
 *
 *  Copyright (C) 2012  Samsung Electronics Co., Ltd(http://www.samsung.com)
 *  Copyright (C) 2012  Amit Daniel <amit.kachhap@linaro.org>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/thermal.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>

/**
 * struct cpufreq_cooling_device
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *      registered.
 * @cool_dev: thermal_cooling_device pointer to keep track of the
 *      registered cooling device.
 * @cpufreq_state: integer value representing the current state of cpufreq
 *      cooling devices.
 * @cpufreq_val: integer value representing the absolute value of the clipped
 *      frequency.
 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
 * @node: list_head to link all cpufreq_cooling_device together.
 *
 * This structure keeps the information of each registered
 * cpufreq_cooling_device in a list whose head is cooling_cpufreq_list.
 * The mutex cooling_cpufreq_lock protects the list against corruption.
 */
struct cpufreq_cooling_device {
        int id;
        struct thermal_cooling_device *cool_dev;
        unsigned int cpufreq_state;
        unsigned int cpufreq_val;
        struct cpumask allowed_cpus;
        struct list_head node;
};
static LIST_HEAD(cooling_cpufreq_list);
static DEFINE_IDR(cpufreq_idr);

static struct mutex cooling_cpufreq_lock;

/* notify_device passes the clipping request to the CPUFREQ_ADJUST callback. */
#define NOTIFY_INVALID NULL
struct cpufreq_cooling_device *notify_device;

/**
 * get_idr - function to get a unique id.
 * @idr: struct idr * handle used to create an id.
 * @id: int * value generated by this function.
 */
static int get_idr(struct idr *idr, int *id)
{
        int err;
again:
        if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
                return -ENOMEM;

        mutex_lock(&cooling_cpufreq_lock);
        err = idr_get_new(idr, NULL, id);
        mutex_unlock(&cooling_cpufreq_lock);

        if (unlikely(err == -EAGAIN))
                goto again;
        else if (unlikely(err))
                return err;

        *id = *id & MAX_IDR_MASK;
        return 0;
}
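
/*
 * Note: this uses the older two-step IDR API: idr_pre_get() preallocates
 * memory and idr_get_new() hands out the id under cooling_cpufreq_lock.
 * -EAGAIN means the preallocated memory was consumed (e.g. by a concurrent
 * allocation), so the caller simply preallocates again and retries; the
 * final AND with MAX_IDR_MASK normalizes the returned id, as callers of
 * this API are expected to do.
 */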

/**
 * release_idr - function to free the unique id.
 * @idr: struct idr * handle used for creating the id.
 * @id: int value representing the unique id.
 */
static void release_idr(struct idr *idr, int id)
{
        mutex_lock(&cooling_cpufreq_lock);
        idr_remove(idr, id);
        mutex_unlock(&cooling_cpufreq_lock);
}

/* The code below defines the functions used for cpufreq as a cooling device */

/**
 * is_cpufreq_valid - function to check whether a cpu has a valid cpufreq policy.
 * @cpu: cpu for which the check is needed.
 */
static int is_cpufreq_valid(int cpu)
{
        struct cpufreq_policy policy;
        return !cpufreq_get_policy(&policy, cpu);
}

/**
 * get_cpu_frequency - get the absolute value of frequency from level.
 * @cpu: cpu for which frequency is fetched.
 * @level: level of frequency of the CPU.
 *      e.g. level 0 --> 1st max freq, level 1 --> 2nd max freq, ... etc.
 */
static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
{
        int ret = 0, i = 0;
        unsigned long level_index;
        bool descend = false;
        struct cpufreq_frequency_table *table =
                                        cpufreq_frequency_get_table(cpu);
        if (!table)
                return ret;

        while (table[i].frequency != CPUFREQ_TABLE_END) {
                /* skip invalid entries, but keep advancing the index */
                if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
                        i++;
                        continue;
                }

                /* check if the table is in ascending or descending order */
                if ((table[i + 1].frequency != CPUFREQ_TABLE_END) &&
                        (table[i + 1].frequency < table[i].frequency)
                        && !descend) {
                        descend = true;
                }

                /* return if the level matched and the table is descending */
                if (descend && i == level)
                        return table[i].frequency;
                i++;
        }
        i--;

        if (level > i || descend)
                return ret;
        level_index = i - level;

        /* scan the table in reverse order and match the level */
        while (i >= 0) {
                /* skip invalid entries, but keep moving down the table */
                if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
                        i--;
                        continue;
                }
                /* return if the level matched */
                if (i == level_index)
                        return table[i].frequency;
                i--;
        }
        return ret;
}
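
/*
 * For example, with a hypothetical ascending table of { 200000, 500000,
 * 1000000 } kHz (values purely illustrative), get_cpu_frequency() maps
 * level 0 --> 1000000, level 1 --> 500000 and level 2 --> 200000; for a
 * descending table the same levels resolve in table order.  Level 0 is
 * therefore always the highest (least throttled) frequency.
 */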

/**
 * cpufreq_apply_cooling - function to apply frequency clipping.
 * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
 *      clipping data.
 * @cooling_state: value of the cooling state.
 */
static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
                                unsigned long cooling_state)
{
        unsigned int cpuid, clip_freq;
        struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
        unsigned int cpu = cpumask_any(maskPtr);

        /* Check if the old cooling action is the same as the new one */
        if (cpufreq_device->cpufreq_state == cooling_state)
                return 0;

        clip_freq = get_cpu_frequency(cpu, cooling_state);
        if (!clip_freq)
                return -EINVAL;

        cpufreq_device->cpufreq_state = cooling_state;
        cpufreq_device->cpufreq_val = clip_freq;
        notify_device = cpufreq_device;

        for_each_cpu(cpuid, maskPtr) {
                if (is_cpufreq_valid(cpuid))
                        cpufreq_update_policy(cpuid);
        }

        notify_device = NOTIFY_INVALID;

        return 0;
}
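
/*
 * Note: cpufreq_update_policy() re-evaluates the policy of the given cpu and
 * runs the CPUFREQ_ADJUST notifier chain before returning, so the actual
 * clipping is applied by cpufreq_thermal_notifier() below while notify_device
 * points at the cooling device being updated.
 */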

/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb: struct notifier_block * with callback info.
 * @event: value showing the cpufreq event for which this function is invoked.
 * @data: callback-specific data.
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
                                        unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;

        if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
                return 0;

        if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
                max_freq = notify_device->cpufreq_val;

        /* Never exceed user_policy.max */
        if (max_freq > policy->user_policy.max)
                max_freq = policy->user_policy.max;

        if (policy->max != max_freq)
                cpufreq_verify_within_limits(policy, 0, max_freq);

        return 0;
}
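
/*
 * For example (illustrative numbers): with cpufreq_val = 800000 kHz and
 * user_policy.max = 1000000 kHz the policy max is clipped down to 800000 kHz,
 * whereas with user_policy.max = 600000 kHz the user limit wins and the
 * policy max stays at 600000 kHz.
 */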

/*
 * cpufreq cooling device callback functions are defined below
 */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
{
        int ret = -EINVAL, i = 0, count = 0;
        struct cpufreq_cooling_device *cpufreq_device;
        struct cpumask *maskPtr;
        unsigned int cpu;
        struct cpufreq_frequency_table *table;
        bool found = false;

        mutex_lock(&cooling_cpufreq_lock);
        list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
                if (cpufreq_device->cool_dev == cdev) {
                        found = true;
                        break;
                }
        }
        /* the cursor of list_for_each_entry() is never NULL, so use a flag */
        if (!found)
                goto return_get_max_state;

        maskPtr = &cpufreq_device->allowed_cpus;
        cpu = cpumask_any(maskPtr);
        table = cpufreq_frequency_get_table(cpu);
        if (!table) {
                *state = 0;
                ret = 0;
                goto return_get_max_state;
        }

        /* count the valid frequencies; skip invalid entries but keep walking */
        while (table[i].frequency != CPUFREQ_TABLE_END) {
                if (table[i].frequency != CPUFREQ_ENTRY_INVALID)
                        count++;
                i++;
        }
        if (count > 0) {
                *state = count - 1;
                ret = 0;
        }

return_get_max_state:
        mutex_unlock(&cooling_cpufreq_lock);
        return ret;
}
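
/*
 * For example, a frequency table with five valid entries reports a max state
 * of 4: cooling states 0..4, where state 0 leaves the highest frequency
 * available and state 4 clips down to the lowest one.
 */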

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
{
        int ret = -EINVAL;
        struct cpufreq_cooling_device *cpufreq_device;

        mutex_lock(&cooling_cpufreq_lock);
        list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
                if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
                        *state = cpufreq_device->cpufreq_state;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&cooling_cpufreq_lock);

        return ret;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: the cooling state to set.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long state)
{
        int ret = -EINVAL;
        struct cpufreq_cooling_device *cpufreq_device;

        mutex_lock(&cooling_cpufreq_lock);
        list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
                if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
                        ret = 0;
                        break;
                }
        }
        if (!ret)
                ret = cpufreq_apply_cooling(cpufreq_device, state);

        mutex_unlock(&cooling_cpufreq_lock);

        return ret;
}

/* Bind cpufreq callbacks to thermal cooling device ops */
static struct thermal_cooling_device_ops const cpufreq_cooling_ops = {
        .get_max_state = cpufreq_get_max_state,
        .get_cur_state = cpufreq_get_cur_state,
        .set_cur_state = cpufreq_set_cur_state,
};
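
/*
 * These ops are invoked by the thermal core once the cooling device has been
 * bound to a thermal zone trip point, typically via
 * thermal_zone_bind_cooling_device() from a platform thermal driver.
 */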

/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
        .notifier_call = cpufreq_thermal_notifier,
};

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 */
struct thermal_cooling_device *cpufreq_cooling_register(
        struct cpumask *clip_cpus)
{
        struct thermal_cooling_device *cool_dev;
        struct cpufreq_cooling_device *cpufreq_dev = NULL;
        unsigned int cpufreq_dev_count = 0, min = 0, max = 0;
        char dev_name[THERMAL_NAME_LENGTH];
        int ret = 0, i;
        struct cpufreq_policy policy;

        list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node)
                cpufreq_dev_count++;

        /* Verify that all the clip cpus have the same freq_min/freq_max limits */
        for_each_cpu(i, clip_cpus) {
                /* continue rather than return an error if the policy is not found */
                if (cpufreq_get_policy(&policy, i))
                        continue;
                if (min == 0 && max == 0) {
                        min = policy.cpuinfo.min_freq;
                        max = policy.cpuinfo.max_freq;
                } else {
                        if (min != policy.cpuinfo.min_freq ||
                                max != policy.cpuinfo.max_freq)
                                return ERR_PTR(-EINVAL);
                }
        }
        cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
                        GFP_KERNEL);
        if (!cpufreq_dev)
                return ERR_PTR(-ENOMEM);

        cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);

        if (cpufreq_dev_count == 0)
                mutex_init(&cooling_cpufreq_lock);

        ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
        if (ret) {
                kfree(cpufreq_dev);
                return ERR_PTR(-EINVAL);
        }

        snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
                 cpufreq_dev->id);

        cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
                                                &cpufreq_cooling_ops);
        if (!cool_dev) {
                release_idr(&cpufreq_idr, cpufreq_dev->id);
                kfree(cpufreq_dev);
                return ERR_PTR(-EINVAL);
        }
        cpufreq_dev->cool_dev = cool_dev;
        cpufreq_dev->cpufreq_state = 0;
        mutex_lock(&cooling_cpufreq_lock);
        list_add_tail(&cpufreq_dev->node, &cooling_cpufreq_list);

        /* Register the notifier for the first cpufreq cooling device */
        if (cpufreq_dev_count == 0)
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                                CPUFREQ_POLICY_NOTIFIER);

        mutex_unlock(&cooling_cpufreq_lock);
        return cool_dev;
}
EXPORT_SYMBOL(cpufreq_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
        struct cpufreq_cooling_device *cpufreq_dev = NULL, *dev;
        unsigned int cpufreq_dev_count = 0;

        mutex_lock(&cooling_cpufreq_lock);
        /* count every registered device and find the one backing cdev */
        list_for_each_entry(dev, &cooling_cpufreq_list, node) {
                cpufreq_dev_count++;
                if (dev->cool_dev == cdev)
                        cpufreq_dev = dev;
        }

        if (!cpufreq_dev) {
                mutex_unlock(&cooling_cpufreq_lock);
                return;
        }

        list_del(&cpufreq_dev->node);

        /* Unregister the notifier for the last cpufreq cooling device */
        if (cpufreq_dev_count == 1) {
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                        CPUFREQ_POLICY_NOTIFIER);
        }
        mutex_unlock(&cooling_cpufreq_lock);
        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
        release_idr(&cpufreq_idr, cpufreq_dev->id);
        if (cpufreq_dev_count == 1)
                mutex_destroy(&cooling_cpufreq_lock);
        kfree(cpufreq_dev);
}
EXPORT_SYMBOL(cpufreq_cooling_unregister);
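
/*
 * Example usage (an illustrative sketch, not part of this driver): a platform
 * thermal driver would typically register one cpufreq cooling device for a
 * set of cpus and later bind it to a thermal zone trip point.  The cpumask
 * choice and error handling below are only an assumption of how a caller
 * might look.
 *
 *      struct thermal_cooling_device *cdev;
 *      struct cpumask clip_cpus;
 *
 *      cpumask_copy(&clip_cpus, cpu_online_mask);
 *      cdev = cpufreq_cooling_register(&clip_cpus);
 *      if (IS_ERR(cdev))
 *              return PTR_ERR(cdev);
 *      ...
 *      cpufreq_cooling_unregister(cdev);
 */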