/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

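/*
 * tick_broadcast_device is the clock event device used to emulate the
 * per cpu tick. tick_broadcast_mask tracks the cpus whose local timer
 * is handled by the broadcast device, tmpmask is scratch space used
 * under tick_broadcast_lock, and tick_broadcast_force keeps the
 * broadcast active even when a BROADCAST_OFF request arrives.
 */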
static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot-capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
	return 1;
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

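/*
 * Set the broadcast function for a device which depends on the
 * broadcast mechanism. Fall back to err_broadcast() when the
 * architecture provides no broadcast function.
 */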
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask,
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * broadcast function of the first device. This works as long
		 * as we have this misfeature only on x86 (lapic).
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

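/*
 * Shut down the broadcast device on system suspend.
 */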
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

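/*
 * Restart the broadcast device on system resume and tell the caller
 * whether the periodic tick of this cpu is handled by the broadcast
 * device.
 */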
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

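/*
 * tick_broadcast_oneshot_mask: cpus which have stopped their local
 * timer and rely on the broadcast device for the next wakeup.
 * tick_broadcast_pending_mask: cpus whose expired event has already
 * been taken care of by the broadcast handler.
 * tick_broadcast_force_mask: cpus whose local event has already
 * expired and which will be woken by the broadcast IPI right away.
 */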
static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

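/*
 * Program the broadcast device to expire at @expires on behalf of
 * @cpu and, on success, steer the broadcast interrupt to that cpu.
 */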
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourselves in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

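/*
 * Allocate the cpumasks used by the broadcast code. Called once
 * during early boot from the tick setup code.
 */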
void __init tick_broadcast_init(void)
{
	alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}