/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
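
/*
 * The clockid_t values handled here encode a PID and a clock type, as
 * defined in <linux/posix-timers.h>: the PID is stored one's-complemented
 * in the upper bits, bit 2 selects per-thread vs. per-process, and the
 * low two bits select CPUCLOCK_PROF (0), CPUCLOCK_VIRT (1) or
 * CPUCLOCK_SCHED (2).  CPUCLOCK_PID(), CPUCLOCK_PERTHREAD() and
 * CPUCLOCK_WHICH() used below unpack those fields.
 */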
/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	cputime_t cputime = secs_to_cputime(rlim_new);

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(cpu.sched);
	else
		cputime_to_timespec(cpu.cpu, tp);
}
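
/*
 * Note on the union cpu_time_count representation: .sched holds
 * nanoseconds for CPUCLOCK_SCHED clocks, while .cpu holds a cputime_t
 * for CPUCLOCK_PROF and CPUCLOCK_VIRT.  Because .sched is the wider
 * member and its high half is zeroed whenever .cpu is used, testing
 * expires.sched == 0 works as a "timer not armed" check regardless of
 * which clock is in use.
 */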
static inline int cpu_time_before(const clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return now.cpu < then.cpu;
	}
}

static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu += val.cpu;
	}
}

static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu -= b.cpu;
	}
	return a;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (now.cpu < timer->it.cpu.expires.cpu)
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = now.cpu + incr - timer->it.cpu.expires.cpu;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr >> 1 << 2 == incr << 1 ? incr << 1 : incr + incr;
		for (; i >= 0; incr = incr >> 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.cpu += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	}
}
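
/*
 * Worked example for the catch-up loops above: with expires = 100,
 * incr = 10 and now = 137, delta = 137 + 10 - 100 = 47.  The first
 * loop doubles incr up to 40 (i = 2).  The second loop then subtracts
 * the power-of-two multiples of incr that still fit: 40 fits
 * (expires += 40, it_overrun += 1 << 2), 20 and 10 do not.  The timer
 * ends up at expires = 140, just past "now", with 4 overruns recorded,
 * the same result as stepping 10 at a time but in O(log) iterations.
 */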
static inline cputime_t prof_ticks(struct task_struct *p)
{
	cputime_t utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	cputime_t utime;

	task_cputime(p, &utime, NULL);

	return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = task_sched_runtime(p);
		break;
	}
	return 0;
}

static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
	if (b->utime > a->utime)
		a->utime = b->utime;

	if (b->stime > a->stime)
		a->stime = b->stime;

	if (b->sum_exec_runtime > a->sum_exec_runtime)
		a->sum_exec_runtime = b->sum_exec_runtime;
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	if (!cputimer->running) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		raw_spin_lock_irqsave(&cputimer->lock, flags);
		cputimer->running = 1;
		update_gt_cputime(&cputimer->cputime, &sum);
	} else
		raw_spin_lock_irqsave(&cputimer->lock, flags);
	*times = cputimer->cputime;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		cpu->sched = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->sighand) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
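
/*
 * For illustration (a userspace sketch, not part of this file): the
 * usual way to reach posix_cpu_clock_get() for another process is
 * clock_getcpuclockid(), which hands back an encoded clockid as
 * described at the top of this file:
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	if (clock_getcpuclockid(pid, &cid) == 0 &&
 *	    clock_gettime(cid, &ts) == 0)
 *		printf("%ld.%09ld seconds of CPU time\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */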
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = utime + stime;

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.cpu < ptime) {
			timer->expires.cpu = 0;
		} else {
			timer->expires.cpu -= ptime;
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.cpu < utime) {
			timer->expires.cpu = 0;
		} else {
			timer->expires.cpu -= utime;
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sum_exec_runtime) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sum_exec_runtime;
		}
	}
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cputime_t utime, stime;

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
						sizeof(unsigned long long));
	task_cputime(tsk, &utime, &stime);
	cleanup_timers(tsk->cpu_timers,
		       utime, stime, tsk->se.sum_exec_runtime);

}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, stime;

	task_cputime(tsk, &utime, &stime);
	cleanup_timers(tsk->signal->cpu_timers,
		       utime + sig->utime, stime + sig->stime,
		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
	return expires == 0 || expires > new_exp;
}
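
/*
 * In the cputime_expires caches a value of 0 means "no timer pending",
 * so expires_gt() treats 0 as "later than anything": any real expiry
 * time replaces it.
 */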
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, interrupts disabled and p->sighand->siglock taken.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		union cpu_time_count *exp = &nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp->cpu))
				cputime_expires->prof_exp = exp->cpu;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp->cpu))
				cputime_expires->virt_exp = exp->cpu;
			break;
		case CPUCLOCK_SCHED:
			if (cputime_expires->sched_exp == 0 ||
			    cputime_expires->sched_exp > exp->sched)
				cputime_expires->sched_exp = exp->sched;
			break;
		}
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires.sched = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}
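
/*
 * Unlike cpu_clock_sample_group(), which sums over all threads on every
 * call, this uses the cached thread_group_cputimer totals and adds the
 * current task's not-yet-accounted runtime via task_delta_exec(), which
 * keeps the cost low when timers are armed and rearmed frequently.
 */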
#ifdef CONFIG_NO_HZ_FULL
static void nohz_kick_work_fn(struct work_struct *work)
{
	tick_nohz_full_kick_all();
}

static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);

/*
 * We need the IPIs to be sent from sane process context.
 * The posix cpu timers are always set with irqs disabled.
 */
static void posix_cpu_timer_kick_nohz(void)
{
	schedule_work(&nohz_kick_work);
}
#else
static inline void posix_cpu_timer_kick_nohz(void) { }
#endif

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			       struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, old_incr, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->sighand.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->sighand == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		spin_unlock(&p->sighand->siglock);
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer);
	}

	spin_unlock(&p->sighand->siglock);
	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   old_incr, &old->it_interval);
	}
	if (!ret)
		posix_cpu_timer_kick_nohz();
	return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
 dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	unsigned long soft;

	maxfire = 20;
	tsk->cputime_expires.prof_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
			tsk->cputime_expires.prof_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.virt_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
			tsk->cputime_expires.virt_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.sched_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
			tsk->cputime_expires.sched_exp = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
				"RT Watchdog Timeout: %s[%d]\n",
				tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}
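
/*
 * Note on units in the RLIMIT_RTTIME check above: the rlimit values are
 * expressed in microseconds, while tsk->rt.timeout counts scheduler
 * ticks, so DIV_ROUND_UP(limit, USEC_PER_SEC/HZ) converts the limit
 * into ticks for the comparison.
 */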
static void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     cputime_t *expires, cputime_t cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr) {
			it->expires += it->incr;
			it->error += it->incr_error;
			if (it->error >= onecputick) {
				it->expires -= cputime_one_jiffy;
				it->error -= onecputick;
			}
		} else {
			it->expires = 0;
		}

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires)) {
		*expires = it->expires;
	}
}
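
/*
 * The it->error / it->incr_error arithmetic above compensates for the
 * rounding that happens when an itimer interval is converted to cputime
 * granularity: incr_error carries the per-period rounding remainder, and
 * once the accumulated error reaches one tick (onecputick, set up in
 * init_posix_cpu_timers()), one jiffy is taken back off the expiry time.
 */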
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the sig->cpu_timers[N] lists onto the firing list.  Per-thread
 * timers have already been taken off by check_thread_timers().
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;
	maxfire = 20;
	prof_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || ptime < tl->expires.cpu) {
			prof_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || utime < tl->expires.cpu) {
			virt_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
			sched_expires = tl->expires.sched;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		cputime_t x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = secs_to_cputime(soft);
		if (!prof_expires || x < prof_expires) {
			prof_expires = x;
		}
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
		spin_lock(&p->sighand->siglock);
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		spin_lock(&p->sighand->siglock);
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	BUG_ON(!irqs_disabled());
	arm_timer(timer);
	spin_unlock(&p->sighand->siglock);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
					const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;
	cputime_t utime, stime;

	task_cputime(tsk, &utime, &stime);

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = utime,
			.stime = stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	if (sig->cputimer.running) {
		struct task_cputime group_sample;

		raw_spin_lock(&sig->cputimer.lock);
		group_sample = sig->cputimer.cputime;
		raw_spin_unlock(&sig->cputimer.lock);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}
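
/*
 * fastpath_timer_check() runs on every tick via run_posix_cpu_timers(),
 * so it deliberately avoids taking sighand->siglock: the per-task
 * expiration cache is read locklessly, and only the cached group totals
 * are snapshotted under cputimer.lock.
 */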
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	/*
	 * If there are any active process wide timers (POSIX 1.b, itimers,
	 * RLIMIT_CPU) cputimer must be running.
	 */
	if (tsk->signal->cputimer.running)
		check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}

	/*
	 * In case some timers were rescheduled after the queue got emptied,
	 * wake up full dynticks CPUs.
	 */
	if (tsk->signal->cputimer.running)
		posix_cpu_timer_kick_nohz();
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting an itimer.  The *oldval is absolute and we
		 * update it to be relative; the *newval argument is relative
		 * and we update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now.cpu) {
				/* Just about to fire. */
				*oldval = cputime_one_jiffy;
			} else {
				*oldval -= now.cpu;
			}
		}

		if (!*newval)
			goto out;
		*newval += now.cpu;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if
	 * the RLIMIT_CPU limit expires earlier than the current prof_exp
	 * cpu timer.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}
out:
	posix_cpu_timer_kick_nohz();
}
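
/*
 * Besides update_rlimit_cpu() above, this is also how setitimer()
 * feeds ITIMER_PROF and ITIMER_VIRTUAL into the shared expiration
 * cache (via set_cpu_itimer() in kernel/itimer.c, to the best of our
 * reading): the itimer code passes the new interval in *newval, and
 * the oldval/newval conversion keeps the itimer values relative while
 * the cache stores absolute expiry times.
 */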
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset; the
				 * deletion below cannot fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case when the timer was or
			 * is in the middle of firing.  In other cases we
			 * already freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
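
/*
 * Userspace sketch of this path (illustrative only): sleep until the
 * calling process has consumed one more second of CPU time.  Note that
 * a thread blocked in clock_nanosleep() burns no CPU itself, so on a
 * process CPU-time clock the wakeup only arrives while other threads
 * of the process run.
 *
 *	struct timespec t;
 *
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &t);
 *	t.tv_sec += 1;
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &t, NULL);
 */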
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
		restart_block->nanosleep.rmtp = rmtp;
		restart_block->nanosleep.expires = timespec_to_ns(rqtp);
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec t;
	struct itimerspec it;
	int error;

	t = ns_to_timespec(restart_block->nanosleep.expires);

	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->nanosleep.expires = timespec_to_ns(&t);
	}
	return error;
}

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};

static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};
	struct timespec ts;

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	cputime_to_timespec(cputime_one_jiffy, &ts);
	onecputick = ts.tv_nsec;
	WARN_ON(ts.tv_sec != 0);

	return 0;
}
__initcall(init_posix_cpu_timers);