/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 * Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
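/*
 * Human-readable names for the softirq vectors.  The entries below must
 * stay in the same order as the softirq enumeration in <linux/interrupt.h>
 * (HI_SOFTIRQ first, RCU_SOFTIRQ last); they are what shows up in, for
 * example, /proc/softirqs and the softirq trace events.
 */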
char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
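/*
 * Core softirq loop: it runs with bottom halves disabled but, once the
 * pending mask has been snapshot and cleared, with hardware interrupts
 * enabled, so softirqs raised by the handlers are picked up on the next
 * pass.  After MAX_SOFTIRQ_RESTART passes any remaining work is handed
 * off to ksoftirqd.
 */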
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	rcu_irq_exit();
#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
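/*
 * Illustrative sketch (not part of this file) of how a subsystem uses the
 * two calls above: register a handler for its softirq vector once at boot,
 * then mark the vector pending from interrupt context.  net_tx_action()
 * here merely stands in for whatever handler the subsystem provides:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);
 *	(or raise_softirq_irqoff() if interrupts are already disabled)
 */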
/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = t;
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);
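/*
 * Softirq handler for TASKLET_SOFTIRQ: atomically detach this CPU's pending
 * tasklet list with interrupts disabled, then walk it with interrupts
 * enabled.  Each tasklet that can be locked and is not disabled is run;
 * anything still locked (running on another CPU) or disabled is re-queued
 * and the softirq is raised again so it gets another pass later.
 */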
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
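/*
 * Illustrative use of the tasklet API (my_tasklet and my_func below are
 * made-up example names, not part of this file):
 *
 *	static void my_func(unsigned long data)
 *	{
 *		... runs in softirq context, never concurrently with itself ...
 *	}
 *	static struct tasklet_struct my_tasklet;
 *
 *	tasklet_init(&my_tasklet, my_func, 0);
 *	tasklet_schedule(&my_tasklet);	(typically from an interrupt handler)
 *	tasklet_kill(&my_tasklet);	(before the owning module goes away)
 */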
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
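/*
 * A driver would typically pair the init above with tasklet_hrtimer_start()
 * (declared in <linux/interrupt.h>) to arm the timer; the supplied callback
 * then runs from HI_SOFTIRQ context instead of hard irq context.  Rough
 * sketch, with my_th and my_cb as made-up names:
 *
 *	tasklet_hrtimer_init(&my_th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_th, ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */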
/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}

	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);
/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
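/*
 * Rough usage sketch: the caller embeds a struct call_single_data in the
 * unit of work it wants handled on another CPU and simply calls
 * send_remote_softirq(cp, target_cpu, BLOCK_SOFTIRQ) (the vector shown is
 * only an example).  If the target CPU cannot be reached, the work is
 * queued on the local CPU's softirq_work_list instead, as described above.
 */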
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
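/*
 * Per-CPU ksoftirqd thread.  It sleeps until wakeup_softirqd() wakes it,
 * then keeps calling do_softirq() while work is pending, rescheduling
 * between rounds so it cannot starve normal tasks.  If its CPU goes
 * offline it parks in wait_to_die until kthread_stop() is called from
 * the hotplug notifier below.
 */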
static int run_ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_note_context_switch((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
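/*
 * Called from the CPU hotplug notifier when a CPU has died: splice the dead
 * CPU's normal and high-priority tasklet lists onto the current CPU's lists
 * and raise the corresponding softirqs so the orphaned tasklets still run.
 */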
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
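/*
 * CPU hotplug callback: create and bind a ksoftirqd thread when a CPU is
 * being prepared, wake it once the CPU is online, and on cancel/death push
 * the thread to SCHED_FIFO, stop it, and hand its pending tasklets to a
 * surviving CPU via takeover_tasklets().
 */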
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return notifier_from_errno(PTR_ERR(p));
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err != NOTIFY_OK);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
	return 0;
}