/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
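
/*
 * Illustrative sketch of the distinction above (hypothetical caller,
 * not part of this file): with the two offsets, softirq_count() can
 * tell the states apart:
 *
 *	if (softirq_count() & SOFTIRQ_OFFSET)
 *		...	currently executing a softirq handler
 *	else if (softirq_count() & SOFTIRQ_DISABLE_OFFSET)
 *		...	bh merely disabled via local_bh_disable()
 *
 * which is what the in_serving_softirq() and in_softirq() helpers
 * build on.
 */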
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
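
/*
 * Illustrative sketch of the usual entry points to the code above
 * (hypothetical names, not part of this file): callers use the
 * local_bh_disable()/local_bh_enable() wrappers to keep softirqs off
 * the local CPU across a short critical section.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, example_stat);

static void example_bump_stat(void)
{
	local_bh_disable();		/* softirqs held off on this CPU */
	__this_cpu_inc(example_stat);
	local_bh_enable();		/* may run pending softirqs now */
}
#endif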
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
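
/*
 * Note that msecs_to_jiffies() rounds up: with HZ=1000 the 2 ms budget
 * is 2 jiffies, but with HZ=100 it becomes a single 10 ms jiffy, so the
 * effective time budget is never finer than one tick.
 */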
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
asmlinkage void __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;
	int cpu;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			printk(KERN_ERR "huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs(cpu);
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
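
/*
 * Illustrative walk-through of the ffs() loop above (hypothetical
 * numbers): with pending == 0x0a, i.e. TIMER (bit 1) and NET_RX (bit 3)
 * raised, the first ffs(pending) returns 2, so h advances to
 * softirq_vec[1] and the TIMER action runs; pending >>= 2 leaves 0x02,
 * ffs() returns 2 again, h steps from softirq_vec[2] to softirq_vec[3]
 * and NET_RX runs; the shifted mask is now 0 and the loop ends.
 */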
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}
static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}
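
/*
 * Illustrative flow (hypothetical arch code, not part of this file):
 * low-level interrupt entry typically brackets the handler with the two
 * helpers above, which is where pending softirqs actually get run:
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	hardirq handler, may raise softirqs
 *	irq_exit();			runs them via invoke_softirq()
 */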
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
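
/*
 * Illustrative sketch of the API above (hypothetical names, not part of
 * this file): a subsystem registers its action for one of the fixed
 * NR_SOFTIRQS slots at boot and raises it later from hardirq context.
 * EXAMPLE_SOFTIRQ stands in for a real slot from the enum in
 * <linux/interrupt.h>; new slots cannot be added at runtime.
 */
#if 0
static void example_softirq_action(struct softirq_action *h)
{
	/* deferred work; runs in softirq context with hardirqs enabled */
}

static int __init example_subsys_init(void)
{
	open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);
	return 0;
}

static irqreturn_t example_hardirq_handler(int irq, void *dev_id)
{
	raise_softirq(EXAMPLE_SOFTIRQ);	/* serviced soon, on this CPU */
	return IRQ_HANDLED;
}
#endif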
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
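
/*
 * Illustrative driver-side usage of the tasklet API above (hypothetical
 * names, not part of this file):
 */
#if 0
static void example_tasklet_fn(unsigned long data)
{
	struct example_dev *dev = (struct example_dev *)data;

	/* deferred work; runs in softirq context, must not sleep */
}

static int example_probe(struct example_dev *dev)
{
	tasklet_init(&dev->tasklet, example_tasklet_fn, (unsigned long)dev);
	return 0;
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	tasklet_schedule(&dev->tasklet);	/* sets SCHED, raises TASKLET_SOFTIRQ */
	return IRQ_HANDLED;
}

static void example_remove(struct example_dev *dev)
{
	tasklet_kill(&dev->tasklet);		/* may sleep; not for irq context */
}
#endif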
/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
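
/*
 * Illustrative sketch of the combo above (hypothetical names, not part
 * of this file): the callback fires in softirq context rather than the
 * hard interrupt context of a plain hrtimer.
 */
#if 0
static struct tasklet_hrtimer example_ttimer;

static enum hrtimer_restart example_cb(struct hrtimer *timer)
{
	/* softirq context; bh-safe locks are fine here */
	return HRTIMER_NORESTART;
}

static void example_arm(void)
{
	tasklet_hrtimer_init(&example_ttimer, example_cb,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&example_ttimer.timer, ms_to_ktime(10),
		      HRTIMER_MODE_REL);
}
#endif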
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}
static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the current stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};
static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};
static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}