/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
	printk(KERN_INFO
	       "Experimental preemptable hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 */
static void rcu_preempt_qs_record(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
	rdp->passed_quiesc = 1;
	rdp->passed_quiesc_completed = rdp->completed;
}

/*
 * We have entered the scheduler or are between softirqs in ksoftirqd.
 * If we are in an RCU read-side critical section, we need to reflect
 * that in the state of the rcu_node structure corresponding to this CPU.
 * Caller must disable hardirqs.
 */
static void rcu_preempt_qs(int cpu)
{
	struct task_struct *t = current;
	int phase;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = rcu_preempt_state.rda[cpu];
		rnp = rdp->mynode;
		spin_lock(&rnp->lock);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = (void *)rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
		smp_mb();  /* Ensure later ctxt swtch seen after above. */
		spin_unlock(&rnp->lock);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs_record(cpu);
	t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS |
					RCU_READ_UNLOCK_GOT_QS);
}
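
/*
 * Worked example of the queueing decision in rcu_preempt_qs() above
 * (illustration only, not code): blocked_tasks[] has two elements,
 * indexed by the low-order bit of the relevant grace-period number.
 * If this CPU has not yet passed through a quiescent state for the
 * current grace period (its ->grpmask bit is still set in ->qsmask),
 * the blocking task holds up the current grace period, so phase equals
 * rnp->gpnum & 0x1 and the task is queued on the current grace period's
 * list.  If the CPU has already checked in, the task can only hold up
 * the next grace period, so phase flips to the other list.  For
 * instance, with rnp->gpnum == 5 and the CPU's bit still set in
 * ->qsmask: !(nonzero) ^ (5 & 0x1) == 0 ^ 1 == 1, which is exactly
 * 5 & 0x1, the current grace period's list.
 */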

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	ACCESS_ONCE(current->rcu_read_lock_nesting)++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS;
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = (struct rcu_node *)t->rcu_blocked_node;
			spin_lock(&rnp->lock);
			if (rnp == (struct rcu_node *)t->rcu_blocked_node)
				break;
			spin_unlock(&rnp->lock);
		}
		empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk()
		 * drop rnp->lock and restore irq.
		 */
		if (!empty && rnp->qsmask == 0 &&
		    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
			t->rcu_read_unlock_special &=
				~(RCU_READ_UNLOCK_NEED_QS |
				  RCU_READ_UNLOCK_GOT_QS);
			if (rnp->parent == NULL) {
				/* Only one rcu_node in the tree. */
				cpu_quiet_msk_finish(&rcu_preempt_state, flags);
				return;
			}
			/* Report up the rest of the hierarchy. */
			mask = rnp->grpmask;
			spin_unlock_irqrestore(&rnp->lock, flags);
			rnp = rnp->parent;
			spin_lock_irqsave(&rnp->lock, flags);
			cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
			return;
		}
		spin_unlock(&rnp->lock);
	}
	local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
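
/*
 * Illustrative sketch (not part of this implementation): a typical
 * read-side critical section as seen through the rcu_read_lock() and
 * rcu_read_unlock() wrappers around the primitives above.  Under
 * CONFIG_TREE_PREEMPT_RCU the reader may be preempted inside the
 * critical section; the outermost rcu_read_unlock() then takes the
 * rcu_read_unlock_special() slow path.  The structure and names
 * (struct foo, gbl_foo, read_foo_a) are hypothetical.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *	static struct foo *gbl_foo;
 *
 *	int read_foo_a(void)
 *	{
 *		struct foo *p;
 *		int ret;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(gbl_foo);
 *		ret = p ? p->a : -1;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 */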

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	struct list_head *lp;
	int phase = rnp->gpnum & 0x1;
	struct task_struct *t;

	if (!list_empty(&rnp->blocked_tasks[phase])) {
		spin_lock_irqsave(&rnp->lock, flags);
		phase = rnp->gpnum & 0x1; /* re-read under lock. */
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
		spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check for preempted RCU readers for the specified rcu_node structure.
 * If the caller needs a reliable answer, it must hold the rcu_node's
 * ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
				      struct rcu_node *rnp)
{
	int i;
	struct list_head *lp;
	struct list_head *lp_root;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *tp;

	if (rnp == rnp_root)
		return;  /* Shouldn't happen: at least one CPU online. */

	/*
	 * Move tasks up to root rcu_node.  Rely on the fact that the
	 * root rcu_node can be at most one ahead of the rest of the
	 * rcu_nodes in terms of gp_num value.  This fact allows us to
	 * move the blocked_tasks[] array directly, element by element.
	 */
	for (i = 0; i < 2; i++) {
		lp = &rnp->blocked_tasks[i];
		lp_root = &rnp_root->blocked_tasks[i];
		while (!list_empty(lp)) {
			tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
			spin_lock(&rnp_root->lock); /* irqs already disabled */
			list_del(&tp->rcu_node_entry);
			tp->rcu_blocked_node = rnp_root;
			list_add(&tp->rcu_node_entry, lp_root);
			spin_unlock(&rnp_root->lock); /* irqs remain disabled */
		}
	}
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		t->rcu_read_unlock_special &=
			~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS);
		rcu_preempt_qs_record(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending) {
		if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) {
			rcu_preempt_qs_record(cpu);
			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS;
		} else if (!(t->rcu_read_unlock_special &
			     RCU_READ_UNLOCK_NEED_QS)) {
			t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
		}
	}
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
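
/*
 * Illustrative sketch (not part of this implementation): a typical
 * caller of call_rcu(), deferring kfree() of a replaced element until
 * a grace period has elapsed.  Error handling is omitted, and the
 * structure, lock, and function names (struct foo, gbl_foo, foo_lock,
 * foo_reclaim, foo_update_a) are hypothetical.
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	void foo_update_a(int new_a)
 *	{
 *		struct foo *new_fp;
 *		struct foo *old_fp;
 *
 *		new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *		new_fp->a = new_a;
 *		spin_lock(&foo_lock);
 *		old_fp = gbl_foo;
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		spin_unlock(&foo_lock);
 *		call_rcu(&old_fp->rcu, foo_reclaim);
 *	}
 */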

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_qs(int cpu)
{
}

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
				      struct rcu_node *rnp)
{
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */