/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

        c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        set_cpu_online(cpu, true);
        set_cpu_possible(cpu, true);

        per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
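/* Wait for the dying CPU to mark itself CPU_DEAD, polling for up to a second. */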
void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        return;
                }

                msleep(100);
        }

        pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
        return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __get_cpu_var(cpu_state) = CPU_DEAD;
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
}

int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = mp_ops->cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        local_timer_stop(cpu);

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p)
                if (p->mm)
                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        read_unlock(&tasklist_lock);

        return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}
#endif

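/*
 * First C code run on a secondary CPU: enable the MMU, attach to init_mm,
 * calibrate the delay loop, and mark the CPU online before entering the
 * idle loop.
 */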
asmlinkage void __cpuinit start_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        enable_mmu();
        atomic_inc(&mm->mm_count);
        atomic_inc(&mm->mm_users);
        current->active_mm = mm;
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        per_cpu_trap_init();

        preempt_disable();

        notify_cpu_starting(cpu);

        local_irq_enable();

        /* Enable local timers */
        local_timer_setup(cpu);
        calibrate_delay();

        smp_store_cpu_info(cpu);

        set_cpu_online(cpu, true);
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        cpu_idle();
}

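/* Secondary CPU boot parameters, filled in by __cpu_up() and consumed by head.S. */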
extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;

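/*
 * Bring a secondary CPU online: fork its idle task if one doesn't exist yet,
 * publish the boot parameters in stack_start, kick the CPU through the
 * platform ops, and then wait up to a second for it to show up online.
 */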
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *tsk;
        unsigned long timeout;

        tsk = cpu_data[cpu].idle;
        if (!tsk) {
                tsk = fork_idle(cpu);
                if (IS_ERR(tsk)) {
                        pr_err("Failed forking idle task for cpu %d\n", cpu);
                        return PTR_ERR(tsk);
                }

                cpu_data[cpu].idle = tsk;
        }

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
        irq_enter();
        local_timer_interrupt();
        irq_exit();
}

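/* Demultiplex an incoming IPI and dispatch it to the matching handler. */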
void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                scheduler_ipi();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-cpu interrupts have to be sent.
 * Another case where inter-cpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

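/* Argument block handed to the TLB shootdown IPI handlers below. */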
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}