/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include "entry.h"

/* Sigp order codes; the values follow the s390 architecture. */
enum {
	sigp_sense = 1,
	sigp_external_call = 2,
	sigp_emergency_signal = 3,
	sigp_start = 4,
	sigp_stop = 5,
	sigp_restart = 6,
	sigp_stop_and_store_status = 9,
	sigp_initial_cpu_reset = 11,
	sigp_cpu_reset = 12,
	sigp_set_prefix = 13,
	sigp_store_status_at_address = 14,
	sigp_store_extended_status_at_address = 15,
	sigp_set_architecture = 18,
	sigp_conditional_emergency_signal = 19,
	sigp_sense_running = 21,
};

/* Sigp condition codes. */
enum {
	sigp_order_code_accepted = 0,
	sigp_status_stored = 1,
	sigp_busy = 2,
	sigp_not_operational = 3,
};

/* Bits for the ec_mask signal mailbox; all of them are tested below. */
enum {
	ec_schedule = 0,
	ec_call_function,
	ec_call_function_single,
	ec_stop_cpu,
};

/* Physical cpu states. */
enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

struct pcpu {
	struct cpu cpu;
	struct task_struct *idle;	/* idle process for the cpu */
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long async_stack;	/* async stack for the cpu */
	unsigned long panic_stack;	/* panic stack for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	int state;			/* physical cpu state */
	u32 status;			/* last status received via sigp */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];

DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
{
	register unsigned int reg1 asm ("1") = parm;
	int cc;

	asm volatile(
		"	sigp	%1,%2,0(%3)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
	if (status && cc == 1)
		*status = reg1;
	return cc;
}

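/*
 * Note on the helper above: sigp sets the condition code to
 * 0 (order accepted), 1 (status stored), 2 (busy) or
 * 3 (not operational); the ipm/srl pair extracts that code from
 * the PSW. Only cc 1 leaves status information in the parameter
 * register, which is why *status is filled in for that case alone.
 */
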
static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, status);
		if (cc != sigp_busy)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
		if (cc != sigp_busy)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, sigp_sense,
			0, &pcpu->status) != sigp_status_stored)
		return 0;
	/* Check for stopped and check stop state */
	return !!(pcpu->status & 0x50);
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, sigp_sense_running,
			0, &pcpu->status) != sigp_status_stored)
		return 1;
	/* Check for running status */
	return !(pcpu->status & 0x400);
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	set_bit(ec_bit, &pcpu->ec_mask);
	order = pcpu_running(pcpu) ?
		sigp_external_call : sigp_emergency_signal;
	pcpu_sigp_retry(pcpu, order, 0);
}

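/*
 * The sigp order here is only a doorbell: the actual request is the
 * ec_bit recorded in ec_mask, which do_ext_call_interrupt() below
 * collects with xchg(). A running cpu is signaled with an external
 * call (0x1202), a not-running one with an emergency signal (0x1201);
 * both arrive in the same interrupt handler.
 */
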
static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
			goto out;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
	lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
		if (!lc->extended_save_area_addr)
			goto out;
	}
#else
	if (vdso_alloc_per_cpu(lc))
		goto out;
#endif
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		struct _lowcore *lc = pcpu->lowcore;

		free_page((unsigned long) lc->extended_save_area_addr);
		lc->extended_save_area_addr = 0;
	}
#else
	vdso_free_per_cpu(pcpu->lowcore);
#endif
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
}

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, sigp_restart, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = pcpu->lowcore;
	unsigned short this_cpu;

	__load_psw_mask(psw_kernel_bits);
	this_cpu = stap();
	if (pcpu->address == this_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, sigp_stop, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	lc->restart_stack = stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = (unsigned long) this_cpu;
	asm volatile(
		"0:	sigp	0,%0,6	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,5	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
	for (;;) ;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->panic_stack + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

void smp_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

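/*
 * diag 0x9c asks the hypervisor to donate the remaining timeslice to
 * the specific cpu we are waiting for; diag 0x44 is the older,
 * undirected "yield to anyone" variant used as a fallback.
 */
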
/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
				   0, NULL) == sigp_busy &&
		       get_clock() < end)
			cpu_relax();
	}
	while (get_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}

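/*
 * The timeout above is one second: the TOD clock advances 4096 units
 * per microsecond, so 1000000UL << 12 TOD ticks equal 10^6 us.
 */
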
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, sigp_stop, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * Stop the current cpu.
 */
void smp_stop_cpu(void)
{
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
	for (;;) ;
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	unsigned long bits;
	int cpu;

	cpu = smp_processor_id();
	if (ext_code.code == 0x1202)
		kstat_cpu(cpu).irqs[EXTINT_EXC]++;
	else
		kstat_cpu(cpu).irqs[EXTINT_EMS]++;
	/*
	 * handle bit signal external calls
	 */
	bits = xchg(&pcpu_devices[cpu].ec_mask, 0);

	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();

	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

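/*
 * Note that the xchg() above claims all pending ec_mask bits at once,
 * so several requests raised between interrupts are served by a
 * single external interrupt.
 */
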
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

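/*
 * Illustrative (hypothetical) caller: code that wants to enable some
 * interrupt subclass bit in a control register on every cpu would do
 * e.g.
 *
 *	smp_ctl_set_bit(0, 9);
 *
 * which ORs 1UL << 9 into control register 0 on all online cpus via
 * the on_each_cpu() callback above; the bit number here is only an
 * example, not a specific architectural assignment.
 */
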
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

static void __init smp_get_save_area(int cpu, u16 address)
{
	void *lc = pcpu_devices[0].lowcore;
	struct save_area *save_area;

	if (is_kdump_kernel())
		return;
	if (!OLDMEM_BASE && (address == boot_cpu_address ||
			     ipl_info.type != IPL_TYPE_FCP_DUMP))
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded "
			   "from the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	if (!save_area)
		panic("could not allocate memory for save area\n");
	zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
	if (address == boot_cpu_address) {
		/* Copy the registers of the boot cpu. */
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		return;
	}
#endif
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
	memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
}

int smp_store_status(int cpu)
{
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
			      0, NULL) != sigp_order_code_accepted)
		return -EIO;
	return 0;
}

#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static struct sclp_cpu_info *smp_get_cpu_info(void)
{
	static int use_sigp_detection;
	struct sclp_cpu_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
			if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
			    sigp_not_operational)
				continue;
			info->cpu[info->configured].address = address;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

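/*
 * If the SCLP query fails (sclp_get_cpu_info() returns non-zero), the
 * code falls back to brute-force detection: every possible cpu address
 * is probed with sigp sense, and any address that does not answer
 * "not operational" is recorded as a configured cpu. Once the fallback
 * has been used it sticks, since use_sigp_detection is static.
 */
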
static int __devinit smp_add_present_cpu(int cpu);

static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
				       int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = info->cpu[i].address;
		pcpu->state = (cpu >= info->configured) ?
			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpu = cpumask_next(cpu, &avail);
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;

	info = smp_get_cpu_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address != boot_cpu_address)
				continue;
			/* The boot cpu dictates the cpu type. */
			boot_cpu_type = info->cpu[cpu].type;
			break;
		}
	}
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
			continue;
		if (cpu < info->configured) {
			smp_get_save_area(c_cpus, info->cpu[cpu].address);
			c_cpus++;
		} else
			s_cpus++;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 * Activate a secondary processor.
 */
static void __cpuinit smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	init_cpu_vtimer();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock();
	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
}

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit smp_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle;

	c_idle = container_of(work, struct create_idle, work);
	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct create_idle c_idle;
	struct pcpu *pcpu;
	int rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
	    sigp_order_code_accepted)
		return -EIO;
	if (!pcpu->idle) {
		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
		c_idle.cpu = cpu;
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
		if (IS_ERR(c_idle.idle))
			return PTR_ERR(c_idle.idle);
		pcpu->idle = c_idle.idle;
	}
	init_idle(pcpu->idle, cpu);
	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, pcpu->idle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

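/*
 * Bring-up order matters here: the target cpu is reset first, then the
 * idle task is created (on a workqueue, so it does not inherit
 * properties of whichever task happens to call __cpu_up), the per-cpu
 * lowcore is allocated and set as the prefix area, and only then is
 * the cpu restarted into smp_start_secondary(). The caller spins until
 * the new cpu has marked itself online.
 */
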
static int __init setup_possible_cpus(char *s)
{
	int max, cpu;

	if (kstrtoint(s, 0, &max) < 0)
		return 0;
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

static void smp_call_os_info_init_fn(void)
{
	int (*init_fn)(void);
	unsigned long size;

	init_fn = os_info_old_entry(OS_INFO_INIT_FN, &size);
	if (!init_fn)
		return;
	init_fn();
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");
	smp_call_os_info_init_fn();
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	boot_cpu_address = stap();
	pcpu->idle = current;
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = boot_cpu_address;
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_STANDBY;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}

static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

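/*
 * Illustrative usage from a shell (path per the standard sysfs cpu
 * device layout): "echo 0 > /sys/devices/system/cpu/cpuN/configure"
 * moves a standby-capable cpu N to standby, "echo 1" configures it
 * again. Online cpus and cpu 0 are rejected with -EBUSY above.
 */
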
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_capability(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static DEVICE_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int sequence;

	do {
		sequence = ACCESS_ONCE(idle->sequence);
		idle_count = ACCESS_ONCE(idle->idle_count);
		if (ACCESS_ONCE(idle->idle_enter))
			idle_count++;
	} while ((sequence & 1) || (idle->sequence != sequence));
	return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

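/*
 * The do/while loops here and in show_idle_time() are an open-coded
 * sequence counter: the updater bumps idle->sequence before and after
 * each update, so an odd value, or a value that changed while reading,
 * means the snapshot is inconsistent and must be retried. A non-zero
 * idle_enter means the cpu is idle right now, so the in-progress idle
 * period is counted (idle_count++) or added (idle_time).
 */
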
static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long now, idle_time, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_time = ACCESS_ONCE(idle->idle_time);
		idle_enter = ACCESS_ONCE(idle->idle_enter);
		idle_exit = ACCESS_ONCE(idle->idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
	/* Shifting TOD units right by 12 converts to microseconds. */
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_capability.attr,
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	struct s390_idle_data *idle;
	int err = 0;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		memset(idle, 0, sizeof(struct s390_idle_data));
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_cpu_info *info;
	int nr;

	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

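/*
 * Writing anything to /sys/devices/system/cpu/rescan (the attribute
 * created on cpu_subsys.dev_root below) triggers an SCLP, or sigp
 * fallback, re-detection and registers any newly available cpus as
 * present standby devices.
 */
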
static int __init s390_smp_init(void)
{
	int cpu, rc;

	register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(s390_smp_init);