[S390] rework smp code
author Martin Schwidefsky <schwidefsky@de.ibm.com>
	Sun, 11 Mar 2012 15:59:26 +0000 (11:59 -0400)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
	Sun, 11 Mar 2012 15:59:28 +0000 (11:59 -0400)
Define struct pcpu and merge some of the NR_CPUS arrays into it, including
__cpu_logical_map, current_set and smp_cpu_state. Split the smp-related
functions into those operating on physical cpus and those operating on a
logical cpu number, and make the functions for physical cpus take a
pointer to a struct pcpu. This confines the knowledge of physical cpu
addresses to smp.c, entry[64].S and swsusp_asm64.S, which allows the
sigp.h header to be removed. The consolidation is sketched below.
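
As an illustration only (not part of the patch), a minimal sketch of the
consolidation; the names follow the patch, and the authoritative definition
of struct pcpu is in the smp.c hunk further down:

    /* Before: parallel NR_CPUS arrays, one per per-cpu attribute. */
    unsigned short __cpu_logical_map[NR_CPUS];  /* logical -> physical cpu address */
    struct task_struct *current_set[NR_CPUS];   /* idle task per cpu */
    static int smp_cpu_state[NR_CPUS];          /* configured/standby state */

    /* After: a single array of struct pcpu, one entry per logical cpu. */
    struct pcpu {
            struct task_struct *idle;   /* replaces current_set[] */
            int state;                  /* replaces smp_cpu_state[] */
            u16 address;                /* replaces __cpu_logical_map[] */
            /* ... plus lowcore, stacks and ec_mask, see the smp.c hunk ... */
    };
    static struct pcpu pcpu_devices[NR_CPUS];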

The PSW restart mechanism is now used to start secondary cpus, to call a
function on an online cpu, to call a function on the ipl cpu, and for the
nmi signal. Replace the different assembler functions with a single entry
point, restart_int_handler. The new entry point calls a function whose
pointer is stored in the lowcore of the target cpu, and it can wait for
the source cpu to stop. This covers all existing use cases; the dispatch
flow is sketched below.
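
In rough C pseudocode the new entry point behaves as follows. This is only
an illustration of the flow; the real implementation is the
restart_int_handler assembler code in the entry.S and entry64.S hunks, and
sigp()/stap() here stand in for the raw instructions:

    /* Illustrative sketch only -- not part of the patch. The lowcore
     * fields and sigp order codes are the ones this patch introduces. */
    void restart_int_handler_sketch(void)
    {
            void (*fn)(void *) = (void (*)(void *)) S390_lowcore.restart_fn;
            void *data = (void *) S390_lowcore.restart_data;
            long source = (long) S390_lowcore.restart_source;

            if (source >= 0)        /* a source cpu address was given */
                    while (sigp(source, sigp_sense) != sigp_status_stored)
                            ;       /* wait for the source cpu to stop */
            fn(data);               /* e.g. do_restart or a start function */
            sigp(stap(), sigp_stop);/* if fn returns, stop the current cpu */
            for (;;) ;
    }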

Overall the code is now simpler, and there are ~380 fewer lines of code.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
22 files changed:
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/sigp.h [deleted file]
arch/s390/include/asm/smp.h
arch/s390/include/asm/vdso.h
arch/s390/kernel/Makefile
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry.h
arch/s390/kernel/entry64.S
arch/s390/kernel/ipl.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/switch_cpu.S [deleted file]
arch/s390/kernel/switch_cpu64.S [deleted file]
arch/s390/kernel/swsusp_asm64.S
arch/s390/kernel/topology.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vtime.c
arch/s390/lib/spinlock.c
drivers/s390/char/sclp_quiesce.c
drivers/s390/char/zcore.c

diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 0831449e87a3866ec934b24efe40d3fc9dac93c8..4e69563bc956fd55b3220490621578659eb2d8b0 100644
@@ -1,5 +1,5 @@
 /*
- *    Copyright IBM Corp. 1999,2010
+ *    Copyright IBM Corp. 1999,2012
  *    Author(s): Hartmut Penner <hp@de.ibm.com>,
  *              Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *              Denis Joseph Barrow,
 #include <asm/ptrace.h>
 #include <asm/cpu.h>
 
-void restart_int_handler(void);
-void ext_int_handler(void);
-void system_call(void);
-void pgm_check_handler(void);
-void mcck_int_handler(void);
-void io_int_handler(void);
-void psw_restart_int_handler(void);
-
 #ifdef CONFIG_32BIT
 
 #define LC_ORDER 0
@@ -117,32 +109,37 @@ struct _lowcore {
        __u64   steal_timer;                    /* 0x0288 */
        __u64   last_update_timer;              /* 0x0290 */
        __u64   last_update_clock;              /* 0x0298 */
+       __u64   int_clock;                      /* 0x02a0 */
+       __u64   mcck_clock;                     /* 0x02a8 */
+       __u64   clock_comparator;               /* 0x02b0 */
 
        /* Current process. */
-       __u32   current_task;                   /* 0x02a0 */
-       __u32   thread_info;                    /* 0x02a4 */
-       __u32   kernel_stack;                   /* 0x02a8 */
+       __u32   current_task;                   /* 0x02b8 */
+       __u32   thread_info;                    /* 0x02bc */
+       __u32   kernel_stack;                   /* 0x02c0 */
+
+       /* Interrupt, panic and restart stack. */
+       __u32   async_stack;                    /* 0x02c4 */
+       __u32   panic_stack;                    /* 0x02c8 */
+       __u32   restart_stack;                  /* 0x02cc */
 
-       /* Interrupt and panic stack. */
-       __u32   async_stack;                    /* 0x02ac */
-       __u32   panic_stack;                    /* 0x02b0 */
+       /* Restart function and parameter. */
+       __u32   restart_fn;                     /* 0x02d0 */
+       __u32   restart_data;                   /* 0x02d4 */
+       __u32   restart_source;                 /* 0x02d8 */
 
        /* Address space pointer. */
-       __u32   kernel_asce;                    /* 0x02b4 */
-       __u32   user_asce;                      /* 0x02b8 */
-       __u32   current_pid;                    /* 0x02bc */
+       __u32   kernel_asce;                    /* 0x02dc */
+       __u32   user_asce;                      /* 0x02e0 */
+       __u32   current_pid;                    /* 0x02e4 */
 
        /* SMP info area */
-       __u32   cpu_nr;                         /* 0x02c0 */
-       __u32   softirq_pending;                /* 0x02c4 */
-       __u32   percpu_offset;                  /* 0x02c8 */
-       __u32   ext_call_fast;                  /* 0x02cc */
-       __u64   int_clock;                      /* 0x02d0 */
-       __u64   mcck_clock;                     /* 0x02d8 */
-       __u64   clock_comparator;               /* 0x02e0 */
-       __u32   machine_flags;                  /* 0x02e8 */
-       __u32   ftrace_func;                    /* 0x02ec */
-       __u8    pad_0x02f8[0x0300-0x02f0];      /* 0x02f0 */
+       __u32   cpu_nr;                         /* 0x02e8 */
+       __u32   softirq_pending;                /* 0x02ec */
+       __u32   percpu_offset;                  /* 0x02f0 */
+       __u32   machine_flags;                  /* 0x02f4 */
+       __u32   ftrace_func;                    /* 0x02f8 */
+       __u8    pad_0x02fc[0x0300-0x02fc];      /* 0x02fc */
 
        /* Interrupt response block */
        __u8    irb[64];                        /* 0x0300 */
@@ -254,34 +251,39 @@ struct _lowcore {
        __u64   steal_timer;                    /* 0x02e0 */
        __u64   last_update_timer;              /* 0x02e8 */
        __u64   last_update_clock;              /* 0x02f0 */
+       __u64   int_clock;                      /* 0x02f8 */
+       __u64   mcck_clock;                     /* 0x0300 */
+       __u64   clock_comparator;               /* 0x0308 */
 
        /* Current process. */
-       __u64   current_task;                   /* 0x02f8 */
-       __u64   thread_info;                    /* 0x0300 */
-       __u64   kernel_stack;                   /* 0x0308 */
+       __u64   current_task;                   /* 0x0310 */
+       __u64   thread_info;                    /* 0x0318 */
+       __u64   kernel_stack;                   /* 0x0320 */
+
+       /* Interrupt, panic and restart stack. */
+       __u64   async_stack;                    /* 0x0328 */
+       __u64   panic_stack;                    /* 0x0330 */
+       __u64   restart_stack;                  /* 0x0338 */
 
-       /* Interrupt and panic stack. */
-       __u64   async_stack;                    /* 0x0310 */
-       __u64   panic_stack;                    /* 0x0318 */
+       /* Restart function and parameter. */
+       __u64   restart_fn;                     /* 0x0340 */
+       __u64   restart_data;                   /* 0x0348 */
+       __u64   restart_source;                 /* 0x0350 */
 
        /* Address space pointer. */
-       __u64   kernel_asce;                    /* 0x0320 */
-       __u64   user_asce;                      /* 0x0328 */
-       __u64   current_pid;                    /* 0x0330 */
+       __u64   kernel_asce;                    /* 0x0358 */
+       __u64   user_asce;                      /* 0x0360 */
+       __u64   current_pid;                    /* 0x0368 */
 
        /* SMP info area */
-       __u32   cpu_nr;                         /* 0x0338 */
-       __u32   softirq_pending;                /* 0x033c */
-       __u64   percpu_offset;                  /* 0x0340 */
-       __u64   ext_call_fast;                  /* 0x0348 */
-       __u64   int_clock;                      /* 0x0350 */
-       __u64   mcck_clock;                     /* 0x0358 */
-       __u64   clock_comparator;               /* 0x0360 */
-       __u64   vdso_per_cpu_data;              /* 0x0368 */
-       __u64   machine_flags;                  /* 0x0370 */
-       __u64   ftrace_func;                    /* 0x0378 */
-       __u64   gmap;                           /* 0x0380 */
-       __u8    pad_0x0388[0x0400-0x0388];      /* 0x0388 */
+       __u32   cpu_nr;                         /* 0x0370 */
+       __u32   softirq_pending;                /* 0x0374 */
+       __u64   percpu_offset;                  /* 0x0378 */
+       __u64   vdso_per_cpu_data;              /* 0x0380 */
+       __u64   machine_flags;                  /* 0x0388 */
+       __u64   ftrace_func;                    /* 0x0390 */
+       __u64   gmap;                           /* 0x0398 */
+       __u8    pad_0x03a0[0x0400-0x03a0];      /* 0x03a0 */
 
        /* Interrupt response block. */
        __u8    irb[64];                        /* 0x0400 */
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
deleted file mode 100644
index 7040b85..0000000
--- a/arch/s390/include/asm/sigp.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- *  Routines and structures for signalling other processors.
- *
- *    Copyright IBM Corp. 1999,2010
- *    Author(s): Denis Joseph Barrow,
- *              Martin Schwidefsky <schwidefsky@de.ibm.com>,
- *              Heiko Carstens <heiko.carstens@de.ibm.com>,
- */
-
-#ifndef __ASM_SIGP_H
-#define __ASM_SIGP_H
-
-#include <asm/system.h>
-
-/* Get real cpu address from logical cpu number. */
-extern unsigned short __cpu_logical_map[];
-
-static inline int cpu_logical_map(int cpu)
-{
-#ifdef CONFIG_SMP
-       return __cpu_logical_map[cpu];
-#else
-       return stap();
-#endif
-}
-
-enum {
-       sigp_sense = 1,
-       sigp_external_call = 2,
-       sigp_emergency_signal = 3,
-       sigp_start = 4,
-       sigp_stop = 5,
-       sigp_restart = 6,
-       sigp_stop_and_store_status = 9,
-       sigp_initial_cpu_reset = 11,
-       sigp_cpu_reset = 12,
-       sigp_set_prefix = 13,
-       sigp_store_status_at_address = 14,
-       sigp_store_extended_status_at_address = 15,
-       sigp_set_architecture = 18,
-       sigp_conditional_emergency_signal = 19,
-       sigp_sense_running = 21,
-};
-
-enum {
-       sigp_order_code_accepted = 0,
-       sigp_status_stored = 1,
-       sigp_busy = 2,
-       sigp_not_operational = 3,
-};
-
-/*
- * Definitions for external call.
- */
-enum {
-       ec_schedule = 0,
-       ec_call_function,
-       ec_call_function_single,
-       ec_stop_cpu,
-};
-
-/*
- * Signal processor.
- */
-static inline int raw_sigp(u16 cpu, int order)
-{
-       register unsigned long reg1 asm ("1") = 0;
-       int ccode;
-
-       asm volatile(
-               "       sigp    %1,%2,0(%3)\n"
-               "       ipm     %0\n"
-               "       srl     %0,28\n"
-               :       "=d"    (ccode)
-               : "d" (reg1), "d" (cpu),
-                 "a" (order) : "cc" , "memory");
-       return ccode;
-}
-
-/*
- * Signal processor with parameter.
- */
-static inline int raw_sigp_p(u32 parameter, u16 cpu, int order)
-{
-       register unsigned int reg1 asm ("1") = parameter;
-       int ccode;
-
-       asm volatile(
-               "       sigp    %1,%2,0(%3)\n"
-               "       ipm     %0\n"
-               "       srl     %0,28\n"
-               : "=d" (ccode)
-               : "d" (reg1), "d" (cpu),
-                 "a" (order) : "cc" , "memory");
-       return ccode;
-}
-
-/*
- * Signal processor with parameter and return status.
- */
-static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order)
-{
-       register unsigned int reg1 asm ("1") = parm;
-       int ccode;
-
-       asm volatile(
-               "       sigp    %1,%2,0(%3)\n"
-               "       ipm     %0\n"
-               "       srl     %0,28\n"
-               : "=d" (ccode), "+d" (reg1)
-               : "d" (cpu), "a" (order)
-               : "cc" , "memory");
-       *status = reg1;
-       return ccode;
-}
-
-static inline int sigp(int cpu, int order)
-{
-       return raw_sigp(cpu_logical_map(cpu), order);
-}
-
-static inline int sigp_p(u32 parameter, int cpu, int order)
-{
-       return raw_sigp_p(parameter, cpu_logical_map(cpu), order);
-}
-
-static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order)
-{
-       return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order);
-}
-
-#endif /* __ASM_SIGP_H */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index c32e9123b40c1e10a3346a5435469e1a6888c8c8..797f7872968020aa26430e72b847c6ec8a2ee887 100644
@@ -1,5 +1,5 @@
 /*
- *    Copyright IBM Corp. 1999,2009
+ *    Copyright IBM Corp. 1999,2012
  *    Author(s): Denis Joseph Barrow,
  *              Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *              Heiko Carstens <heiko.carstens@de.ibm.com>,
 #ifdef CONFIG_SMP
 
 #include <asm/system.h>
-#include <asm/sigp.h>
-
-extern void machine_restart_smp(char *);
-extern void machine_halt_smp(void);
-extern void machine_power_off_smp(void);
 
 #define raw_smp_processor_id() (S390_lowcore.cpu_nr)
 
-extern int __cpu_disable (void);
-extern void __cpu_die (unsigned int cpu);
-extern int __cpu_up (unsigned int cpu);
-
 extern struct mutex smp_cpu_state_mutex;
+extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
+
+extern int __cpu_up(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
-
-extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
-extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
-                             int from, int to);
-extern void smp_restart_with_online_cpu(void);
-extern void smp_restart_cpu(void);
+extern void smp_call_online_cpu(void (*func)(void *), void *);
+extern void smp_call_ipl_cpu(void (*func)(void *), void *);
 
-/*
- * returns 1 if (virtual) cpu is scheduled
- * returns 0 otherwise
- */
-static inline int smp_vcpu_scheduled(int cpu)
-{
-       u32 status;
-
-       switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
-       case sigp_status_stored:
-               /* Check for running status */
-               if (status & 0x400)
-                       return 0;
-               break;
-       case sigp_not_operational:
-               return 0;
-       default:
-               break;
-       }
-       return 1;
-}
+extern int smp_find_processor_id(u16 address);
+extern int smp_store_status(int cpu);
+extern int smp_vcpu_scheduled(int cpu);
+extern void smp_yield_cpu(int cpu);
+extern void smp_yield(void);
+extern void smp_stop_cpu(void);
 
 #else /* CONFIG_SMP */
 
-static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
+static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
        func(data);
 }
 
-static inline void smp_restart_with_online_cpu(void)
+static inline void smp_call_online_cpu(void (*func)(void *), void *data)
 {
+       func(data);
 }
 
-#define smp_vcpu_scheduled     (1)
+static inline int smp_find_processor_id(int address) { return 0; }
+static inline int smp_vcpu_scheduled(int cpu) { return 1; }
+static inline void smp_yield_cpu(int cpu) { }
+static inline void smp_yield(void) { }
+static inline void smp_stop_cpu(void) { }
 
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
+extern void __cpu_die(unsigned int cpu);
+extern int __cpu_disable(void);
 #else
 static inline int smp_rescan_cpus(void) { return 0; }
 static inline void cpu_die(void) { }
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 533f35751aeb9ad130af57e0f6cf551cfe873862..c4a11cfad3c8a55aa1178b5769f82d973d963c66 100644
@@ -40,8 +40,8 @@ struct vdso_per_cpu_data {
 extern struct vdso_data *vdso_data;
 
 #ifdef CONFIG_64BIT
-int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore);
-void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore);
+int vdso_alloc_per_cpu(struct _lowcore *lowcore);
+void vdso_free_per_cpu(struct _lowcore *lowcore);
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7d9ec924e7e7db947e569e17b3c18b827b1e5ec1..d0a48268eb27008dd1ed12b80d75acd013f0bfcc 100644
@@ -34,8 +34,6 @@ extra-y                               += $(if $(CONFIG_64BIT),head64.o,head31.o)
 obj-$(CONFIG_MODULES)          += s390_ksyms.o module.o
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_SCHED_BOOK)       += topology.o
-obj-$(CONFIG_SMP)              += $(if $(CONFIG_64BIT),switch_cpu64.o, \
-                                                       switch_cpu.o)
 obj-$(CONFIG_HIBERNATION)      += suspend.o swsusp_asm64.o
 obj-$(CONFIG_AUDIT)            += audit.o
 compat-obj-$(CONFIG_AUDIT)     += compat_audit.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 530ae0e8e38f73849253073fd66f20a9d1718a2b..aeeaf896be9b69604566172c60218a9e6113d7f4 100644
@@ -9,8 +9,8 @@
 #include <linux/kbuild.h>
 #include <linux/sched.h>
 #include <asm/vdso.h>
-#include <asm/sigp.h>
 #include <asm/pgtable.h>
+#include <asm/system.h>
 
 /*
  * Make sure that the compiler is new enough. We want a compiler that
@@ -70,12 +70,6 @@ int main(void)
        DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
        DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
        BLANK();
-       /* constants for SIGP */
-       DEFINE(__SIGP_STOP, sigp_stop);
-       DEFINE(__SIGP_RESTART, sigp_restart);
-       DEFINE(__SIGP_SENSE, sigp_sense);
-       DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
-       BLANK();
        /* lowcore offsets */
        DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
        DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
@@ -95,20 +89,19 @@ int main(void)
        DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
        DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
        DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
-       DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
-       BLANK();
-       DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
        DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
        DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
        DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
        DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
        DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
        DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
+       DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
        DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
        DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
        DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
        DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
        DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
+       BLANK();
        DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
        DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
        DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
@@ -129,12 +122,16 @@ int main(void)
        DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
        DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
        DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+       DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
+       DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
        DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
        DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
        DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
        DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
        DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
        DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
+       DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
+       BLANK();
        DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
        DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
        DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5f437b830da55051d632771d9072a15c8ead8197..6143521a4fffa5f9f311cc0ac71e97b61dd87f89 100644
@@ -2,7 +2,7 @@
  *  arch/s390/kernel/entry.S
  *    S390 low-level entry points.
  *
- *    Copyright (C) IBM Corp. 1999,2006
+ *    Copyright (C) IBM Corp. 1999,2012
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *              Hartmut Penner (hp@de.ibm.com),
  *              Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -691,77 +691,30 @@ mcck_panic:
 0:     ahi     %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        j       mcck_skip
 
-/*
- * Restart interruption handler, kick starter for additional CPUs
- */
-#ifdef CONFIG_SMP
-       __CPUINIT
-ENTRY(restart_int_handler)
-       basr    %r1,0
-restart_base:
-       spt     restart_vtime-restart_base(%r1)
-       stck    __LC_LAST_UPDATE_CLOCK
-       mvc     __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
-       mvc     __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
-       l       %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp
-       lctl    %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
-       lam     %a0,%a15,__LC_AREGS_SAVE_AREA
-       lm      %r6,%r15,__SF_GPRS(%r15)# load registers from clone
-       l       %r1,__LC_THREAD_INFO
-       mvc     __LC_USER_TIMER(8),__TI_user_timer(%r1)
-       mvc     __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
-       xc      __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
-       ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
-       basr    %r14,0
-       l       %r14,restart_addr-.(%r14)
-       basr    %r14,%r14               # call start_secondary
-restart_addr:
-       .long   start_secondary
-       .align  8
-restart_vtime:
-       .long   0x7fffffff,0xffffffff
-       .previous
-#else
-/*
- * If we do not run with SMP enabled, let the new CPU crash ...
- */
-ENTRY(restart_int_handler)
-       basr    %r1,0
-restart_base:
-       lpsw    restart_crash-restart_base(%r1)
-       .align  8
-restart_crash:
-       .long   0x000a0000,0x00000000
-restart_go:
-#endif
-
 #
 # PSW restart interrupt handler
 #
-ENTRY(psw_restart_int_handler)
+ENTRY(restart_int_handler)
        st      %r15,__LC_SAVE_AREA_RESTART
-       basr    %r15,0
-0:     l       %r15,.Lrestart_stack-0b(%r15)   # load restart stack
-       l       %r15,0(%r15)
+       l       %r15,__LC_RESTART_STACK
        ahi     %r15,-__PT_SIZE                 # create pt_regs on stack
+       xc      0(__PT_SIZE,%r15),0(%r15)
        stm     %r0,%r14,__PT_R0(%r15)
        mvc     __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
        mvc     __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
-       ahi     %r15,-STACK_FRAME_OVERHEAD
-       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-       basr    %r14,0
-1:     l       %r14,.Ldo_restart-1b(%r14)
-       basr    %r14,%r14
-       basr    %r14,0                          # load disabled wait PSW if
-2:     lpsw    restart_psw_crash-2b(%r14)      # do_restart returns
-       .align 4
-.Ldo_restart:
-       .long   do_restart
-.Lrestart_stack:
-       .long   restart_stack
-       .align 8
-restart_psw_crash:
-       .long   0x000a0000,0x00000000 + restart_psw_crash
+       ahi     %r15,-STACK_FRAME_OVERHEAD      # create stack frame on stack
+       xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
+       lm      %r1,%r3,__LC_RESTART_FN         # load fn, parm & source cpu
+       ltr     %r3,%r3                         # test source cpu address
+       jm      1f                              # negative -> skip source stop
+0:     sigp    %r4,%r3,1                       # sigp sense to source cpu
+       brc     10,0b                           # wait for status stored
+1:     basr    %r14,%r1                        # call function
+       stap    __SF_EMPTY(%r15)                # store cpu address
+       lh      %r3,__SF_EMPTY(%r15)
+2:     sigp    %r4,%r3,5                       # sigp stop to current cpu
+       brc     2,2b
+3:     j       3b
 
        .section .kprobes.text, "ax"
 
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index bf538aaf407d8ceb433f094e7d8b6cb22f2c65d5..92b1617d0c951ff1fc39939516fdbf0b408365e5 100644
@@ -9,6 +9,14 @@
 extern void (*pgm_check_table[128])(struct pt_regs *);
 extern void *restart_stack;
 
+void system_call(void);
+void pgm_check_handler(void);
+void ext_int_handler(void);
+void io_int_handler(void);
+void mcck_int_handler(void);
+void restart_int_handler(void);
+void restart_call_handler(void);
+
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
 
@@ -26,7 +34,6 @@ void do_notify_resume(struct pt_regs *regs);
 
 void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long);
 void do_restart(void);
-int __cpuinit start_secondary(void *cpuvoid);
 void __init startup_init(void);
 void die(struct pt_regs *regs, const char *str);
 
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index bacbd2848d40483638cdca28fd78f698dcb3fd18..e33789a457527b5f170ff979f9c2d16e0c8a5143 100644
@@ -2,7 +2,7 @@
  *  arch/s390/kernel/entry64.S
  *    S390 low-level entry points.
  *
- *    Copyright (C) IBM Corp. 1999,2010
+ *    Copyright (C) IBM Corp. 1999,2012
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *              Hartmut Penner (hp@de.ibm.com),
  *              Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -713,68 +713,30 @@ mcck_panic:
 0:     aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        j       mcck_skip
 
-/*
- * Restart interruption handler, kick starter for additional CPUs
- */
-#ifdef CONFIG_SMP
-       __CPUINIT
-ENTRY(restart_int_handler)
-       basr    %r1,0
-restart_base:
-       spt     restart_vtime-restart_base(%r1)
-       stck    __LC_LAST_UPDATE_CLOCK
-       mvc     __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
-       mvc     __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
-       lghi    %r10,__LC_GPREGS_SAVE_AREA
-       lg      %r15,120(%r10)          # load ksp
-       lghi    %r10,__LC_CREGS_SAVE_AREA
-       lctlg   %c0,%c15,0(%r10)        # get new ctl regs
-       lghi    %r10,__LC_AREGS_SAVE_AREA
-       lam     %a0,%a15,0(%r10)
-       lmg     %r6,%r15,__SF_GPRS(%r15)# load registers from clone
-       lg      %r1,__LC_THREAD_INFO
-       mvc     __LC_USER_TIMER(8),__TI_user_timer(%r1)
-       mvc     __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
-       xc      __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
-       ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
-       brasl   %r14,start_secondary
-       .align  8
-restart_vtime:
-       .long   0x7fffffff,0xffffffff
-       .previous
-#else
-/*
- * If we do not run with SMP enabled, let the new CPU crash ...
- */
-ENTRY(restart_int_handler)
-       basr    %r1,0
-restart_base:
-       lpswe   restart_crash-restart_base(%r1)
-       .align 8
-restart_crash:
-       .long  0x000a0000,0x00000000,0x00000000,0x00000000
-restart_go:
-#endif
-
 #
 # PSW restart interrupt handler
 #
-ENTRY(psw_restart_int_handler)
+ENTRY(restart_int_handler)
        stg     %r15,__LC_SAVE_AREA_RESTART
-       larl    %r15,restart_stack              # load restart stack
-       lg      %r15,0(%r15)
+       lg      %r15,__LC_RESTART_STACK
        aghi    %r15,-__PT_SIZE                 # create pt_regs on stack
+       xc      0(__PT_SIZE,%r15),0(%r15)
        stmg    %r0,%r14,__PT_R0(%r15)
        mvc     __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
        mvc     __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
-       aghi    %r15,-STACK_FRAME_OVERHEAD
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-       brasl   %r14,do_restart
-       larl    %r14,restart_psw_crash          # load disabled wait PSW if
-       lpswe   0(%r14)                         # do_restart returns
-       .align 8
-restart_psw_crash:
-       .quad   0x0002000080000000,0x0000000000000000 + restart_psw_crash
+       aghi    %r15,-STACK_FRAME_OVERHEAD      # create stack frame on stack
+       xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
+       lmg     %r1,%r3,__LC_RESTART_FN         # load fn, parm & source cpu
+       ltgr    %r3,%r3                         # test source cpu address
+       jm      1f                              # negative -> skip source stop
+0:     sigp    %r4,%r3,1                       # sigp sense to source cpu
+       brc     10,0b                           # wait for status stored
+1:     basr    %r14,%r1                        # call function
+       stap    __SF_EMPTY(%r15)                # store cpu address
+       llgh    %r3,__SF_EMPTY(%r15)
+2:     sigp    %r4,%r3,5                       # sigp stop to current cpu
+       brc     2,2b
+3:     j       3b
 
        .section .kprobes.text, "ax"
 
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index affa8e68124a18f6daad5ee18b03d4f8f0ce098e..e5a72a2b0c560d0cafac5967b81ba3927d9ac356 100644
@@ -2,7 +2,7 @@
  *  arch/s390/kernel/ipl.c
  *    ipl/reipl/dump support for Linux on s390.
  *
- *    Copyright IBM Corp. 2005,2007
+ *    Copyright IBM Corp. 2005,2012
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
  *              Heiko Carstens <heiko.carstens@de.ibm.com>
  *              Volker Sameske <sameske@de.ibm.com>
@@ -25,7 +25,6 @@
 #include <asm/ebcdic.h>
 #include <asm/reset.h>
 #include <asm/sclp.h>
-#include <asm/sigp.h>
 #include <asm/checksum.h>
 #include "entry.h"
 
@@ -571,7 +570,7 @@ static void __ipl_run(void *unused)
 
 static void ipl_run(struct shutdown_trigger *trigger)
 {
-       smp_switch_to_ipl_cpu(__ipl_run, NULL);
+       smp_call_ipl_cpu(__ipl_run, NULL);
 }
 
 static int __init ipl_init(void)
@@ -1101,7 +1100,7 @@ static void __reipl_run(void *unused)
 
 static void reipl_run(struct shutdown_trigger *trigger)
 {
-       smp_switch_to_ipl_cpu(__reipl_run, NULL);
+       smp_call_ipl_cpu(__reipl_run, NULL);
 }
 
 static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
@@ -1421,7 +1420,7 @@ static void dump_run(struct shutdown_trigger *trigger)
        if (dump_method == DUMP_METHOD_NONE)
                return;
        smp_send_stop();
-       smp_switch_to_ipl_cpu(__dump_run, NULL);
+       smp_call_ipl_cpu(__dump_run, NULL);
 }
 
 static int __init dump_ccw_init(void)
@@ -1623,9 +1622,7 @@ static void stop_run(struct shutdown_trigger *trigger)
        if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
            strcmp(trigger->name, ON_RESTART_STR) == 0)
                disabled_wait((unsigned long) __builtin_return_address(0));
-       while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
-               cpu_relax();
-       for (;;);
+       smp_stop_cpu();
 }
 
 static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
@@ -1738,9 +1735,8 @@ static ssize_t on_restart_store(struct kobject *kobj,
 static struct kobj_attribute on_restart_attr =
        __ATTR(on_restart, 0644, on_restart_show, on_restart_store);
 
-void do_restart(void)
+static void __do_restart(void *ignore)
 {
-       smp_restart_with_online_cpu();
        smp_send_stop();
 #ifdef CONFIG_CRASH_DUMP
        crash_kexec(NULL);
@@ -1749,6 +1745,11 @@ void do_restart(void)
        stop_run(&on_restart_trigger);
 }
 
+void do_restart(void)
+{
+       smp_call_online_cpu(__do_restart, NULL);
+}
+
 /* on halt */
 
 static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 47b168fb29c45f1c475bcf7cc50348b354fba262..bf6fbc03ebafbfc3cb9f889cef3f3f1868fe7dc9 100644
@@ -48,51 +48,22 @@ static void add_elf_notes(int cpu)
        memset(ptr, 0, sizeof(struct elf_note));
 }
 
-/*
- * Store status of next available physical CPU
- */
-static int store_status_next(int start_cpu, int this_cpu)
-{
-       struct save_area *sa = (void *) 4608 + store_prefix();
-       int cpu, rc;
-
-       for (cpu = start_cpu; cpu < 65536; cpu++) {
-               if (cpu == this_cpu)
-                       continue;
-               do {
-                       rc = raw_sigp(cpu, sigp_stop_and_store_status);
-               } while (rc == sigp_busy);
-               if (rc != sigp_order_code_accepted)
-                       continue;
-               if (sa->pref_reg)
-                       return cpu;
-       }
-       return -1;
-}
-
 /*
  * Initialize CPU ELF notes
  */
 void setup_regs(void)
 {
        unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
-       int cpu, this_cpu, phys_cpu = 0, first = 1;
+       int cpu, this_cpu;
 
-       this_cpu = stap();
-
-       if (!S390_lowcore.prefixreg_save_area)
-               first = 0;
+       this_cpu = smp_find_processor_id(stap());
+       add_elf_notes(this_cpu);
        for_each_online_cpu(cpu) {
-               if (first) {
-                       add_elf_notes(cpu);
-                       first = 0;
+               if (cpu == this_cpu)
+                       continue;
+               if (smp_store_status(cpu))
                        continue;
-               }
-               phys_cpu = store_status_next(phys_cpu, this_cpu);
-               if (phys_cpu == -1)
-                       break;
                add_elf_notes(cpu);
-               phys_cpu++;
        }
        /* Copy dump CPU store status info to absolute zero */
        memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
@@ -255,5 +226,5 @@ void machine_kexec(struct kimage *image)
                return;
        tracer_disable();
        smp_send_stop();
-       smp_switch_to_ipl_cpu(__machine_kexec, image);
+       smp_call_ipl_cpu(__machine_kexec, image);
 }
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 778c100fe314d0101601d3ee0e77ec362431f629..9a3edb5f2c9296c4026ac062ef9a821e9a2a4596 100644
@@ -2,7 +2,7 @@
  *  arch/s390/kernel/setup.c
  *
  *  S390 version
- *    Copyright (C) IBM Corp. 1999,2010
+ *    Copyright (C) IBM Corp. 1999,2012
  *    Author(s): Hartmut Penner (hp@de.ibm.com),
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
  *
@@ -62,6 +62,7 @@
 #include <asm/ebcdic.h>
 #include <asm/kvm_virtio.h>
 #include <asm/diag.h>
+#include "entry.h"
 
 long psw_kernel_bits   = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
                          PSW_MASK_EA | PSW_MASK_BA;
@@ -351,8 +352,9 @@ static void setup_addressing_mode(void)
        }
 }
 
-static void __init
-setup_lowcore(void)
+void *restart_stack __attribute__((__section__(".data")));
+
+static void __init setup_lowcore(void)
 {
        struct _lowcore *lc;
 
@@ -363,7 +365,7 @@ setup_lowcore(void)
        lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
        lc->restart_psw.mask = psw_kernel_bits;
        lc->restart_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
+               PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
        lc->external_new_psw.mask = psw_kernel_bits |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->external_new_psw.addr =
@@ -412,6 +414,24 @@ setup_lowcore(void)
        lc->last_update_timer = S390_lowcore.last_update_timer;
        lc->last_update_clock = S390_lowcore.last_update_clock;
        lc->ftrace_func = S390_lowcore.ftrace_func;
+
+       restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+       restart_stack += ASYNC_SIZE;
+
+       /*
+        * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
+        * restart data to the absolute zero lowcore. This is necessary if
+        * PSW restart is done on an offline CPU that has lowcore zero.
+        */
+       lc->restart_stack = (unsigned long) restart_stack;
+       lc->restart_fn = (unsigned long) do_restart;
+       lc->restart_data = 0;
+       lc->restart_source = -1UL;
+       memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
+              4*sizeof(unsigned long));
+       copy_to_absolute_zero(&S390_lowcore.restart_psw,
+                             &lc->restart_psw, sizeof(psw_t));
+
        set_prefix((u32)(unsigned long) lc);
        lowcore_ptr[0] = lc;
 }
@@ -572,27 +592,6 @@ static void __init setup_memory_end(void)
        }
 }
 
-void *restart_stack __attribute__((__section__(".data")));
-
-/*
- * Setup new PSW and allocate stack for PSW restart interrupt
- */
-static void __init setup_restart_psw(void)
-{
-       psw_t psw;
-
-       restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
-       restart_stack += ASYNC_SIZE;
-
-       /*
-        * Setup restart PSW for absolute zero lowcore. This is necesary
-        * if PSW restart is done on an offline CPU that has lowcore zero
-        */
-       psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
-       psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
-       copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
-}
-
 static void __init setup_vmcoreinfo(void)
 {
 #ifdef CONFIG_KEXEC
@@ -782,8 +781,7 @@ static void __init reserve_crashkernel(void)
 #endif
 }
 
-static void __init
-setup_memory(void)
+static void __init setup_memory(void)
 {
         unsigned long bootmap_size;
        unsigned long start_pfn, end_pfn;
@@ -1014,8 +1012,7 @@ static void __init setup_hwcaps(void)
  * was printed.
  */
 
-void __init
-setup_arch(char **cmdline_p)
+void __init setup_arch(char **cmdline_p)
 {
         /*
          * print what head.S has found out about the machine
@@ -1068,7 +1065,6 @@ setup_arch(char **cmdline_p)
        setup_memory();
        setup_resources();
        setup_vmcoreinfo();
-       setup_restart_psw();
        setup_lowcore();
 
         cpu_init();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2398ce6b15aec306ce8ee5c544133380ce18cba2..6db8526a602d7c4b50a01d967c50023e1341cc02 100644
@@ -1,23 +1,18 @@
 /*
- *  arch/s390/kernel/smp.c
+ *  SMP related functions
  *
- *    Copyright IBM Corp. 1999, 2009
- *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- *              Martin Schwidefsky (schwidefsky@de.ibm.com)
- *              Heiko Carstens (heiko.carstens@de.ibm.com)
+ *    Copyright IBM Corp. 1999,2012
+ *    Author(s): Denis Joseph Barrow,
+ *              Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *              Heiko Carstens <heiko.carstens@de.ibm.com>,
  *
  *  based on other smp stuff by
  *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
  *    (c) 1998 Ingo Molnar
  *
- * We work with logical cpu numbering everywhere we can. The only
- * functions using the real cpu address (got from STAP) are the sigp
- * functions. For all other functions we use the identity mapping.
- * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
- * used e.g. to find the idle task belonging to a logical cpu. Every array
- * in the kernel is sorted by the logical cpu number and not by the physical
- * one which is causing all the confusion with __cpu_logical_map and
- * cpu_number_map in other architectures.
+ * The code outside of smp.c uses logical cpu numbers, only smp.c does
+ * the translation of logical to physical cpu ids. All new code that
+ * operates on physical cpu numbers needs to go into smp.c.
  */
 
 #define KMSG_COMPONENT "cpu"
 #include <linux/spinlock.h>
 #include <linux/kernel_stat.h>
 #include <linux/delay.h>
-#include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/irqflags.h>
 #include <linux/cpu.h>
-#include <linux/timex.h>
-#include <linux/bootmem.h>
 #include <linux/slab.h>
 #include <linux/crash_dump.h>
 #include <asm/asm-offsets.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
-#include <asm/sigp.h>
-#include <asm/pgalloc.h>
 #include <asm/irq.h>
-#include <asm/cpcmd.h>
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
-#include <asm/cputime.h>
 #include <asm/vdso.h>
-#include <asm/cpu.h>
 #include "entry.h"
 
-/* logical cpu to cpu address */
-unsigned short __cpu_logical_map[NR_CPUS];
+enum {
+       sigp_sense = 1,
+       sigp_external_call = 2,
+       sigp_emergency_signal = 3,
+       sigp_start = 4,
+       sigp_stop = 5,
+       sigp_restart = 6,
+       sigp_stop_and_store_status = 9,
+       sigp_initial_cpu_reset = 11,
+       sigp_cpu_reset = 12,
+       sigp_set_prefix = 13,
+       sigp_store_status_at_address = 14,
+       sigp_store_extended_status_at_address = 15,
+       sigp_set_architecture = 18,
+       sigp_conditional_emergency_signal = 19,
+       sigp_sense_running = 21,
+};
 
-static struct task_struct *current_set[NR_CPUS];
+enum {
+       sigp_order_code_accepted = 0,
+       sigp_status_stored = 1,
+       sigp_busy = 2,
+       sigp_not_operational = 3,
+};
 
-static u8 smp_cpu_type;
-static int smp_use_sigp_detection;
+enum {
+       ec_schedule = 0,
+       ec_call_function,
+       ec_call_function_single,
+       ec_stop_cpu,
+};
 
-enum s390_cpu_state {
+enum {
        CPU_STATE_STANDBY,
        CPU_STATE_CONFIGURED,
 };
 
+struct pcpu {
+       struct cpu cpu;
+       struct task_struct *idle;       /* idle process for the cpu */
+       struct _lowcore *lowcore;       /* lowcore page(s) for the cpu */
+       unsigned long async_stack;      /* async stack for the cpu */
+       unsigned long panic_stack;      /* panic stack for the cpu */
+       unsigned long ec_mask;          /* bit mask for ec_xxx functions */
+       int state;                      /* physical cpu state */
+       u32 status;                     /* last status received via sigp */
+       u16 address;                    /* physical cpu address */
+};
+
+static u8 boot_cpu_type;
+static u16 boot_cpu_address;
+static struct pcpu pcpu_devices[NR_CPUS];
+
 DEFINE_MUTEX(smp_cpu_state_mutex);
-static int smp_cpu_state[NR_CPUS];
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
+/*
+ * Signal processor helper functions.
+ */
+static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
+{
+       register unsigned int reg1 asm ("1") = parm;
+       int cc;
 
-static void smp_ext_bitcall(int, int);
+       asm volatile(
+               "       sigp    %1,%2,0(%3)\n"
+               "       ipm     %0\n"
+               "       srl     %0,28\n"
+               : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
+       if (status && cc == 1)
+               *status = reg1;
+       return cc;
+}
 
-static int raw_cpu_stopped(int cpu)
+static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
 {
-       u32 status;
+       int cc;
 
-       switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
-       case sigp_status_stored:
-               /* Check for stopped and check stop state */
-               if (status & 0x50)
-                       return 1;
-               break;
-       default:
-               break;
+       while (1) {
+               cc = __pcpu_sigp(addr, order, parm, status);
+               if (cc != sigp_busy)
+                       return cc;
+               cpu_relax();
        }
-       return 0;
 }
 
-static inline int cpu_stopped(int cpu)
+static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
 {
-       return raw_cpu_stopped(cpu_logical_map(cpu));
+       int cc, retry;
+
+       for (retry = 0; ; retry++) {
+               cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
+               if (cc != sigp_busy)
+                       break;
+               if (retry >= 3)
+                       udelay(10);
+       }
+       return cc;
+}
+
+static inline int pcpu_stopped(struct pcpu *pcpu)
+{
+       if (__pcpu_sigp(pcpu->address, sigp_sense,
+                       0, &pcpu->status) != sigp_status_stored)
+               return 0;
+       /* Check for stopped and check stop state */
+       return !!(pcpu->status & 0x50);
+}
+
+static inline int pcpu_running(struct pcpu *pcpu)
+{
+       if (__pcpu_sigp(pcpu->address, sigp_sense_running,
+                       0, &pcpu->status) != sigp_status_stored)
+               return 1;
+       /* Check for running status */
+       return !(pcpu->status & 0x400);
 }
 
 /*
- * Ensure that PSW restart is done on an online CPU
+ * Find struct pcpu by cpu address.
  */
-void smp_restart_with_online_cpu(void)
+static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
 {
        int cpu;
 
-       for_each_online_cpu(cpu) {
-               if (stap() == __cpu_logical_map[cpu]) {
-                       /* We are online: Enable DAT again and return */
-                       __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
-                       return;
-               }
+       for_each_cpu(cpu, mask)
+               if (pcpu_devices[cpu].address == address)
+                       return pcpu_devices + cpu;
+       return NULL;
+}
+
+static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
+{
+       int order;
+
+       set_bit(ec_bit, &pcpu->ec_mask);
+       order = pcpu_running(pcpu) ?
+               sigp_external_call : sigp_emergency_signal;
+       pcpu_sigp_retry(pcpu, order, 0);
+}
+
+static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
+{
+       struct _lowcore *lc;
+
+       if (pcpu != &pcpu_devices[0]) {
+               pcpu->lowcore = (struct _lowcore *)
+                       __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
+               pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+               pcpu->panic_stack = __get_free_page(GFP_KERNEL);
+               if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
+                       goto out;
        }
-       /* We are not online: Do PSW restart on an online CPU */
-       while (sigp(cpu, sigp_restart) == sigp_busy)
-               cpu_relax();
-       /* And stop ourself */
-       while (raw_sigp(stap(), sigp_stop) == sigp_busy)
-               cpu_relax();
-       for (;;);
+       lc = pcpu->lowcore;
+       memcpy(lc, &S390_lowcore, 512);
+       memset((char *) lc + 512, 0, sizeof(*lc) - 512);
+       lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
+       lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
+       lc->cpu_nr = cpu;
+#ifndef CONFIG_64BIT
+       if (MACHINE_HAS_IEEE) {
+               lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
+               if (!lc->extended_save_area_addr)
+                       goto out;
+       }
+#else
+       if (vdso_alloc_per_cpu(lc))
+               goto out;
+#endif
+       lowcore_ptr[cpu] = lc;
+       pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
+       return 0;
+out:
+       if (pcpu != &pcpu_devices[0]) {
+               free_page(pcpu->panic_stack);
+               free_pages(pcpu->async_stack, ASYNC_ORDER);
+               free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
+       }
+       return -ENOMEM;
 }
 
-void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
+static void pcpu_free_lowcore(struct pcpu *pcpu)
 {
-       struct _lowcore *lc, *current_lc;
-       struct stack_frame *sf;
-       struct pt_regs *regs;
-       unsigned long sp;
-
-       if (smp_processor_id() == 0)
-               func(data);
-       __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE |
-                       PSW_MASK_EA | PSW_MASK_BA);
-       /* Disable lowcore protection */
-       __ctl_clear_bit(0, 28);
-       current_lc = lowcore_ptr[smp_processor_id()];
-       lc = lowcore_ptr[0];
-       if (!lc)
-               lc = current_lc;
-       lc->restart_psw.mask =
-               PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
-       lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
-       if (!cpu_online(0))
-               smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
-       while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
-               cpu_relax();
-       sp = lc->panic_stack;
-       sp -= sizeof(struct pt_regs);
-       regs = (struct pt_regs *) sp;
-       memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
-       regs->psw = current_lc->psw_save_area;
-       sp -= STACK_FRAME_OVERHEAD;
-       sf = (struct stack_frame *) sp;
-       sf->back_chain = 0;
-       smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
+       pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
+       lowcore_ptr[pcpu - pcpu_devices] = NULL;
+#ifndef CONFIG_64BIT
+       if (MACHINE_HAS_IEEE) {
+               struct _lowcore *lc = pcpu->lowcore;
+
+               free_page((unsigned long) lc->extended_save_area_addr);
+               lc->extended_save_area_addr = 0;
+       }
+#else
+       vdso_free_per_cpu(pcpu->lowcore);
+#endif
+       if (pcpu != &pcpu_devices[0]) {
+               free_page(pcpu->panic_stack);
+               free_pages(pcpu->async_stack, ASYNC_ORDER);
+               free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
+       }
+}
+
+static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
+{
+       struct _lowcore *lc = pcpu->lowcore;
+
+       atomic_inc(&init_mm.context.attach_count);
+       lc->cpu_nr = cpu;
+       lc->percpu_offset = __per_cpu_offset[cpu];
+       lc->kernel_asce = S390_lowcore.kernel_asce;
+       lc->machine_flags = S390_lowcore.machine_flags;
+       lc->ftrace_func = S390_lowcore.ftrace_func;
+       lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+       __ctl_store(lc->cregs_save_area, 0, 15);
+       save_access_regs((unsigned int *) lc->access_regs_save_area);
+       memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+              MAX_FACILITY_BIT/8);
+}
+
+static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
+{
+       struct _lowcore *lc = pcpu->lowcore;
+       struct thread_info *ti = task_thread_info(tsk);
+
+       lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
+       lc->thread_info = (unsigned long) task_thread_info(tsk);
+       lc->current_task = (unsigned long) tsk;
+       lc->user_timer = ti->user_timer;
+       lc->system_timer = ti->system_timer;
+       lc->steal_timer = 0;
+}
+
+static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
+{
+       struct _lowcore *lc = pcpu->lowcore;
+
+       lc->restart_stack = lc->kernel_stack;
+       lc->restart_fn = (unsigned long) func;
+       lc->restart_data = (unsigned long) data;
+       lc->restart_source = -1UL;
+       pcpu_sigp_retry(pcpu, sigp_restart, 0);
+}
+
+/*
+ * Call function via PSW restart on pcpu and stop the current cpu.
+ */
+static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
+                         void *data, unsigned long stack)
+{
+       struct _lowcore *lc = pcpu->lowcore;
+       unsigned short this_cpu;
+
+       __load_psw_mask(psw_kernel_bits);
+       this_cpu = stap();
+       if (pcpu->address == this_cpu)
+               func(data);     /* should not return */
+       /* Stop target cpu (if func returns this stops the current cpu). */
+       pcpu_sigp_retry(pcpu, sigp_stop, 0);
+       /* Restart func on the target cpu and stop the current cpu. */
+       lc->restart_stack = stack;
+       lc->restart_fn = (unsigned long) func;
+       lc->restart_data = (unsigned long) data;
+       lc->restart_source = (unsigned long) this_cpu;
+       asm volatile(
+               "0:     sigp    0,%0,6  # sigp restart to target cpu\n"
+               "       brc     2,0b    # busy, try again\n"
+               "1:     sigp    0,%1,5  # sigp stop to current cpu\n"
+               "       brc     2,1b    # busy, try again\n"
+               : : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
+       for (;;) ;
+}
+
+/*
+ * Call function on an online CPU.
+ */
+void smp_call_online_cpu(void (*func)(void *), void *data)
+{
+       struct pcpu *pcpu;
+
+       /* Use the current cpu if it is online. */
+       pcpu = pcpu_find_address(cpu_online_mask, stap());
+       if (!pcpu)
+               /* Use the first online cpu. */
+               pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
+       pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
+}
+
+/*
+ * Call function on the ipl CPU.
+ */
+void smp_call_ipl_cpu(void (*func)(void *), void *data)
+{
+       pcpu_delegate(&pcpu_devices[0], func, data, pcpu_devices->panic_stack);
+}
+
+int smp_find_processor_id(u16 address)
+{
+       int cpu;
+
+       for_each_present_cpu(cpu)
+               if (pcpu_devices[cpu].address == address)
+                       return cpu;
+       return -1;
 }
 
-static void smp_stop_cpu(void)
+int smp_vcpu_scheduled(int cpu)
 {
-       while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
+       return pcpu_running(pcpu_devices + cpu);
+}
+
+void smp_yield(void)
+{
+       if (MACHINE_HAS_DIAG44)
+               asm volatile("diag 0,0,0x44");
+}
+
+void smp_yield_cpu(int cpu)
+{
+       if (MACHINE_HAS_DIAG9C)
+               asm volatile("diag %0,0,0x9c"
+                            : : "d" (pcpu_devices[cpu].address));
+       else if (MACHINE_HAS_DIAG44)
+               asm volatile("diag 0,0,0x44");
+}
+
+/*
+ * Send cpus emergency shutdown signal. This gives the cpus the
+ * opportunity to complete outstanding interrupts.
+ */
+void smp_emergency_stop(cpumask_t *cpumask)
+{
+       u64 end;
+       int cpu;
+
+       end = get_clock() + (1000000UL << 12);
+       for_each_cpu(cpu, cpumask) {
+               struct pcpu *pcpu = pcpu_devices + cpu;
+               set_bit(ec_stop_cpu, &pcpu->ec_mask);
+               while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
+                                  0, NULL) == sigp_busy &&
+                      get_clock() < end)
+                       cpu_relax();
+       }
+       while (get_clock() < end) {
+               for_each_cpu(cpu, cpumask)
+                       if (pcpu_stopped(pcpu_devices + cpu))
+                               cpumask_clear_cpu(cpu, cpumask);
+               if (cpumask_empty(cpumask))
+                       break;
                cpu_relax();
+       }
 }
 
+/*
+ * Stop all cpus but the current one.
+ */
 void smp_send_stop(void)
 {
        cpumask_t cpumask;
        int cpu;
-       u64 end;
 
        /* Disable all interrupts/machine checks */
        __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
@@ -173,56 +409,46 @@ void smp_send_stop(void)
        cpumask_copy(&cpumask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &cpumask);
 
-       if (oops_in_progress) {
-               /*
-                * Give the other cpus the opportunity to complete
-                * outstanding interrupts before stopping them.
-                */
-               end = get_clock() + (1000000UL << 12);
-               for_each_cpu(cpu, &cpumask) {
-                       set_bit(ec_stop_cpu, (unsigned long *)
-                               &lowcore_ptr[cpu]->ext_call_fast);
-                       while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
-                              get_clock() < end)
-                               cpu_relax();
-               }
-               while (get_clock() < end) {
-                       for_each_cpu(cpu, &cpumask)
-                               if (cpu_stopped(cpu))
-                                       cpumask_clear_cpu(cpu, &cpumask);
-                       if (cpumask_empty(&cpumask))
-                               break;
-                       cpu_relax();
-               }
-       }
+       if (oops_in_progress)
+               smp_emergency_stop(&cpumask);
 
        /* stop all processors */
        for_each_cpu(cpu, &cpumask) {
-               while (sigp(cpu, sigp_stop) == sigp_busy)
-                       cpu_relax();
-               while (!cpu_stopped(cpu))
+               struct pcpu *pcpu = pcpu_devices + cpu;
+               pcpu_sigp_retry(pcpu, sigp_stop, 0);
+               while (!pcpu_stopped(pcpu))
                        cpu_relax();
        }
 }
 
+/*
+ * Stop the current cpu.
+ */
+void smp_stop_cpu(void)
+{
+       pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
+       for (;;) ;
+}
+
 /*
  * This is the main routine where commands issued by other
  * cpus are handled.
  */
-
 static void do_ext_call_interrupt(unsigned int ext_int_code,
                                  unsigned int param32, unsigned long param64)
 {
        unsigned long bits;
+       int cpu;
 
+       cpu = smp_processor_id();
        if ((ext_int_code & 0xffff) == 0x1202)
-               kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++;
+               kstat_cpu(cpu).irqs[EXTINT_EXC]++;
        else
-               kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++;
+               kstat_cpu(cpu).irqs[EXTINT_EMS]++;
        /*
         * handle bit signal external calls
         */
-       bits = xchg(&S390_lowcore.ext_call_fast, 0);
+       bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
 
        if (test_bit(ec_stop_cpu, &bits))
                smp_stop_cpu();
@@ -238,38 +464,17 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 
 }
 
-/*
- * Send an external call sigp to another cpu and return without waiting
- * for its completion.
- */
-static void smp_ext_bitcall(int cpu, int sig)
-{
-       int order;
-
-       /*
-        * Set signaling bit in lowcore of target cpu and kick it
-        */
-       set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
-       while (1) {
-               order = smp_vcpu_scheduled(cpu) ?
-                       sigp_external_call : sigp_emergency_signal;
-               if (sigp(cpu, order) != sigp_busy)
-                       break;
-               udelay(10);
-       }
-}
-
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
        int cpu;
 
        for_each_cpu(cpu, mask)
-               smp_ext_bitcall(cpu, ec_call_function);
+               pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-       smp_ext_bitcall(cpu, ec_call_function_single);
+       pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
 }
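pcpu_ec_call() is likewise introduced earlier in the patch. Based on the deleted smp_ext_bitcall() above, it presumably sets the requested bit in pcpu->ec_mask, the counterpart of the xchg() in do_ext_call_interrupt(), and then kicks the target cpu. The sketch below is an assumption, with pcpu_running() standing in for the old smp_vcpu_scheduled() test:

        /* Hypothetical sketch of pcpu_ec_call, modeled on smp_ext_bitcall. */
        static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
        {
                int order;

                set_bit(ec_bit, &pcpu->ec_mask);
                order = pcpu_running(pcpu) ?    /* assumed helper */
                        sigp_external_call : sigp_emergency_signal;
                pcpu_sigp_retry(pcpu, order, 0);
        }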
 
 #ifndef CONFIG_64BIT
@@ -295,15 +500,16 @@ EXPORT_SYMBOL(smp_ptlb_all);
  */
 void smp_send_reschedule(int cpu)
 {
-       smp_ext_bitcall(cpu, ec_schedule);
+       pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
 }
 
 /*
  * parameter area for the set/clear control bit callbacks
  */
 struct ec_creg_mask_parms {
-       unsigned long orvals[16];
-       unsigned long andvals[16];
+       unsigned long orval;
+       unsigned long andval;
+       int cr;
 };
 
 /*
@@ -313,11 +519,9 @@ static void smp_ctl_bit_callback(void *info)
 {
        struct ec_creg_mask_parms *pp = info;
        unsigned long cregs[16];
-       int i;
 
        __ctl_store(cregs, 0, 15);
-       for (i = 0; i <= 15; i++)
-               cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
+       cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
        __ctl_load(cregs, 0, 15);
 }
 
@@ -326,11 +530,8 @@ static void smp_ctl_bit_callback(void *info)
  */
 void smp_ctl_set_bit(int cr, int bit)
 {
-       struct ec_creg_mask_parms parms;
+       struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
 
-       memset(&parms.orvals, 0, sizeof(parms.orvals));
-       memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-       parms.orvals[cr] = 1UL << bit;
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -340,216 +541,175 @@ EXPORT_SYMBOL(smp_ctl_set_bit);
  */
 void smp_ctl_clear_bit(int cr, int bit)
 {
-       struct ec_creg_mask_parms parms;
+       struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
 
-       memset(&parms.orvals, 0, sizeof(parms.orvals));
-       memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-       parms.andvals[cr] = ~(1UL << bit);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
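Collapsing the parameter area from two 16-entry arrays to a single or/and pair plus a register number makes the callback cheaper and the call sites one-liners. An illustrative use, borrowing CR0 bit 10 (the cpu-timer subclass that init_cpu_vtimer() toggles per cpu further down):

        /* Illustration only: flip one control-register bit on every cpu. */
        smp_ctl_set_bit(0, 10);         /* or 1UL << 10 into CR0 everywhere */
        smp_ctl_clear_bit(0, 10);       /* and mask it out again */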
 
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
 
-static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
+struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
+EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
+
+static void __init smp_get_save_area(int cpu, u16 address)
 {
-       if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE)
-               return;
+       void *lc = pcpu_devices[0].lowcore;
+       struct save_area *save_area;
+
        if (is_kdump_kernel())
                return;
+       if (!OLDMEM_BASE && (address == boot_cpu_address ||
+                            ipl_info.type != IPL_TYPE_FCP_DUMP))
+               return;
        if (cpu >= NR_CPUS) {
-               pr_warning("CPU %i exceeds the maximum %i and is excluded from "
-                          "the dump\n", cpu, NR_CPUS - 1);
+               pr_warning("CPU %i exceeds the maximum %i and is excluded "
+                          "from the dump\n", cpu, NR_CPUS - 1);
                return;
        }
-       zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
-       while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
-               cpu_relax();
-       memcpy_real(zfcpdump_save_areas[cpu],
-                   (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
-                   sizeof(struct save_area));
+       save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
+       if (!save_area)
+               panic("could not allocate memory for save area\n");
+       zfcpdump_save_areas[cpu] = save_area;
+#ifdef CONFIG_CRASH_DUMP
+       if (address == boot_cpu_address) {
+               /* Copy the registers of the boot cpu. */
+               copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
+                                SAVE_AREA_BASE - PAGE_SIZE, 0);
+               return;
+       }
+#endif
+       /* Get the registers of a non-boot cpu. */
+       __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
+       memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
 }
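The boot cpu's registers were stored by the previous kernel, so they are fetched through copy_oldmem_page() rather than sigp stop-and-store-status. The offset arithmetic, worked out (SAVE_AREA_BASE is 0x1200 on 64 bit, hence the literal 0x200 the deleted smp_detect_cpus() code used):

        /* absolute address = pfn * PAGE_SIZE + offset
         *                  = 1 * 0x1000 + (SAVE_AREA_BASE - PAGE_SIZE)
         *                  = SAVE_AREA_BASE
         */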
 
-struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
-EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
-
-#else
-
-static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
-
-#endif /* CONFIG_ZFCPDUMP */
-
-static int cpu_known(int cpu_id)
+int smp_store_status(int cpu)
 {
-       int cpu;
+       struct pcpu *pcpu;
 
-       for_each_present_cpu(cpu) {
-               if (__cpu_logical_map[cpu] == cpu_id)
-                       return 1;
-       }
+       pcpu = pcpu_devices + cpu;
+       if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
+                             0, NULL) != sigp_order_code_accepted)
+               return -EIO;
        return 0;
 }
 
-static int smp_rescan_cpus_sigp(cpumask_t avail)
-{
-       int cpu_id, logical_cpu;
+#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
 
-       logical_cpu = cpumask_first(&avail);
-       if (logical_cpu >= nr_cpu_ids)
-               return 0;
-       for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
-               if (cpu_known(cpu_id))
-                       continue;
-               __cpu_logical_map[logical_cpu] = cpu_id;
-               cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
-               if (!cpu_stopped(logical_cpu))
-                       continue;
-               set_cpu_present(logical_cpu, true);
-               smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
-               logical_cpu = cpumask_next(logical_cpu, &avail);
-               if (logical_cpu >= nr_cpu_ids)
-                       break;
-       }
-       return 0;
-}
+static inline void smp_get_save_area(int cpu, u16 address) { }
+
+#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
 
-static int smp_rescan_cpus_sclp(cpumask_t avail)
+static struct sclp_cpu_info *smp_get_cpu_info(void)
 {
+       static int use_sigp_detection;
        struct sclp_cpu_info *info;
-       int cpu_id, logical_cpu, cpu;
-       int rc;
-
-       logical_cpu = cpumask_first(&avail);
-       if (logical_cpu >= nr_cpu_ids)
-               return 0;
-       info = kmalloc(sizeof(*info), GFP_KERNEL);
-       if (!info)
-               return -ENOMEM;
-       rc = sclp_get_cpu_info(info);
-       if (rc)
-               goto out;
-       for (cpu = 0; cpu < info->combined; cpu++) {
-               if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
-                       continue;
-               cpu_id = info->cpu[cpu].address;
-               if (cpu_known(cpu_id))
-                       continue;
-               __cpu_logical_map[logical_cpu] = cpu_id;
-               cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
-               set_cpu_present(logical_cpu, true);
-               if (cpu >= info->configured)
-                       smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
-               else
-                       smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
-               logical_cpu = cpumask_next(logical_cpu, &avail);
-               if (logical_cpu >= nr_cpu_ids)
-                       break;
+       int address;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
+               use_sigp_detection = 1;
+               for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
+                       if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
+                           sigp_not_operational)
+                               continue;
+                       info->cpu[info->configured].address = address;
+                       info->configured++;
+               }
+               info->combined = info->configured;
        }
-out:
-       kfree(info);
-       return rc;
+       return info;
 }
 
-static int __smp_rescan_cpus(void)
+static int __devinit smp_add_present_cpu(int cpu);
+
+static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
+                                      int sysfs_add)
 {
+       struct pcpu *pcpu;
        cpumask_t avail;
+       int cpu, nr, i;
 
+       nr = 0;
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
-       if (smp_use_sigp_detection)
-               return smp_rescan_cpus_sigp(avail);
-       else
-               return smp_rescan_cpus_sclp(avail);
+       cpu = cpumask_first(&avail);
+       for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
+               if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+                       continue;
+               if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
+                       continue;
+               pcpu = pcpu_devices + cpu;
+               pcpu->address = info->cpu[i].address;
+               pcpu->state = (cpu >= info->configured) ?
+                       CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
+               cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+               set_cpu_present(cpu, true);
+               if (sysfs_add && smp_add_present_cpu(cpu) != 0)
+                       set_cpu_present(cpu, false);
+               else
+                       nr++;
+               cpu = cpumask_next(cpu, &avail);
+       }
+       return nr;
 }
 
 static void __init smp_detect_cpus(void)
 {
        unsigned int cpu, c_cpus, s_cpus;
        struct sclp_cpu_info *info;
-       u16 boot_cpu_addr, cpu_addr;
 
-       c_cpus = 1;
-       s_cpus = 0;
-       boot_cpu_addr = __cpu_logical_map[0];
-       info = kmalloc(sizeof(*info), GFP_KERNEL);
+       info = smp_get_cpu_info();
        if (!info)
                panic("smp_detect_cpus failed to allocate memory\n");
-#ifdef CONFIG_CRASH_DUMP
-       if (OLDMEM_BASE && !is_kdump_kernel()) {
-               struct save_area *save_area;
-
-               save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
-               if (!save_area)
-                       panic("could not allocate memory for save area\n");
-               copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
-                                0x200, 0);
-               zfcpdump_save_areas[0] = save_area;
-       }
-#endif
-       /* Use sigp detection algorithm if sclp doesn't work. */
-       if (sclp_get_cpu_info(info)) {
-               smp_use_sigp_detection = 1;
-               for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
-                       if (cpu == boot_cpu_addr)
-                               continue;
-                       if (!raw_cpu_stopped(cpu))
-                               continue;
-                       smp_get_save_area(c_cpus, cpu);
-                       c_cpus++;
-               }
-               goto out;
-       }
-
        if (info->has_cpu_type) {
                for (cpu = 0; cpu < info->combined; cpu++) {
-                       if (info->cpu[cpu].address == boot_cpu_addr) {
-                               smp_cpu_type = info->cpu[cpu].type;
-                               break;
-                       }
+                       if (info->cpu[cpu].address != boot_cpu_address)
+                               continue;
+                       /* The boot cpu dictates the cpu type. */
+                       boot_cpu_type = info->cpu[cpu].type;
+                       break;
                }
        }
-
+       c_cpus = s_cpus = 0;
        for (cpu = 0; cpu < info->combined; cpu++) {
-               if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+               if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
                        continue;
-               cpu_addr = info->cpu[cpu].address;
-               if (cpu_addr == boot_cpu_addr)
-                       continue;
-               if (!raw_cpu_stopped(cpu_addr)) {
+               if (cpu < info->configured) {
+                       smp_get_save_area(c_cpus, info->cpu[cpu].address);
+                       c_cpus++;
+               } else
                        s_cpus++;
-                       continue;
-               }
-               smp_get_save_area(c_cpus, cpu_addr);
-               c_cpus++;
        }
-out:
-       kfree(info);
        pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
        get_online_cpus();
-       __smp_rescan_cpus();
+       __smp_rescan_cpus(info, 0);
        put_online_cpus();
+       kfree(info);
 }
 
 /*
  *     Activate a secondary processor.
  */
-int __cpuinit start_secondary(void *cpuvoid)
+static void __cpuinit smp_start_secondary(void *cpuvoid)
 {
+       S390_lowcore.last_update_clock = get_clock();
+       S390_lowcore.restart_stack = (unsigned long) restart_stack;
+       S390_lowcore.restart_fn = (unsigned long) do_restart;
+       S390_lowcore.restart_data = 0;
+       S390_lowcore.restart_source = -1UL;
+       restore_access_regs(S390_lowcore.access_regs_save_area);
+       __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
+       __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
        cpu_init();
        preempt_disable();
        init_cpu_timer();
        init_cpu_vtimer();
        pfault_init();
-
        notify_cpu_starting(smp_processor_id());
        ipi_call_lock();
        set_cpu_online(smp_processor_id(), true);
        ipi_call_unlock();
-       __ctl_clear_bit(0, 28); /* Disable lowcore protection */
-       S390_lowcore.restart_psw.mask =
-               PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
-       S390_lowcore.restart_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
-       __ctl_set_bit(0, 28); /* Enable lowcore protection */
        /*
         * Wait until the cpu which brought this one up marked it
         * active before enabling interrupts.
@@ -559,7 +719,6 @@ int __cpuinit start_secondary(void *cpuvoid)
        local_irq_enable();
        /* cpu_idle will call schedule for us */
        cpu_idle();
-       return 0;
 }
 
 struct create_idle {
@@ -578,82 +737,20 @@ static void __cpuinit smp_fork_idle(struct work_struct *work)
        complete(&c_idle->done);
 }
 
-static int __cpuinit smp_alloc_lowcore(int cpu)
-{
-       unsigned long async_stack, panic_stack;
-       struct _lowcore *lowcore;
-
-       lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-       if (!lowcore)
-               return -ENOMEM;
-       async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-       panic_stack = __get_free_page(GFP_KERNEL);
-       if (!panic_stack || !async_stack)
-               goto out;
-       memcpy(lowcore, &S390_lowcore, 512);
-       memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
-       lowcore->async_stack = async_stack + ASYNC_SIZE;
-       lowcore->panic_stack = panic_stack + PAGE_SIZE;
-       lowcore->restart_psw.mask =
-               PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
-       lowcore->restart_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
-       if (user_mode != HOME_SPACE_MODE)
-               lowcore->restart_psw.mask |= PSW_ASC_HOME;
-#ifndef CONFIG_64BIT
-       if (MACHINE_HAS_IEEE) {
-               unsigned long save_area;
-
-               save_area = get_zeroed_page(GFP_KERNEL);
-               if (!save_area)
-                       goto out;
-               lowcore->extended_save_area_addr = (u32) save_area;
-       }
-#else
-       if (vdso_alloc_per_cpu(cpu, lowcore))
-               goto out;
-#endif
-       lowcore_ptr[cpu] = lowcore;
-       return 0;
-
-out:
-       free_page(panic_stack);
-       free_pages(async_stack, ASYNC_ORDER);
-       free_pages((unsigned long) lowcore, LC_ORDER);
-       return -ENOMEM;
-}
-
-static void smp_free_lowcore(int cpu)
-{
-       struct _lowcore *lowcore;
-
-       lowcore = lowcore_ptr[cpu];
-#ifndef CONFIG_64BIT
-       if (MACHINE_HAS_IEEE)
-               free_page((unsigned long) lowcore->extended_save_area_addr);
-#else
-       vdso_free_per_cpu(cpu, lowcore);
-#endif
-       free_page(lowcore->panic_stack - PAGE_SIZE);
-       free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
-       free_pages((unsigned long) lowcore, LC_ORDER);
-       lowcore_ptr[cpu] = NULL;
-}
-
 /* Upping and downing of CPUs */
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-       struct _lowcore *cpu_lowcore;
        struct create_idle c_idle;
-       struct task_struct *idle;
-       struct stack_frame *sf;
-       u32 lowcore;
-       int ccode;
+       struct pcpu *pcpu;
+       int rc;
 
-       if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
+       pcpu = pcpu_devices + cpu;
+       if (pcpu->state != CPU_STATE_CONFIGURED)
                return -EIO;
-       idle = current_set[cpu];
-       if (!idle) {
+       if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
+           sigp_order_code_accepted)
+               return -EIO;
+       if (!pcpu->idle) {
                c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
                INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
                c_idle.cpu = cpu;
@@ -661,68 +758,28 @@ int __cpuinit __cpu_up(unsigned int cpu)
                wait_for_completion(&c_idle.done);
                if (IS_ERR(c_idle.idle))
                        return PTR_ERR(c_idle.idle);
-               idle = c_idle.idle;
-               current_set[cpu] = c_idle.idle;
+               pcpu->idle = c_idle.idle;
        }
-       init_idle(idle, cpu);
-       if (smp_alloc_lowcore(cpu))
-               return -ENOMEM;
-       do {
-               ccode = sigp(cpu, sigp_initial_cpu_reset);
-               if (ccode == sigp_busy)
-                       udelay(10);
-               if (ccode == sigp_not_operational)
-                       goto err_out;
-       } while (ccode == sigp_busy);
-
-       lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
-       while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
-               udelay(10);
-
-       cpu_lowcore = lowcore_ptr[cpu];
-       cpu_lowcore->kernel_stack = (unsigned long)
-               task_stack_page(idle) + THREAD_SIZE;
-       cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
-       sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
-                                    - sizeof(struct pt_regs)
-                                    - sizeof(struct stack_frame));
-       memset(sf, 0, sizeof(struct stack_frame));
-       sf->gprs[9] = (unsigned long) sf;
-       cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf;
-       __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
-       atomic_inc(&init_mm.context.attach_count);
-       asm volatile(
-               "       stam    0,15,0(%0)"
-               : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
-       cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
-       cpu_lowcore->current_task = (unsigned long) idle;
-       cpu_lowcore->cpu_nr = cpu;
-       cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
-       cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
-       cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
-       memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
-              MAX_FACILITY_BIT/8);
-       eieio();
-
-       while (sigp(cpu, sigp_restart) == sigp_busy)
-               udelay(10);
-
+       init_idle(pcpu->idle, cpu);
+       rc = pcpu_alloc_lowcore(pcpu, cpu);
+       if (rc)
+               return rc;
+       pcpu_prepare_secondary(pcpu, cpu);
+       pcpu_attach_task(pcpu, pcpu->idle);
+       pcpu_start_fn(pcpu, smp_start_secondary, NULL);
        while (!cpu_online(cpu))
                cpu_relax();
        return 0;
-
-err_out:
-       smp_free_lowcore(cpu);
-       return -EIO;
 }
 
 static int __init setup_possible_cpus(char *s)
 {
-       int pcpus, cpu;
+       int max, cpu;
 
-       pcpus = simple_strtoul(s, NULL, 0);
+       if (kstrtoint(s, 0, &max) < 0)
+               return 0;
        init_cpu_possible(cpumask_of(0));
-       for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
+       for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
                set_cpu_possible(cpu, true);
        return 0;
 }
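The reworked parser keeps the old semantics: cpu 0 is always possible and at most "max" cpus are marked. For illustration, booting with

        possible_cpus=4

marks cpus 0-3 as possible and caps later cpu hot-plug at four cpus.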
@@ -732,113 +789,67 @@ early_param("possible_cpus", setup_possible_cpus);
 
 int __cpu_disable(void)
 {
-       struct ec_creg_mask_parms cr_parms;
-       int cpu = smp_processor_id();
-
-       set_cpu_online(cpu, false);
+       unsigned long cregs[16];
 
-       /* Disable pfault pseudo page faults on this cpu. */
+       set_cpu_online(smp_processor_id(), false);
+       /* Disable pseudo page faults on this cpu. */
        pfault_fini();
-
-       memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
-       memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
-
-       /* disable all external interrupts */
-       cr_parms.orvals[0] = 0;
-       cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
-                               1 << 10 | 1 <<  9 | 1 <<  6 | 1 <<  5 |
-                               1 <<  4);
-       /* disable all I/O interrupts */
-       cr_parms.orvals[6] = 0;
-       cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
-                               1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
-       /* disable most machine checks */
-       cr_parms.orvals[14] = 0;
-       cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
-                                1 << 25 | 1 << 24);
-
-       smp_ctl_bit_callback(&cr_parms);
-
+       /* Disable interrupt sources via control register. */
+       __ctl_store(cregs, 0, 15);
+       cregs[0]  &= ~0x0000ee70UL;     /* disable all external interrupts */
+       cregs[6]  &= ~0xff000000UL;     /* disable all I/O interrupts */
+       cregs[14] &= ~0x1f000000UL;     /* disable most machine checks */
+       __ctl_load(cregs, 0, 15);
        return 0;
 }
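The three hex masks are exactly the old per-bit lists folded into constants; worked out:

        /* CR0 : 1<<15|1<<14|1<<13|1<<11|1<<10|1<<9|1<<6|1<<5|1<<4 = 0x0000ee70 */
        /* CR6 : 1<<31|1<<30|1<<29|1<<28|1<<27|1<<26|1<<25|1<<24   = 0xff000000 */
        /* CR14: 1<<28|1<<27|1<<26|1<<25|1<<24                     = 0x1f000000 */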
 
 void __cpu_die(unsigned int cpu)
 {
+       struct pcpu *pcpu;
+
        /* Wait until target cpu is down */
-       while (!cpu_stopped(cpu))
+       pcpu = pcpu_devices + cpu;
+       while (!pcpu_stopped(pcpu))
                cpu_relax();
-       while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
-               udelay(10);
-       smp_free_lowcore(cpu);
+       pcpu_free_lowcore(pcpu);
        atomic_dec(&init_mm.context.attach_count);
 }
 
 void __noreturn cpu_die(void)
 {
        idle_task_exit();
-       while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
-               cpu_relax();
-       for (;;);
+       pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
+       for (;;) ;
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-#ifndef CONFIG_64BIT
-       unsigned long save_area = 0;
-#endif
-       unsigned long async_stack, panic_stack;
-       struct _lowcore *lowcore;
-
-       smp_detect_cpus();
-
        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1201");
        /* request the 0x1202 external call external interrupt */
        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1202");
-
-       /* Reallocate current lowcore, but keep its contents. */
-       lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-       panic_stack = __get_free_page(GFP_KERNEL);
-       async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-       BUG_ON(!lowcore || !panic_stack || !async_stack);
-#ifndef CONFIG_64BIT
-       if (MACHINE_HAS_IEEE)
-               save_area = get_zeroed_page(GFP_KERNEL);
-#endif
-       local_irq_disable();
-       local_mcck_disable();
-       lowcore_ptr[smp_processor_id()] = lowcore;
-       *lowcore = S390_lowcore;
-       lowcore->panic_stack = panic_stack + PAGE_SIZE;
-       lowcore->async_stack = async_stack + ASYNC_SIZE;
-#ifndef CONFIG_64BIT
-       if (MACHINE_HAS_IEEE)
-               lowcore->extended_save_area_addr = (u32) save_area;
-#endif
-       set_prefix((u32)(unsigned long) lowcore);
-       local_mcck_enable();
-       local_irq_enable();
-#ifdef CONFIG_64BIT
-       if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
-               BUG();
-#endif
+       smp_detect_cpus();
 }
 
 void __init smp_prepare_boot_cpu(void)
 {
-       BUG_ON(smp_processor_id() != 0);
-
-       current_thread_info()->cpu = 0;
-       set_cpu_present(0, true);
-       set_cpu_online(0, true);
+       struct pcpu *pcpu = pcpu_devices;
+
+       boot_cpu_address = stap();
+       pcpu->idle = current;
+       pcpu->state = CPU_STATE_CONFIGURED;
+       pcpu->address = boot_cpu_address;
+       pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
+       pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
+       pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
-       current_set[0] = current;
-       smp_cpu_state[0] = CPU_STATE_CONFIGURED;
        cpu_set_polarization(0, POLARIZATION_UNKNOWN);
+       set_cpu_present(0, true);
+       set_cpu_online(0, true);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -848,7 +859,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void __init smp_setup_processor_id(void)
 {
        S390_lowcore.cpu_nr = 0;
-       __cpu_logical_map[0] = stap();
 }
 
 /*
@@ -864,56 +874,57 @@ int setup_profiling_timer(unsigned int multiplier)
 
 #ifdef CONFIG_HOTPLUG_CPU
 static ssize_t cpu_configure_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+                                 struct device_attribute *attr, char *buf)
 {
        ssize_t count;
 
        mutex_lock(&smp_cpu_state_mutex);
-       count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
+       count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
 }
 
 static ssize_t cpu_configure_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
 {
-       int cpu = dev->id;
-       int val, rc;
+       struct pcpu *pcpu;
+       int cpu, val, rc;
        char delim;
 
        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
-
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus and cpu 0 */
+       cpu = dev->id;
        if (cpu_online(cpu) || cpu == 0)
                goto out;
+       pcpu = pcpu_devices + cpu;
        rc = 0;
        switch (val) {
        case 0:
-               if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
-                       rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
-                       if (!rc) {
-                               smp_cpu_state[cpu] = CPU_STATE_STANDBY;
-                               cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
-                               topology_expect_change();
-                       }
-               }
+               if (pcpu->state != CPU_STATE_CONFIGURED)
+                       break;
+               rc = sclp_cpu_deconfigure(pcpu->address);
+               if (rc)
+                       break;
+               pcpu->state = CPU_STATE_STANDBY;
+               cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+               topology_expect_change();
                break;
        case 1:
-               if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
-                       rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
-                       if (!rc) {
-                               smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
-                               cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
-                               topology_expect_change();
-                       }
-               }
+               if (pcpu->state != CPU_STATE_STANDBY)
+                       break;
+               rc = sclp_cpu_configure(pcpu->address);
+               if (rc)
+                       break;
+               pcpu->state = CPU_STATE_CONFIGURED;
+               cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+               topology_expect_change();
                break;
        default:
                break;
@@ -929,7 +940,7 @@ static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
 static ssize_t show_cpu_address(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
-       return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
+       return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
 }
 static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
 
@@ -1021,7 +1032,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned int)(long)hcpu;
-       struct cpu *c = &per_cpu(cpu_devices, cpu);
+       struct cpu *c = &pcpu_devices[cpu].cpu;
        struct device *s = &c->dev;
        struct s390_idle_data *idle;
        int err = 0;
@@ -1047,7 +1058,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
 
 static int __devinit smp_add_present_cpu(int cpu)
 {
-       struct cpu *c = &per_cpu(cpu_devices, cpu);
+       struct cpu *c = &pcpu_devices[cpu].cpu;
        struct device *s = &c->dev;
        int rc;
 
@@ -1085,29 +1096,21 @@ out:
 
 int __ref smp_rescan_cpus(void)
 {
-       cpumask_t newcpus;
-       int cpu;
-       int rc;
+       struct sclp_cpu_info *info;
+       int nr;
 
+       info = smp_get_cpu_info();
+       if (!info)
+               return -ENOMEM;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
-       cpumask_copy(&newcpus, cpu_present_mask);
-       rc = __smp_rescan_cpus();
-       if (rc)
-               goto out;
-       cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
-       for_each_cpu(cpu, &newcpus) {
-               rc = smp_add_present_cpu(cpu);
-               if (rc)
-                       set_cpu_present(cpu, false);
-       }
-       rc = 0;
-out:
+       nr = __smp_rescan_cpus(info, 1);
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
-       if (!cpumask_empty(&newcpus))
+       kfree(info);
+       if (nr)
                topology_schedule_update();
-       return rc;
+       return 0;
 }
 
 static ssize_t __ref rescan_store(struct device *dev,
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
deleted file mode 100644 (file)
index bfe070b..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * 31-bit switch cpu code
- *
- * Copyright IBM Corp. 2009
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/ptrace.h>
-
-# smp_switch_to_cpu switches to destination cpu and executes the passed function
-# Parameter: %r2 - function to call
-#           %r3 - function parameter
-#           %r4 - stack pointer
-#           %r5 - current cpu
-#           %r6 - destination cpu
-
-       .section .text
-ENTRY(smp_switch_to_cpu)
-       stm     %r6,%r15,__SF_GPRS(%r15)
-       lr      %r1,%r15
-       ahi     %r15,-STACK_FRAME_OVERHEAD
-       st      %r1,__SF_BACKCHAIN(%r15)
-       basr    %r13,0
-0:     la      %r1,.gprregs_addr-0b(%r13)
-       l       %r1,0(%r1)
-       stm     %r0,%r15,0(%r1)
-1:     sigp    %r0,%r6,__SIGP_RESTART  /* start destination CPU */
-       brc     2,1b                    /* busy, try again */
-2:     sigp    %r0,%r5,__SIGP_STOP     /* stop current CPU */
-       brc     2,2b                    /* busy, try again */
-3:     j       3b
-
-ENTRY(smp_restart_cpu)
-       basr    %r13,0
-0:     la      %r1,.gprregs_addr-0b(%r13)
-       l       %r1,0(%r1)
-       lm      %r0,%r15,0(%r1)
-1:     sigp    %r0,%r5,__SIGP_SENSE    /* Wait for calling CPU */
-       brc     10,1b                   /* busy, accepted (status 0), running */
-       tmll    %r0,0x40                /* Test if calling CPU is stopped */
-       jz      1b
-       ltr     %r4,%r4                 /* New stack ? */
-       jz      1f
-       lr      %r15,%r4
-1:     lr      %r14,%r2                /* r14: Function to call */
-       lr      %r2,%r3                 /* r2 : Parameter for function */
-       basr    %r14,%r14               /* Call function */
-
-.gprregs_addr:
-       .long   .gprregs
-
-       .section .data,"aw",@progbits
-.gprregs:
-       .rept   16
-       .long   0
-       .endr
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
deleted file mode 100644 (file)
index fcc42d7..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * 64-bit switch cpu code
- *
- * Copyright IBM Corp. 2009
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/ptrace.h>
-
-# smp_switch_to_cpu switches to destination cpu and executes the passed function
-# Parameter: %r2 - function to call
-#           %r3 - function parameter
-#           %r4 - stack pointer
-#           %r5 - current cpu
-#           %r6 - destination cpu
-
-       .section .text
-ENTRY(smp_switch_to_cpu)
-       stmg    %r6,%r15,__SF_GPRS(%r15)
-       lgr     %r1,%r15
-       aghi    %r15,-STACK_FRAME_OVERHEAD
-       stg     %r1,__SF_BACKCHAIN(%r15)
-       larl    %r1,.gprregs
-       stmg    %r0,%r15,0(%r1)
-1:     sigp    %r0,%r6,__SIGP_RESTART  /* start destination CPU */
-       brc     2,1b                    /* busy, try again */
-2:     sigp    %r0,%r5,__SIGP_STOP     /* stop current CPU */
-       brc     2,2b                    /* busy, try again */
-3:     j       3b
-
-ENTRY(smp_restart_cpu)
-       larl    %r1,.gprregs
-       lmg     %r0,%r15,0(%r1)
-1:     sigp    %r0,%r5,__SIGP_SENSE    /* Wait for calling CPU */
-       brc     10,1b                   /* busy, accepted (status 0), running */
-       tmll    %r0,0x40                /* Test if calling CPU is stopped */
-       jz      1b
-       ltgr    %r4,%r4                 /* New stack ? */
-       jz      1f
-       lgr     %r15,%r4
-1:     lgr     %r14,%r2                /* r14: Function to call */
-       lgr     %r2,%r3                 /* r2 : Parameter for function */
-       basr    %r14,%r14               /* Call function */
-
-       .section .data,"aw",@progbits
-.gprregs:
-       .rept   16
-       .quad   0
-       .endr
index 2ef39d1519a9cc784c489ebd07808a22700c972d..ad3c79eceed74190ef250f38d1b863451fdd6a75 100644 (file)
@@ -179,9 +179,9 @@ pgm_check_entry:
        larl    %r4,.Lrestart_suspend_psw       /* Set new restart PSW */
        mvc     __LC_RST_NEW_PSW(16,%r0),0(%r4)
 3:
-       sigp    %r9,%r1,__SIGP_INITIAL_CPU_RESET
-       brc     8,4f    /* accepted */
-       brc     2,3b    /* busy, try again */
+       sigp    %r9,%r1,11                      /* sigp initial cpu reset */
+       brc     8,4f                            /* accepted */
+       brc     2,3b                            /* busy, try again */
 
        /* Suspend CPU not available -> panic */
        larl    %r15,init_thread_union
@@ -196,10 +196,10 @@ pgm_check_entry:
        lpsw    0(%r3)
 4:
        /* Switch to suspend CPU */
-       sigp    %r9,%r1,__SIGP_RESTART  /* start suspend CPU */
+       sigp    %r9,%r1,6               /* sigp restart to suspend CPU */
        brc     2,4b                    /* busy, try again */
 5:
-       sigp    %r9,%r2,__SIGP_STOP     /* stop resume (current) CPU */
+       sigp    %r9,%r2,5               /* sigp stop to current resume CPU */
        brc     2,5b                    /* busy, try again */
 6:     j       6b
 
@@ -207,7 +207,7 @@ restart_suspend:
        larl    %r1,.Lresume_cpu
        llgh    %r2,0(%r1)
 7:
-       sigp    %r9,%r2,__SIGP_SENSE    /* Wait for resume CPU */
+       sigp    %r9,%r2,1               /* sigp sense, wait for resume CPU */
        brc     8,7b                    /* accepted, status 0, still running */
        brc     2,7b                    /* busy, try again */
        tmll    %r9,0x40                /* Test if resume CPU is stopped */
index 7370a41948cad1d7657f86f47f5a362c99fdc60d..4f8dc942257c378bba5a940f41cc8e6c270861c0 100644 (file)
@@ -79,12 +79,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
             cpu < TOPOLOGY_CPU_BITS;
             cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
        {
-               unsigned int rcpu, lcpu;
+               unsigned int rcpu;
+               int lcpu;
 
                rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
-               for_each_present_cpu(lcpu) {
-                       if (cpu_logical_map(lcpu) != rcpu)
-                               continue;
+               lcpu = smp_find_processor_id(rcpu);
+               if (lcpu >= 0) {
                        cpumask_set_cpu(lcpu, &book->mask);
                        cpu_book_id[lcpu] = book->id;
                        cpumask_set_cpu(lcpu, &core->mask);
index d73630b4fe1dbe87f9d492dd5f1e465749a5a0dc..e704a9965f902ce808648da176a99d60505ce927 100644 (file)
@@ -88,19 +88,12 @@ static void vdso_init_data(struct vdso_data *vd)
 }
 
 #ifdef CONFIG_64BIT
-/*
- * Setup per cpu vdso data page.
- */
-static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
-{
-}
-
 /*
  * Allocate/free per cpu vdso data.
  */
 #define SEGMENT_ORDER  2
 
-int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
+int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 {
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;
@@ -139,7 +132,6 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
        aste[4] = (u32)(addr_t) psal;
        lowcore->vdso_per_cpu_data = page_frame;
 
-       vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
        return 0;
 
 out:
@@ -149,7 +141,7 @@ out:
        return -ENOMEM;
 }
 
-void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
+void vdso_free_per_cpu(struct _lowcore *lowcore)
 {
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;
@@ -168,19 +160,15 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
        free_pages(segment_table, SEGMENT_ORDER);
 }
 
-static void __vdso_init_cr5(void *dummy)
+static void vdso_init_cr5(void)
 {
        unsigned long cr5;
 
+       if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+               return;
        cr5 = offsetof(struct _lowcore, paste);
        __ctl_load(cr5, 5, 5);
 }
-
-static void vdso_init_cr5(void)
-{
-       if (user_mode != HOME_SPACE_MODE && vdso_enabled)
-               on_each_cpu(__vdso_init_cr5, NULL, 1);
-}
 #endif /* CONFIG_64BIT */
 
 /*
@@ -322,10 +310,8 @@ static int __init vdso_init(void)
        }
        vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
        vdso64_pagelist[vdso64_pages] = NULL;
-#ifndef CONFIG_SMP
-       if (vdso_alloc_per_cpu(0, &S390_lowcore))
+       if (vdso_alloc_per_cpu(&S390_lowcore))
                BUG();
-#endif
        vdso_init_cr5();
 #endif /* CONFIG_64BIT */
 
@@ -335,7 +321,7 @@ static int __init vdso_init(void)
 
        return 0;
 }
-arch_initcall(vdso_init);
+early_initcall(vdso_init);
 
 int in_gate_area_no_mm(unsigned long addr)
 {
index bb48977f54697bdb1d68b34da3cc239947659ea2..7bacee9a546f5058a9fdb182722882a1ecbeae86 100644 (file)
@@ -570,6 +570,9 @@ void init_cpu_vtimer(void)
 
        /* enable cpu timer interrupts */
        __ctl_set_bit(0,10);
+
+       /* set initial cpu timer */
+       set_vtimer(0x7fffffffffffffffULL);
 }
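The new initial value is the largest positive signed 64-bit number. The s390 CPU timer counts down and raises its interrupt when the value turns negative, so priming it this way parks the freshly enabled timer until a real expiry is programmed:

        set_vtimer(0x7fffffffffffffffULL);      /* LLONG_MAX: nothing pending */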
 
 static int __cpuinit s390_nohz_notify(struct notifier_block *self,
index 91754ffb9203a79fc3d7005d3cf8d80905ceb1d3..093eb694d9c157c717d56a685eb104dd03b0cdab 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/smp.h>
 #include <asm/io.h>
 
 int spin_retry = 1000;
@@ -24,21 +25,6 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void _raw_yield(void)
-{
-       if (MACHINE_HAS_DIAG44)
-               asm volatile("diag 0,0,0x44");
-}
-
-static inline void _raw_yield_cpu(int cpu)
-{
-       if (MACHINE_HAS_DIAG9C)
-               asm volatile("diag %0,0,0x9c"
-                            : : "d" (cpu_logical_map(cpu)));
-       else
-               _raw_yield();
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
        int count = spin_retry;
@@ -60,7 +46,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
                }
                owner = lp->owner_cpu;
                if (owner)
-                       _raw_yield_cpu(~owner);
+                       smp_yield_cpu(~owner);
                if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                        return;
        }
@@ -91,7 +77,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
                }
                owner = lp->owner_cpu;
                if (owner)
-                       _raw_yield_cpu(~owner);
+                       smp_yield_cpu(~owner);
                local_irq_disable();
                if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                        return;
@@ -121,7 +107,7 @@ void arch_spin_relax(arch_spinlock_t *lock)
        if (cpu != 0) {
                if (MACHINE_IS_VM || MACHINE_IS_KVM ||
                    !smp_vcpu_scheduled(~cpu))
-                       _raw_yield_cpu(~cpu);
+                       smp_yield_cpu(~cpu);
        }
 }
 EXPORT_SYMBOL(arch_spin_relax);
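smp_yield() and smp_yield_cpu() now live in smp.c (added earlier in this patch); from the deleted _raw_yield helpers above they presumably wrap the same diagnose instructions, with the cpu address now taken from struct pcpu:

        /* Hypothetical sketches, modeled on the deleted _raw_yield helpers. */
        void smp_yield(void)
        {
                if (MACHINE_HAS_DIAG44)
                        asm volatile("diag 0,0,0x44");
        }

        void smp_yield_cpu(int cpu)
        {
                if (MACHINE_HAS_DIAG9C)
                        asm volatile("diag %0,0,0x9c"
                                     : : "d" (pcpu_devices[cpu].address));
                else
                        smp_yield();
        }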
@@ -133,7 +119,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 
        while (1) {
                if (count-- <= 0) {
-                       _raw_yield();
+                       smp_yield();
                        count = spin_retry;
                }
                if (!arch_read_can_lock(rw))
@@ -153,7 +139,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
        local_irq_restore(flags);
        while (1) {
                if (count-- <= 0) {
-                       _raw_yield();
+                       smp_yield();
                        count = spin_retry;
                }
                if (!arch_read_can_lock(rw))
@@ -188,7 +174,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 
        while (1) {
                if (count-- <= 0) {
-                       _raw_yield();
+                       smp_yield();
                        count = spin_retry;
                }
                if (!arch_write_can_lock(rw))
@@ -206,7 +192,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
        local_irq_restore(flags);
        while (1) {
                if (count-- <= 0) {
-                       _raw_yield();
+                       smp_yield();
                        count = spin_retry;
                }
                if (!arch_write_can_lock(rw))
index 87fc0ac11e6766b16e3c4386010afe4cdabe67d2..69df137310bc5b5a3081969530d042c77bf095da 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/reboot.h>
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
-#include <asm/sigp.h>
 #include <asm/smp.h>
 
 #include "sclp.h"
index 1b6d9247fdc78a4237d5e7048347a0a66875d140..3303d66b27941c7d1e51e5031f6867c25fbbb8a5 100644 (file)
@@ -21,7 +21,6 @@
 #include <asm/ipl.h>
 #include <asm/sclp.h>
 #include <asm/setup.h>
-#include <asm/sigp.h>
 #include <asm/uaccess.h>
 #include <asm/debug.h>
 #include <asm/processor.h>