/*
 * Copyright IBM Corp. 2007, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *            Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/ctl_reg.h>

#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

struct read_info_sccb {
	struct sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	fac85;			/* 85 */
	u8	_reserved3[91 - 86];	/* 86-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));

static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;
static u8 sclp_fac84;
static u8 sclp_fac85;
static unsigned long long rzm;
static unsigned long long rnmax;

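/*
 * Issue an SCLP command before the regular interrupt-driven SCLP
 * infrastructure is up: enable the service-signal subclass (control
 * register 0, bit 9), start the service call and wait for its external
 * interrupt via an enabled wait PSW.
 */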
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}

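/*
 * Try the forced variant of READ_SCP_INFO first; if the control program
 * rejects it (response code 0x1f0), fall back to the plain command.
 * Response code 0x10 indicates success; -EBUSY from the service call is
 * retried.
 */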
static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}

void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;

	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	sclp_fac85 = sccb->fac85;
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	/* The increment size is reported in megabytes; convert to bytes. */
	rzm <<= 20;
}

unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return rzm;
}

u8 sclp_get_fac85(void)
{
	return sclp_fac85;
}
EXPORT_SYMBOL_GPL(sclp_get_fac85);

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

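/*
 * Execute an SCLP command synchronously: queue the request and sleep
 * until sclp_sync_callback() signals completion.
 */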
static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warning("sync request failed (cmd=0x%08x, "
			   "status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

struct read_cpu_info_sccb {
	struct sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

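/*
 * sclp_fill_cpu_info() copies info->combined entries starting at
 * offset_configured, which relies on the configured and standby CPU
 * entries being laid out back to back in the sccb.
 */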
static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
			       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	info->has_cpu_type = sclp_fac84 & 0x1;
	memcpy(&info->cpu, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_cpu_entry));
}

int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("readcpuinfo failed (response=0x%04x)\n",
			   sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("configure cpu failed (cmd=0x%08x, "
			   "response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}

int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;

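/*
 * Standby memory is managed in storage increments of rzm bytes; each
 * increment is identified by a 1-based increment number (rn).
 */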
struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
	int usecount;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(rzm);
}

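/* Convert a 1-based increment number to its starting physical address. */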
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("assign storage failed (cmd=0x%08x, "
			   "response=0x%04x, rn=0x%04x)\n", cmd,
			   sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

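/*
 * After assigning an increment, initialize the storage keys of the new
 * memory range so it can be accessed with the default key.
 */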
static int sclp_assign_storage(u16 rn)
{
	unsigned long long start, address;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		goto out;
	start = address = rn2addr(rn);
	for (; address < start + rzm; address += PAGE_SIZE)
		page_set_storage_key(address, PAGE_DEFAULT_KEY, 0);
out:
	return rc;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = do_sync_request(0x00080001 | id << 8, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

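/*
 * Walk the increment list and assign (online) or unassign (offline)
 * every increment that overlaps the [start, start + size) range,
 * tracking how many online ranges use each increment via its usecount.
 */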
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + rzm - 1)
			continue;
		if (online) {
			if (incr->usecount++)
				continue;
			/*
			 * Don't break the loop if one assign fails. Loop may
			 * be walked again on CANCEL and we can't save
			 * information if state changed before or not.
			 * So continue and increase usecount for all increments.
			 */
			rc |= sclp_assign_storage(incr->rn);
		} else {
			if (--incr->usecount)
				continue;
			sclp_unassign_storage(incr->rn);
		}
	}
	return rc ? -EIO : 0;
}

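/*
 * Memory hotplug notifier: attach any storage elements that are not yet
 * accessible, then assign standby storage when a range goes online and
 * release it again when onlining is cancelled or the range goes offline.
 */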
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

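/*
 * Coalesce runs of consecutive increment numbers into a single
 * add_memory() call. Calling this with rn == 0 flushes the last
 * pending range.
 */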
static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	add_memory(0, start, size);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

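/*
 * Insert an increment into the sorted increment list. For assigned
 * increments the given rn is used; for unassigned ones the first gap
 * in the numbering is taken.
 */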
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	if (!standby)
		new_incr->usecount = 1;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};

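/*
 * Scan all storage element IDs with the read-storage-element-info
 * command, build the increment list, pad it with unassigned standby
 * increments up to rnmax, and register the memory notifier plus the
 * sclp_mem platform device used to veto suspend after state changes.
 */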
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (!early_read_info_sccb_valid)
		return 0;
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = do_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warning("configure channel-path failed "
			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("read channel-path info failed "
			   "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}