// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>
#include <asm/facility.h>

#include "sclp.h"
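
/* Wake up the task sleeping in sclp_sync_request_timeout(). */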
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}
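
/*
 * Build an sclp_req for @cmd/@sccb, queue it and sleep until the request
 * completes. A non-zero @timeout limits how long the request may sit on
 * the SCLP request queue.
 */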
int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
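
/*
 * Read the CPU configuration via READ CPU INFO. If the extended-length-SCCB
 * facility (bit 140) is installed, a larger SCCB is used so that all cores
 * fit into the response.
 */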
int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;

	sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = length;
	sccb->header.control_mask[2] = 0x80;
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_pages((unsigned long) sccb, get_order(length));
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));
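
/* Issue a (de)configure CPU command and evaluate the response code. */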
static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);
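
/*
 * One storage increment of sclp.rzm bytes. Increments flagged as standby
 * are detected but not currently assigned.
 */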
struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}
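
/* Increment numbers are 1-based; map one to its starting address. */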
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}
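
/* Issue an assign- or unassign-storage command for increment number @rn. */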
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
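
/* Assign an increment and initialize the storage keys of its memory range. */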
static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;
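
/*
 * Attach storage element @id. Increments reported as assigned in the
 * response are unassigned again, which leaves them in standby state.
 */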
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->header.function_code = 0x40;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
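
/*
 * Assign (online) or unassign (offline) all increments that overlap the
 * range [start, start + size).
 */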
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}
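
/* Check whether [start, end) overlaps any standby increment. */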
static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}
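
/*
 * Memory hotplug notifier: attach so far unknown storage elements and
 * assign/unassign increments as memory blocks go online or offline.
 */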
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * We do not allow to set memory blocks offline that contain
		 * standby memory. This is done to simplify the "memory online"
		 * case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
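
/* Shrink [*start, *start + *size) to the largest range aligned to @alignment. */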
static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}
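
/*
 * Collect consecutive standby increments and add each merged range with
 * add_memory(), one memory block at a time. Called with rn == 0 to flush
 * the last pending range.
 */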
static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
		size = ident_map_size - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(0, addr, block_size, MHP_NONE);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}
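
/*
 * Insert an increment into the sorted sclp_mem_list. For !assigned the
 * increment number is unknown and the first gap in the numbering is used.
 */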
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
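
/*
 * Detect standby memory at boot: read the storage configuration of each
 * storage element, build sclp_mem_list and add standby ranges via the
 * memory hotplug interface.
 */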
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (OLDMEM_BASE) /* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
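
/* Issue a (de)configure channel-path command and evaluate the response. */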
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path SCLP command for the specified chpid.
 * Return 0 if the command finished successfully, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path SCLP command for the specified chpid
 * and wait for completion. On success return 0, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform the read channel-path information SCLP command and wait for
 * completion. On success, store channel-path information in @info and
 * return 0. Return non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}