// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cpufeature.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/module.h>
#include <asm/ctlreg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>
#include <asm/facility.h>
#include <asm/page-states.h>

#include "sclp.h"

#define SCLP_CMDW_ASSIGN_STORAGE	0x000d0001
#define SCLP_CMDW_UNASSIGN_STORAGE	0x000c0001

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}

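/*
 * Execute an SCLP request synchronously: queue the request and sleep
 * until its completion callback fires. A non-zero @timeout is passed on
 * as the request's queue_timeout.
 */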
int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

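/*
 * Read the CPU (core) information provided by the SCLP and fill @info.
 * If facility 140 (extended-length SCCB) is installed, a larger buffer
 * is used for the response.
 */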
int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;

	sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = length;
	sccb->header.control_mask[2] = 0x80;
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read cpu info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_pages((unsigned long) sccb, get_order(length));
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

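/*
 * Issue a CPU (de)configuration command and evaluate the response code;
 * 0x0020 and 0x0120 indicate success.
 */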
static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

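/*
 * Report the storage increment number that contains @start_pfn; the
 * memory core exposes this value as the memory block's phys_device.
 */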
int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}

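/* Convert a 1-based storage increment number to its start address. */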
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}

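/*
 * Issue an assign or unassign storage command for increment @rn and
 * evaluate the response code; 0x0020 and 0x0120 indicate success.
 */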
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

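/*
 * Assign a storage increment and initialize the storage keys of the
 * newly assigned memory range.
 */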
static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[];
} __packed;

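/*
 * Attach the storage element with the given @id. Any increments that the
 * response reports as assigned are immediately unassigned again.
 */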
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->header.function_code = 0x40;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

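/*
 * Assign (online) or unassign (offline) all storage increments that
 * overlap the range [start, start + size).
 */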
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}

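/* Check whether the range [start, end) overlaps a standby storage increment. */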
static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}

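/*
 * Memory hotplug notifier: assign the backing storage increments when a
 * memory block is prepared for onlining and unassign them again once it
 * has been taken offline. Offlining of blocks that still contain standby
 * increments is rejected.
 */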
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Memory blocks that contain standby memory may not be set
		 * offline. This simplifies the "memory online" case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_PREPARE_ONLINE:
		/*
		 * The altmap_start_pfn and altmap_nr_pages fields of struct
		 * memory_notify are only valid for the MEM_PREPARE_ONLINE and
		 * MEM_FINISH_OFFLINE notifiers.
		 *
		 * When an altmap is in use, take the specified memory range
		 * online, which includes the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		rc = sclp_mem_change_state(start, size, 1);
		if (rc || !arg->altmap_nr_pages)
			break;
		/*
		 * Set CMMA state to nodat here, since the struct page memory
		 * at the beginning of the memory block will not go through the
		 * buddy allocator later.
		 */
		__arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
		break;
	case MEM_FINISH_OFFLINE:
		/*
		 * When altmap is in use, take the specified memory range
		 * offline, which includes the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

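/*
 * Align a standby memory range to the memory block size and report how
 * much of it remains usable.
 */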
static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}

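/*
 * Collect consecutive standby increments and add them to the kernel as
 * memory blocks in one go; rn == 0 flushes the pending range.
 */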
static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
		size = ident_map_size - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(0, addr, block_size,
			   cpu_has_edat1() ?
			   MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

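/*
 * Insert a memory increment into the sorted sclp_mem_list. For increments
 * that are not yet assigned, the first free increment number is used.
 */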
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

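/*
 * Detect standby memory at boot time: read the storage information for
 * each storage element, build the increment list and register the memory
 * hotplug notifier. Standby increments are added as offline memory blocks.
 */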
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (oldmem_data.start) /* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

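/*
 * Issue a channel-path (de)configuration command and evaluate the
 * response code.
 */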
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 after the command has finished
 * successfully, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. On success return 0, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}