xref: /kvm-unit-tests/lib/s390x/sclp.c (revision 6163f75d09a0a96a5c3db82dd768b13f79629c00)
1 /*
2  * s390x SCLP driver
3  *
4  * Copyright (c) 2017 Red Hat Inc
5  *
6  * Authors:
7  *  David Hildenbrand <david@redhat.com>
8  *
9  * This code is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU Library General Public License version 2.
11  */
12 
13 #include <libcflat.h>
14 #include <asm/page.h>
15 #include <asm/arch_def.h>
16 #include <asm/interrupt.h>
17 #include <asm/barrier.h>
18 #include <asm/spinlock.h>
19 #include "sclp.h"
20 #include <alloc_phys.h>
21 #include <alloc_page.h>
22 
23 extern unsigned long stacktop;
24 
25 static uint64_t storage_increment_size;
26 static uint64_t max_ram_size;
27 static uint64_t ram_size;
28 
29 char _sccb[PAGE_SIZE] __attribute__((__aligned__(4096)));
30 static volatile bool sclp_busy;
31 static struct spinlock sclp_lock;
32 
33 static void mem_init(phys_addr_t mem_end)
34 {
35 	phys_addr_t freemem_start = (phys_addr_t)&stacktop;
36 	phys_addr_t base, top;
37 
38 	phys_alloc_init(freemem_start, mem_end - freemem_start);
39 	phys_alloc_get_unused(&base, &top);
40 	base = (base + PAGE_SIZE - 1) & -PAGE_SIZE;
41 	top = top & -PAGE_SIZE;
42 
43 	/* Make the pages available to the physical allocator */
44 	free_pages((void *)(unsigned long)base, top - base);
45 	page_alloc_ops_enable();
46 }
47 
48 static void sclp_setup_int(void)
49 {
50 	uint64_t mask;
51 
52 	ctl_set_bit(0, 9);
53 
54 	mask = extract_psw_mask();
55 	mask |= PSW_MASK_EXT;
56 	load_psw_mask(mask);
57 }
58 
/*
 * External-interrupt handler hook for the SCLP service signal:
 * disable further service-signal interrupts (CR0 bit 9) and clear
 * the busy flag under the lock so waiters in sclp_wait_busy() and
 * sclp_mark_busy() observe the request as completed.
 */
void sclp_handle_ext(void)
{
	ctl_clear_bit(0, 9);
	spin_lock(&sclp_lock);
	sclp_busy = false;
	spin_unlock(&sclp_lock);
}
66 
67 void sclp_wait_busy(void)
68 {
69 	while (sclp_busy)
70 		mb();
71 }
72 
73 void sclp_mark_busy(void)
74 {
75 	/*
76 	 * With multiple CPUs we might need to wait for another CPU's
77 	 * request before grabbing the busy indication.
78 	 */
79 	while (true) {
80 		sclp_wait_busy();
81 		spin_lock(&sclp_lock);
82 		if (!sclp_busy) {
83 			sclp_busy = true;
84 			spin_unlock(&sclp_lock);
85 			return;
86 		}
87 		spin_unlock(&sclp_lock);
88 	}
89 }
90 
91 static void sclp_read_scp_info(ReadInfo *ri, int length)
92 {
93 	unsigned int commands[] = { SCLP_CMDW_READ_SCP_INFO_FORCED,
94 				    SCLP_CMDW_READ_SCP_INFO };
95 	int i, cc;
96 
97 	for (i = 0; i < ARRAY_SIZE(commands); i++) {
98 		sclp_mark_busy();
99 		memset(&ri->h, 0, sizeof(ri->h));
100 		ri->h.length = length;
101 
102 		cc = sclp_service_call(commands[i], ri);
103 		if (cc)
104 			break;
105 		if (ri->h.response_code == SCLP_RC_NORMAL_READ_COMPLETION)
106 			return;
107 		if (ri->h.response_code != SCLP_RC_INVALID_SCLP_COMMAND)
108 			break;
109 	}
110 	report_abort("READ_SCP_INFO failed");
111 }
112 
113 /* Perform service call. Return 0 on success, non-zero otherwise. */
114 int sclp_service_call(unsigned int command, void *sccb)
115 {
116 	int cc;
117 
118 	sclp_setup_int();
119 	asm volatile(
120 		"       .insn   rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
121 		"       ipm     %0\n"
122 		"       srl     %0,28"
123 		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
124 		: "cc", "memory");
125 	sclp_wait_busy();
126 	if (cc == 3)
127 		return -1;
128 	if (cc == 2)
129 		return -1;
130 	return 0;
131 }
132 
/*
 * Detect the machine's memory layout via READ SCP INFO, probe how much
 * of it is actually accessible, and initialize the allocators with the
 * usable range.
 */
void sclp_memory_setup(void)
{
	ReadInfo *ri = (void *)_sccb;
	uint64_t rnmax, rnsize;
	int cc;

	sclp_read_scp_info(ri, SCCB_SIZE);

	/* calculate the storage increment size */
	rnsize = ri->rnsize;
	if (!rnsize) {
		/* a zero byte field means the value lives in the wider rnsize2 */
		rnsize = ri->rnsize2;
	}
	/* rnsize is reported in MB units, hence the shift by 20 */
	storage_increment_size = rnsize << 20;

	/* calculate the maximum memory size */
	rnmax = ri->rnmax;
	if (!rnmax) {
		/* same fallback scheme as rnsize/rnsize2 above */
		rnmax = ri->rnmax2;
	}
	max_ram_size = rnmax * storage_increment_size;

	/* lowcore is always accessible, so the first increment is accessible */
	ram_size = storage_increment_size;

	/* probe for r/w memory up to max memory size */
	while (ram_size < max_ram_size) {
		/* arm the program-check handler: tprot may fault on absent memory */
		expect_pgm_int();
		/* test the last byte of the next increment */
		cc = tprot(ram_size + storage_increment_size - 1);
		/* stop once we receive an exception or have protected memory */
		if (clear_pgm_int() || cc != 0)
			break;
		ram_size += storage_increment_size;
	}

	mem_init(ram_size);
}
170 
/* Return the amount of r/w memory detected by sclp_memory_setup(). */
uint64_t get_ram_size(void)
{
	return ram_size;
}
175 
/* Return the maximum memory size reported by READ SCP INFO (rnmax * increment). */
uint64_t get_max_ram_size(void)
{
	return max_ram_size;
}
180