/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x SCLP driver
 *
 * Copyright (c) 2017 Red Hat Inc
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 */

#include <libcflat.h>
#include <asm/page.h>
#include <asm/arch_def.h>
#include <asm/interrupt.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include "sclp.h"
#include <alloc_phys.h>
#include <alloc_page.h>

extern unsigned long stacktop;

static uint64_t storage_increment_size;
static uint64_t max_ram_size;
static uint64_t ram_size;
char _read_info[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static ReadInfo *read_info;
struct sclp_facilities sclp_facilities;

char _sccb[PAGE_SIZE] __attribute__((__aligned__(4096)));
static volatile bool sclp_busy;
static struct spinlock sclp_lock;

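/*
 * Hand the detected memory range over to the allocators: the physical
 * allocator is initialized with everything between the end of the stack
 * and mem_end, and whatever it leaves unused is then made available to
 * the page allocator.
 */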
static void mem_init(phys_addr_t mem_end)
{
	phys_addr_t freemem_start = (phys_addr_t)&stacktop;
	phys_addr_t base, top;

	phys_alloc_init(freemem_start, mem_end - freemem_start);
	phys_alloc_get_unused(&base, &top);
	base = PAGE_ALIGN(base) >> PAGE_SHIFT;
	top = top >> PAGE_SHIFT;

	/* Make the pages available to the page allocator */
	page_alloc_init_area(AREA_ANY_NUMBER, base, top);
	page_alloc_ops_enable();
}

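/*
 * Enable the service-signal subclass in control register 0 and external
 * interrupts in the PSW mask so the SCLP completion interrupt can be
 * delivered.
 */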
void sclp_setup_int(void)
{
	ctl_set_bit(0, CTL0_SERVICE_SIGNAL);
	psw_mask_set_bits(PSW_MASK_EXT);
}

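/*
 * Handle a service-signal external interrupt: disable the subclass
 * again and release the busy indication so waiters can proceed.
 */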
void sclp_handle_ext(void)
{
	ctl_clear_bit(0, CTL0_SERVICE_SIGNAL);
	sclp_clear_busy();
}

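/*
 * The sclp_busy flag serializes SCLP requests: a caller takes it via
 * sclp_mark_busy() before filling in an SCCB, and it is released either
 * directly via sclp_clear_busy() or from sclp_handle_ext() once the
 * service-signal interrupt for the completed request arrives.
 */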
void sclp_wait_busy(void)
{
	while (sclp_busy)
		mb();
}

void sclp_mark_busy(void)
{
	/*
	 * With multiple CPUs we might need to wait for another CPU's
	 * request before grabbing the busy indication.
	 */
	while (true) {
		sclp_wait_busy();
		spin_lock(&sclp_lock);
		if (!sclp_busy) {
			sclp_busy = true;
			spin_unlock(&sclp_lock);
			return;
		}
		spin_unlock(&sclp_lock);
	}
}

void sclp_clear_busy(void)
{
	spin_lock(&sclp_lock);
	sclp_busy = false;
	spin_unlock(&sclp_lock);
}

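/*
 * Issue READ SCP INFO, first with the FORCED command word and, if the
 * SCLP rejects that command, with the regular one. Any other failure is
 * fatal for test setup.
 */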
static void sclp_read_scp_info(ReadInfo *ri, int length)
{
	unsigned int commands[] = { SCLP_CMDW_READ_SCP_INFO_FORCED,
				    SCLP_CMDW_READ_SCP_INFO };
	int i, cc;

	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		sclp_mark_busy();
		memset(&ri->h, 0, sizeof(ri->h));
		ri->h.length = length;

		cc = sclp_service_call(commands[i], ri);
		if (cc)
			break;
		if (ri->h.response_code == SCLP_RC_NORMAL_READ_COMPLETION)
			return;
		if (ri->h.response_code != SCLP_RC_INVALID_SCLP_COMMAND)
			break;
	}
	report_abort("READ_SCP_INFO failed");
}

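/* Cache the READ SCP INFO result so the accessors below can use it. */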
void sclp_read_info(void)
{
	sclp_read_scp_info((void *)_read_info, SCCB_SIZE);
	read_info = (ReadInfo *)_read_info;
}

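/* Both accessors below require sclp_read_info() to have been called. */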
int sclp_get_cpu_num(void)
{
	assert(read_info);
	return read_info->entries_cpu;
}

CPUEntry *sclp_get_cpu_entries(void)
{
	assert(read_info);
	return (CPUEntry *)(_read_info + read_info->offset_cpu);
}

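/*
 * Test a facility bit in the read info block: byte is the offset into
 * the SCCB and bit is counted with bit 0 being the most significant bit
 * of that byte.
 */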
static bool sclp_feat_check(int byte, int bit)
{
	uint8_t *rib = (uint8_t *)read_info;

	return !!(rib[byte] & (0x80 >> bit));
}

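/*
 * Extract the facility bits this library cares about from the read info
 * block and from the boot CPU's CPU entry into sclp_facilities.
 */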
void sclp_facilities_setup(void)
{
	unsigned short cpu0_addr = stap();
	CPUEntry *cpu;
	int i;

	assert(read_info);

	cpu = sclp_get_cpu_entries();
	if (read_info->offset_cpu > 134)
		sclp_facilities.has_diag318 = read_info->byte_134_diag318;
	sclp_facilities.has_sop = sclp_feat_check(80, SCLP_FEAT_80_BIT_SOP);
	sclp_facilities.has_gsls = sclp_feat_check(85, SCLP_FEAT_85_BIT_GSLS);
	sclp_facilities.has_esop = sclp_feat_check(85, SCLP_FEAT_85_BIT_ESOP);
	sclp_facilities.has_kss = sclp_feat_check(98, SCLP_FEAT_98_BIT_KSS);
	sclp_facilities.has_cmma = sclp_feat_check(116, SCLP_FEAT_116_BIT_CMMA);
	sclp_facilities.has_64bscao = sclp_feat_check(116, SCLP_FEAT_116_BIT_64BSCAO);
	sclp_facilities.has_esca = sclp_feat_check(116, SCLP_FEAT_116_BIT_ESCA);
	sclp_facilities.has_ibs = sclp_feat_check(117, SCLP_FEAT_117_BIT_IBS);
	sclp_facilities.has_pfmfi = sclp_feat_check(117, SCLP_FEAT_117_BIT_PFMFI);

	for (i = 0; i < read_info->entries_cpu; i++, cpu++) {
		/*
		 * The logic of reading these facilities only from the
		 * boot CPU's entry comes from the kernel. No documentation
		 * seems to explain why this is necessary, but we follow
		 * the same approach here.
		 */
		if (cpu->address == cpu0_addr) {
			sclp_facilities.has_sief2 = cpu->feat_sief2;
			sclp_facilities.has_skeyi = cpu->feat_skeyi;
			sclp_facilities.has_siif = cpu->feat_siif;
			sclp_facilities.has_sigpif = cpu->feat_sigpif;
			sclp_facilities.has_ib = cpu->feat_ib;
			sclp_facilities.has_cei = cpu->feat_cei;
			break;
		}
	}
}

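/*
 * Minimal caller sketch for the service call below (illustrative only;
 * "sccb" and "cmd" are placeholders for a caller-owned, page-aligned
 * SCCB and an SCLP command word, neither is defined in this file):
 *
 *	sclp_mark_busy();
 *	memset(&sccb->h, 0, sizeof(sccb->h));
 *	sccb->h.length = PAGE_SIZE;
 *	cc = sclp_service_call(cmd, sccb);
 *	if (cc || sccb->h.response_code != SCLP_RC_NORMAL_READ_COMPLETION)
 *		report_abort("SCLP request failed");
 */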
/* Perform service call. Return 0 on success, non-zero otherwise. */
int sclp_service_call(unsigned int command, void *sccb)
{
	int cc;

	sclp_setup_int();
	cc = servc(command, __pa(sccb));
	sclp_wait_busy();
	if (cc == 3)
		return -1;
	if (cc == 2)
		return -1;
	return 0;
}

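/*
 * Determine the storage increment size (rnsize is in MB) and the maximum
 * memory size announced by the SCLP, then probe with TPROT how much of it
 * is actually accessible read/write before initializing the allocators.
 */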
void sclp_memory_setup(void)
{
	uint64_t rnmax, rnsize;
	enum tprot_permission permission;

	assert(read_info);

	/* calculate the storage increment size */
	rnsize = read_info->rnsize;
	if (!rnsize) {
		rnsize = read_info->rnsize2;
	}
	storage_increment_size = rnsize << 20;

	/* calculate the maximum memory size */
	rnmax = read_info->rnmax;
	if (!rnmax) {
		rnmax = read_info->rnmax2;
	}
	max_ram_size = rnmax * storage_increment_size;

	/* lowcore is always accessible, so the first increment is accessible */
	ram_size = storage_increment_size;

	/* probe for r/w memory up to max memory size */
	while (ram_size < max_ram_size) {
		expect_pgm_int();
		permission = tprot(ram_size + storage_increment_size - 1, 0);
		/* stop once we receive an exception or have protected memory */
		if (clear_pgm_int() || permission != TPROT_READ_WRITE)
			break;
		ram_size += storage_increment_size;
	}

	mem_init(ram_size);
}

uint64_t get_ram_size(void)
{
	return ram_size;
}

uint64_t get_max_ram_size(void)
{
	return max_ram_size;
}