/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x SCLP driver
 *
 * Copyright (c) 2017 Red Hat Inc
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 */

#include <libcflat.h>
#include <asm/page.h>
#include <asm/arch_def.h>
#include <asm/interrupt.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include "sclp.h"
#include <alloc_phys.h>
#include <alloc_page.h>
#include <asm/facility.h>

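/* Provided by the linker script: the first free address above the boot stack */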
extern unsigned long stacktop;

static uint64_t storage_increment_size;
static uint64_t max_ram_size;
static uint64_t ram_size;
/* Two pages, in case the extended-length-SCCB facility is in use */
char _read_info[2 * PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static ReadInfo *read_info;
struct sclp_facilities sclp_facilities;

char _sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static volatile bool sclp_busy;
static struct spinlock sclp_lock;

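/*
 * Hand the detected memory over to the allocators: seed the physical
 * allocator with everything between the boot stack and mem_end, then
 * make its unused remainder available page-wise to the page allocator.
 */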
static void mem_init(phys_addr_t mem_end)
{
	phys_addr_t freemem_start = (phys_addr_t)&stacktop;
	phys_addr_t base, top;

	phys_alloc_init(freemem_start, mem_end - freemem_start);
	phys_alloc_get_unused(&base, &top);
	base = PAGE_ALIGN(base) >> PAGE_SHIFT;
	top = top >> PAGE_SHIFT;

	/* Make the pages available to the page allocator */
	page_alloc_init_area(AREA_ANY_NUMBER, base, top);
	page_alloc_ops_enable();
}

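/*
 * Enable the service-signal external interrupt subclass in CR0 and
 * external interrupts in the PSW so the completion of an SCLP request
 * can be recognized.
 */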
void sclp_setup_int(void)
{
	ctl_set_bit(0, CTL0_SERVICE_SIGNAL);
	psw_mask_set_bits(PSW_MASK_EXT);
}

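/*
 * Called from the external interrupt handler on a service-signal
 * interrupt: the pending request has completed, so drop the busy flag.
 */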
void sclp_handle_ext(void)
{
	ctl_clear_bit(0, CTL0_SERVICE_SIGNAL);
	sclp_clear_busy();
}

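/*
 * Spin until the current request has completed; the barrier forces
 * sclp_busy to be re-read from memory on every iteration.
 */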
void sclp_wait_busy(void)
{
	while (sclp_busy)
		mb();
}

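/*
 * A request/response cycle, as a sketch: the caller takes the busy
 * flag with sclp_mark_busy(), fills in an SCCB and issues it through
 * sclp_service_call(); the service-signal interrupt then releases the
 * flag via sclp_handle_ext() -> sclp_clear_busy().
 */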
void sclp_mark_busy(void)
{
	/*
	 * With multiple CPUs we might need to wait for another CPU's
	 * request before grabbing the busy indication.
	 */
	while (true) {
		sclp_wait_busy();
		spin_lock(&sclp_lock);
		if (!sclp_busy) {
			sclp_busy = true;
			spin_unlock(&sclp_lock);
			return;
		}
		spin_unlock(&sclp_lock);
	}
}

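/* Release the busy flag; also called from the interrupt path on completion */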
void sclp_clear_busy(void)
{
	spin_lock(&sclp_lock);
	sclp_busy = false;
	spin_unlock(&sclp_lock);
}

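/*
 * Read the SCP information block, trying the FORCED variant of the
 * command first and falling back to plain READ_SCP_INFO if the machine
 * rejects the forced variant as an invalid SCLP command.
 */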
static void sclp_read_scp_info(ReadInfo *ri, int length)
{
	unsigned int commands[] = { SCLP_CMDW_READ_SCP_INFO_FORCED,
				    SCLP_CMDW_READ_SCP_INFO };
	int i, cc;

	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		sclp_mark_busy();
		memset(&ri->h, 0, sizeof(ri->h));
		ri->h.length = length;

		cc = sclp_service_call(commands[i], ri);
		if (cc)
			break;
		if (ri->h.response_code == SCLP_RC_NORMAL_READ_COMPLETION)
			return;
		if (ri->h.response_code != SCLP_RC_INVALID_SCLP_COMMAND)
			break;
	}
	report_abort("READ_SCP_INFO failed");
}

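/*
 * With the extended-length-SCCB facility (STFLE bit 140) the SCCB may
 * be larger than a single page, so pass the full two-page buffer.
 */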
void sclp_read_info(void)
{
	sclp_read_scp_info((void *)_read_info,
		test_facility(140) ? sizeof(_read_info) : SCCB_SIZE);
	read_info = (ReadInfo *)_read_info;
}

int sclp_get_cpu_num(void)
{
	if (read_info)
		return read_info->entries_cpu;
	/*
	 * Don't abort here if read_info is NULL since abort() calls
	 * smp_teardown() which eventually calls this function and thus
	 * causes an infinite abort() chain, causing the test to hang.
	 * Since we obviously have at least one CPU, just return one.
	 */
	return 1;
}

CPUEntry *sclp_get_cpu_entries(void)
{
	assert(read_info);
	return (CPUEntry *)(_read_info + read_info->offset_cpu);
}

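/* Test a feature bit in the read-info block; bit 0 is the MSB of the byte */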
static bool sclp_feat_check(int byte, int bit)
{
	uint8_t *rib = (uint8_t *)read_info;

	return !!(rib[byte] & (0x80 >> bit));
}

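/*
 * Cache the facilities reported via READ_SCP_INFO in sclp_facilities
 * so tests can check for them without re-reading the SCP information.
 */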
void sclp_facilities_setup(void)
{
	unsigned short cpu0_addr = stap();
	CPUEntry *cpu;
	int i;

	assert(read_info);

	cpu = sclp_get_cpu_entries();
	if (read_info->offset_cpu > 134)
		sclp_facilities.has_diag318 = read_info->byte_134_diag318;
	sclp_facilities.has_sop = sclp_feat_check(80, SCLP_FEAT_80_BIT_SOP);
	sclp_facilities.has_gsls = sclp_feat_check(85, SCLP_FEAT_85_BIT_GSLS);
	sclp_facilities.has_esop = sclp_feat_check(85, SCLP_FEAT_85_BIT_ESOP);
	sclp_facilities.has_kss = sclp_feat_check(98, SCLP_FEAT_98_BIT_KSS);
	sclp_facilities.has_cmma = sclp_feat_check(116, SCLP_FEAT_116_BIT_CMMA);
	sclp_facilities.has_64bscao = sclp_feat_check(116, SCLP_FEAT_116_BIT_64BSCAO);
	sclp_facilities.has_esca = sclp_feat_check(116, SCLP_FEAT_116_BIT_ESCA);
	sclp_facilities.has_ibs = sclp_feat_check(117, SCLP_FEAT_117_BIT_IBS);
	sclp_facilities.has_pfmfi = sclp_feat_check(117, SCLP_FEAT_117_BIT_PFMFI);

	for (i = 0; i < read_info->entries_cpu; i++, cpu++) {
		/*
		 * Like the kernel, read the per-CPU facilities only
		 * from the boot CPU's entry. No documentation has been
		 * found yet that explains why this is necessary, but
		 * the kernel presumably has a reason for doing it this
		 * way.
		 */
		if (cpu->address == cpu0_addr) {
			sclp_facilities.has_sief2 = cpu->feat_sief2;
			sclp_facilities.has_skeyi = cpu->feat_skeyi;
			sclp_facilities.has_siif = cpu->feat_siif;
			sclp_facilities.has_sigpif = cpu->feat_sigpif;
			sclp_facilities.has_ib = cpu->feat_ib;
			sclp_facilities.has_cei = cpu->feat_cei;
			break;
		}
	}
}

/* Perform a service call. Returns 0 on success, non-zero otherwise. */
int sclp_service_call(unsigned int command, void *sccb)
{
	int cc;

	sclp_setup_int();
	cc = servc(command, __pa(sccb));
	sclp_wait_busy();
	/* cc 2: SCLP busy, cc 3: SCLP not operational */
	if (cc == 2 || cc == 3)
		return -1;
	return 0;
}

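/*
 * Determine the RAM size: the read-info block reports the storage
 * increment size and the maximum number of increments; TPROT then
 * probes how many increments are actually present and writable.
 */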
void sclp_memory_setup(void)
{
	uint64_t rnmax, rnsize;
	enum tprot_permission permission;

	assert(read_info);

	/*
	 * calculate the storage increment size; if the small rnsize
	 * field is zero, the value is reported in rnsize2 instead
	 */
	rnsize = read_info->rnsize;
	if (!rnsize)
		rnsize = read_info->rnsize2;
	storage_increment_size = rnsize << 20;

	/* calculate the maximum memory size, with the same fallback for rnmax */
	rnmax = read_info->rnmax;
	if (!rnmax)
		rnmax = read_info->rnmax2;
	max_ram_size = rnmax * storage_increment_size;

	/* lowcore is always accessible, so the first increment is accessible */
	ram_size = storage_increment_size;

	/* probe for r/w memory up to max memory size */
	while (ram_size < max_ram_size) {
		expect_pgm_int();
		permission = tprot(ram_size + storage_increment_size - 1, 0);
		/* stop once we receive an exception or have protected memory */
		if (clear_pgm_int() || permission != TPROT_READ_WRITE)
			break;
		ram_size += storage_increment_size;
	}

	mem_init(ram_size);
}

uint64_t get_ram_size(void)
{
	return ram_size;
}

uint64_t get_max_ram_size(void)
{
	return max_ram_size;
}

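/* Maximum nesting level (MNEST) for STSI, as reported by the SCP */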
uint64_t sclp_get_stsi_mnest(void)
{
	assert(read_info);
	return read_info->stsi_parm;
}