// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>

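/*
 * Resolve one of the machine ID keys (mvendorid/marchid/mimpid) to a value
 * that is consistent across every hart in @cpus, or -1 if the harts disagree.
 * Callers only pass one of the three ID keys handled in the switch below.
 */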
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

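/*
 * Build the RISCV_HWPROBE_KEY_IMA_EXT_0 bitmask for @cpus. A bit is reported
 * only when the corresponding extension is present on every hart in the set,
 * which is why per-hart misses are accumulated in @missing and cleared from
 * the result at the end.
 */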
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZAAMO);
		EXT_KEY(ZACAS);
		EXT_KEY(ZALRSC);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOM);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICNTR);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIHPM);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All the following extensions must depend on the kernel
		 * support of V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFBFMIN);
			EXT_KEY(ZVFBFWMA);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFBFMIN);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now turn off reporting of features that any CPU is missing. */
	pair->value &= ~missing;
}

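/*
 * Convenience helper: rerun the EXT_0 probe for @cpus and test a single
 * extension bit in the result.
 */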
static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

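/*
 * Scalar misaligned-access performance. With
 * CONFIG_RISCV_PROBE_UNALIGNED_ACCESS, the boot-time per-CPU probe results
 * are consulted and must agree across the whole set; otherwise the answer
 * comes straight from the kernel configuration.
 */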
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif

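/*
 * Vector misaligned-access performance, mirroring the scalar logic above:
 * per-CPU probe results when CONFIG_RISCV_VECTOR_MISALIGNED is set, a
 * Kconfig-derived answer otherwise.
 */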
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/* Return whether it's supported or not, even if the speed wasn't probed. */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif

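/*
 * Answer a single key/value query for @cpus by dispatching to the handler
 * for that key.
 */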
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
			pair->value = riscv_cbom_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
		hwprobe_isa_vendor_ext_thead_0(pair, cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

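/*
 * The default (flags == 0) form of the syscall: fill in a value for each
 * requested key, consistent across the given cpumask (or across all online
 * CPUs when cpusetsize/cpus_user are 0/NULL).
 */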
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without
		 * that, there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

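/*
 * The RISCV_HWPROBE_WHICH_CPUS form: the caller supplies fully-populated
 * key/value pairs, and the cpumask is narrowed to the CPUs whose answers
 * match every pair. Any invalid key empties the resulting mask.
 */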
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}

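/*
 * Top-level dispatch on the flags argument; each handler rechecks the flags
 * it accepts.
 */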
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

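/*
 * Precompute the "all online CPUs" answers at boot so the vDSO can satisfy
 * most probes without entering the kernel.
 */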
static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}
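
/*
 * Illustrative userspace call (a sketch, not part of this file): probe the
 * IMA extension bitmask common to all online CPUs by passing a NULL cpumask
 * with cpusetsize == 0, the shortcut handled in hwprobe_get_values() above.
 * Uses the uapi definitions from <asm/hwprobe.h> and raw syscall(2).
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    (pair.value & RISCV_HWPROBE_EXT_ZBB))
 *		;	// Zbb is available on every online hart.
 */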