1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Common Ultravisor functions and initialization
4 *
5 * Copyright IBM Corp. 2019, 2024
6 */
7 #define pr_fmt(fmt) "prot_virt: " fmt
8
9 #include <linux/export.h>
10 #include <linux/kernel.h>
11 #include <linux/types.h>
12 #include <linux/sizes.h>
13 #include <linux/bitmap.h>
14 #include <linux/memblock.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17 #include <linux/pagewalk.h>
18 #include <linux/backing-dev.h>
19 #include <asm/facility.h>
20 #include <asm/sections.h>
21 #include <asm/uv.h>
22
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */

/* Set by the decompressor when running as a protected virtualization guest. */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);

/*
 * uv_info contains both host and guest information but it's currently only
 * expected to be used within modules if it's the KVM module or for
 * any PV guest module.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

/*
 * Non-zero while the kernel can act as a protected virtualization host;
 * cleared by setup_uv() if Ultravisor initialization fails.
 */
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
40
uv_init(phys_addr_t stor_base,unsigned long stor_len)41 static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
42 {
43 struct uv_cb_init uvcb = {
44 .header.cmd = UVC_CMD_INIT_UV,
45 .header.len = sizeof(uvcb),
46 .stor_origin = stor_base,
47 .stor_len = stor_len,
48 };
49
50 if (uv_call(0, (uint64_t)&uvcb)) {
51 pr_err("Ultravisor init failed with rc: 0x%x rrc: 0%x\n",
52 uvcb.header.rc, uvcb.header.rrc);
53 return -1;
54 }
55 return 0;
56 }
57
setup_uv(void)58 void __init setup_uv(void)
59 {
60 void *uv_stor_base;
61
62 if (!is_prot_virt_host())
63 return;
64
65 uv_stor_base = memblock_alloc_try_nid(
66 uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
67 MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
68 if (!uv_stor_base) {
69 pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
70 uv_info.uv_base_stor_len);
71 goto fail;
72 }
73
74 if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
75 memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
76 goto fail;
77 }
78
79 pr_info("Reserving %luMB as ultravisor base storage\n",
80 uv_info.uv_base_stor_len >> 20);
81 return;
82 fail:
83 pr_info("Disabling support for protected virtualization");
84 prot_virt_host = 0;
85 }
86
87 /*
88 * Requests the Ultravisor to pin the page in the shared state. This will
89 * cause an intercept when the guest attempts to unshare the pinned page.
90 */
uv_pin_shared(unsigned long paddr)91 int uv_pin_shared(unsigned long paddr)
92 {
93 struct uv_cb_cfs uvcb = {
94 .header.cmd = UVC_CMD_PIN_PAGE_SHARED,
95 .header.len = sizeof(uvcb),
96 .paddr = paddr,
97 };
98
99 if (uv_call(0, (u64)&uvcb))
100 return -EINVAL;
101 return 0;
102 }
103 EXPORT_SYMBOL_GPL(uv_pin_shared);
104
105 /*
106 * Requests the Ultravisor to destroy a guest page and make it
107 * accessible to the host. The destroy clears the page instead of
108 * exporting.
109 *
110 * @paddr: Absolute host address of page to be destroyed
111 */
uv_destroy(unsigned long paddr)112 static int uv_destroy(unsigned long paddr)
113 {
114 struct uv_cb_cfs uvcb = {
115 .header.cmd = UVC_CMD_DESTR_SEC_STOR,
116 .header.len = sizeof(uvcb),
117 .paddr = paddr
118 };
119
120 if (uv_call(0, (u64)&uvcb)) {
121 /*
122 * Older firmware uses 107/d as an indication of a non secure
123 * page. Let us emulate the newer variant (no-op).
124 */
125 if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
126 return 0;
127 return -EINVAL;
128 }
129 return 0;
130 }
131
132 /*
133 * The caller must already hold a reference to the folio
134 */
uv_destroy_folio(struct folio * folio)135 int uv_destroy_folio(struct folio *folio)
136 {
137 unsigned long i;
138 int rc;
139
140 folio_get(folio);
141 for (i = 0; i < (1 << folio_order(folio)); i++) {
142 rc = uv_destroy(folio_to_phys(folio) + i * PAGE_SIZE);
143 if (rc)
144 break;
145 }
146 if (!rc)
147 clear_bit(PG_arch_1, &folio->flags.f);
148 folio_put(folio);
149 return rc;
150 }
151 EXPORT_SYMBOL(uv_destroy_folio);
152
153 /*
154 * The present PTE still indirectly holds a folio reference through the mapping.
155 */
uv_destroy_pte(pte_t pte)156 int uv_destroy_pte(pte_t pte)
157 {
158 VM_WARN_ON(!pte_present(pte));
159 return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
160 }
161
162 /*
163 * Requests the Ultravisor to encrypt a guest page and make it
164 * accessible to the host for paging (export).
165 *
166 * @paddr: Absolute host address of page to be exported
167 */
uv_convert_from_secure(unsigned long paddr)168 int uv_convert_from_secure(unsigned long paddr)
169 {
170 struct uv_cb_cfs uvcb = {
171 .header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
172 .header.len = sizeof(uvcb),
173 .paddr = paddr
174 };
175
176 if (uv_call(0, (u64)&uvcb))
177 return -EINVAL;
178 return 0;
179 }
180 EXPORT_SYMBOL_GPL(uv_convert_from_secure);
181
182 /*
183 * The caller must already hold a reference to the folio.
184 */
uv_convert_from_secure_folio(struct folio * folio)185 int uv_convert_from_secure_folio(struct folio *folio)
186 {
187 unsigned long i;
188 int rc;
189
190 folio_get(folio);
191 for (i = 0; i < (1 << folio_order(folio)); i++) {
192 rc = uv_convert_from_secure(folio_to_phys(folio) + i * PAGE_SIZE);
193 if (rc)
194 break;
195 }
196 if (!rc)
197 clear_bit(PG_arch_1, &folio->flags.f);
198 folio_put(folio);
199 return rc;
200 }
201 EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);
202
203 /*
204 * The present PTE still indirectly holds a folio reference through the mapping.
205 */
uv_convert_from_secure_pte(pte_t pte)206 int uv_convert_from_secure_pte(pte_t pte)
207 {
208 VM_WARN_ON(!pte_present(pte));
209 return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
210 }
211
212 /*
213 * Calculate the expected ref_count for a folio that would otherwise have no
214 * further pins. This was cribbed from similar functions in other places in
215 * the kernel, but with some slight modifications. We know that a secure
216 * folio can not be a large folio, for example.
217 */
expected_folio_refs(struct folio * folio)218 static int expected_folio_refs(struct folio *folio)
219 {
220 int res;
221
222 res = folio_mapcount(folio);
223 if (folio_test_swapcache(folio)) {
224 res++;
225 } else if (folio_mapping(folio)) {
226 res++;
227 if (folio->private)
228 res++;
229 }
230 return res;
231 }
232
/**
 * __make_folio_secure() - make a folio secure
 * @folio: the folio to make secure
 * @uvcb: the uvcb that describes the UVC to be used
 *
 * The folio @folio will be made secure if possible, @uvcb will be passed
 * as-is to the UVC.
 *
 * Return: 0 on success;
 *         -EBUSY if the folio is in writeback or has too many references;
 *         -EAGAIN if the UVC needs to be attempted again;
 *         -ENXIO if the address is not mapped;
 *         -EINVAL if the UVC failed for other reasons.
 *
 * Context: The caller must hold exactly one extra reference on the folio
 *          (it's the same logic as split_folio()), and the folio must be
 *          locked.
 */
int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (folio_test_writeback(folio))
		return -EBUSY;
	/*
	 * Freeze the refcount at the expected value (+1 for the caller's
	 * extra reference) so nobody can gain a new reference while the
	 * UVC runs; failure to freeze means someone else is using the folio.
	 */
	expected = expected_folio_refs(folio) + 1;
	if (!folio_ref_freeze(folio, expected))
		return -EBUSY;
	/* mark the folio as potentially secure before issuing the UVC */
	set_bit(PG_arch_1, &folio->flags.f);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	folio_ref_unfreeze(folio, expected);
	/*
	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
EXPORT_SYMBOL(__make_folio_secure);
282
/**
 * s390_wiggle_split_folio() - try to drain extra references to a folio and
 *			       split the folio if it is large.
 * @mm: the mm containing the folio to work on
 * @folio: the folio
 *
 * Context: Must be called while holding an extra reference to the folio;
 *          the mm lock should not be held.
 * Return: 0 if the operation was successful;
 *	   -EAGAIN if splitting the large folio was not successful,
 *		   but another attempt can be made;
 *	   -EINVAL in case of other folio splitting errors. See split_folio().
 */
int s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio)
{
	int rc, tried_splits;

	lockdep_assert_not_held(&mm->mmap_lock);
	/* wait for pending writeback and drain the LRU caches first */
	folio_wait_writeback(folio);
	lru_add_drain_all();

	/* small folios need no splitting */
	if (!folio_test_large(folio))
		return 0;

	/* at most two split attempts, with a writeback pass in between */
	for (tried_splits = 0; tried_splits < 2; tried_splits++) {
		struct address_space *mapping;
		loff_t lstart, lend;
		struct inode *inode;

		folio_lock(folio);
		rc = split_folio(folio);
		if (rc != -EBUSY) {
			/* definite success or definite failure: report it */
			folio_unlock(folio);
			return rc;
		}

		/*
		 * Splitting with -EBUSY can fail for various reasons, but we
		 * have to handle one case explicitly for now: some mappings
		 * don't allow for splitting dirty folios; writeback will
		 * mark them clean again, including marking all page table
		 * entries mapping the folio read-only, to catch future write
		 * attempts.
		 *
		 * While the system should be writing back dirty folios in the
		 * background, we obtained this folio by looking up a writable
		 * page table entry. On these problematic mappings, writable
		 * page table entries imply dirty folios, preventing the
		 * split in the first place.
		 *
		 * To prevent a livelock when triggering writeback manually
		 * and letting the caller look up the folio again in the page
		 * table (turning it dirty), immediately try to split again.
		 *
		 * This is only a problem for some mappings (e.g., XFS);
		 * mappings that do not support writeback (e.g., shmem) do not
		 * apply.
		 */
		if (!folio_test_dirty(folio) || folio_test_anon(folio) ||
		    !folio->mapping || !mapping_can_writeback(folio->mapping)) {
			folio_unlock(folio);
			break;
		}

		/*
		 * Ideally, we'd only trigger writeback on this exact folio. But
		 * there is no easy way to do that, so we'll stabilize the
		 * mapping while we still hold the folio lock, so we can drop
		 * the folio lock to trigger writeback on the range currently
		 * covered by the folio instead.
		 */
		mapping = folio->mapping;
		lstart = folio_pos(folio);
		lend = lstart + folio_size(folio) - 1;
		/* pin the inode so the mapping stays valid without the lock */
		inode = igrab(mapping->host);
		folio_unlock(folio);

		if (unlikely(!inode))
			break;

		filemap_write_and_wait_range(mapping, lstart, lend);
		iput(mapping->host);
	}
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(s390_wiggle_split_folio);
369
370 /*
371 * To be called with the folio locked or with an extra reference! This will
372 * prevent kvm_s390_pv_make_secure() from touching the folio concurrently.
373 * Having 2 parallel arch_make_folio_accessible is fine, as the UV calls will
374 * become a no-op if the folio is already exported.
375 */
arch_make_folio_accessible(struct folio * folio)376 int arch_make_folio_accessible(struct folio *folio)
377 {
378 int rc = 0;
379
380 /*
381 * PG_arch_1 is used as an indication that this small folio might be
382 * secure. This can overindicate, e.g. we set the bit before calling
383 * convert_to_secure.
384 */
385 if (!test_bit(PG_arch_1, &folio->flags.f))
386 return 0;
387
388 /* Large folios cannot be secure. */
389 if (WARN_ON_ONCE(folio_test_large(folio)))
390 return -EFAULT;
391
392 rc = uv_pin_shared(folio_to_phys(folio));
393 if (!rc) {
394 clear_bit(PG_arch_1, &folio->flags.f);
395 return 0;
396 }
397
398 rc = uv_convert_from_secure(folio_to_phys(folio));
399 if (!rc) {
400 clear_bit(PG_arch_1, &folio->flags.f);
401 return 0;
402 }
403
404 return rc;
405 }
406 EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
407
/* sysfs show: the four installed-UV-call bitmap words, one hex value per line */
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);
420
/* sysfs show: supported secure-execution header versions (hex) */
static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);
429
/* sysfs show: supported secure-execution header plaintext control flags (hex) */
static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);
438
/* sysfs show: guest CPU dump storage length (hex) */
static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

/*
 * NOTE(review): unlike its siblings, this sysfs file is named after the
 * function ("uv_query_dump_cpu_len" instead of "dump_cpu_len"); kept as-is
 * because the file name is user-visible ABI.
 */
static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);
447
/* sysfs show: per-configuration dump storage state length (hex) */
static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);
456
/* sysfs show: dump finalize data length (hex) */
static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);
465
/* sysfs show: UV feature indication bits (hex) */
static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
474
/* sysfs show: maximum number of guest CPUs (max CPU id is zero-based, so +1) */
static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);
483
/* sysfs show: maximum number of secure guest configurations */
static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);
492
/* sysfs show: maximum secure storage address (hex) */
static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
501
/* sysfs show: supported attestation request header versions (hex) */
static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);
510
/* sysfs show: supported attestation plaintext flags (hex) */
static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);
519
/* sysfs show: supported add-secret request versions (hex) */
static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);
528
/* sysfs show: supported add-secret plaintext control flags (hex) */
static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);
537
/* sysfs show: bitmap of supported secret types (hex) */
static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);
546
/* sysfs show: total secret capacity (associated + retrievable) */
static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n",
			  uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);
556
/* sysfs show: maximum number of retrievable secrets */
static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_retr_secrets_attr =
	__ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);
565
/* sysfs show: maximum number of associated secrets */
static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
}

static struct kobj_attribute uv_query_max_assoc_secrets_attr =
	__ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);
575
/* All attributes exposed under the "query" kset (see uv_sysfs_init()). */
static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	&uv_query_max_assoc_secrets_attr.attr,
	&uv_query_max_retr_secrets_attr.attr,
	NULL,
};
597
uv_query_keys(void)598 static inline struct uv_cb_query_keys uv_query_keys(void)
599 {
600 struct uv_cb_query_keys uvcb = {
601 .header.cmd = UVC_CMD_QUERY_KEYS,
602 .header.len = sizeof(uvcb)
603 };
604
605 uv_call(0, (uint64_t)&uvcb);
606 return uvcb;
607 }
608
emit_hash(struct uv_key_hash * hash,char * buf,int at)609 static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
610 {
611 return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
612 hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
613 }
614
/* sysfs show: hash of the current host key */
static ssize_t uv_keys_host_key(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
}

static struct kobj_attribute uv_keys_host_key_attr =
	__ATTR(host_key, 0444, uv_keys_host_key, NULL);
625
/* sysfs show: hash of the backup host key */
static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
}

static struct kobj_attribute uv_keys_backup_host_key_attr =
	__ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);
636
uv_keys_all(struct kobject * kobj,struct kobj_attribute * attr,char * buf)637 static ssize_t uv_keys_all(struct kobject *kobj,
638 struct kobj_attribute *attr, char *buf)
639 {
640 struct uv_cb_query_keys uvcb = uv_query_keys();
641 ssize_t len = 0;
642 int i;
643
644 for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
645 len += emit_hash(uvcb.key_hashes + i, buf, len);
646
647 return len;
648 }
649
650 static struct kobj_attribute uv_keys_all_attr =
651 __ATTR(all, 0444, uv_keys_all, NULL);
652
/* Attribute group attached to the "query" kset. */
static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

/* Attributes exposed under the "keys" kset. */
static struct attribute *uv_keys_attrs[] = {
	&uv_keys_host_key_attr.attr,
	&uv_keys_backup_host_key_attr.attr,
	&uv_keys_all_attr.attr,
	NULL,
};

/* Attribute group attached to the "keys" kset. */
static struct attribute_group uv_keys_attr_group = {
	.attrs = uv_keys_attrs,
};
667
/* sysfs show: whether this system runs as a protected virtualization guest */
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_guest);
}

/* sysfs show: whether this system can host protected virtualization guests */
static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_host);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

/* Files created directly in the top-level "uv" kobject directory. */
static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};
691
/* sysfs anchors: the "uv" directory and its "query" and "keys" subdirectories */
static struct kset *uv_query_kset;
static struct kset *uv_keys_kset;
static struct kobject *uv_kobj;
695
uv_sysfs_dir_init(const struct attribute_group * grp,struct kset ** uv_dir_kset,const char * name)696 static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
697 struct kset **uv_dir_kset, const char *name)
698 {
699 struct kset *kset;
700 int rc;
701
702 kset = kset_create_and_add(name, NULL, uv_kobj);
703 if (!kset)
704 return -ENOMEM;
705 *uv_dir_kset = kset;
706
707 rc = sysfs_create_group(&kset->kobj, grp);
708 if (rc)
709 kset_unregister(kset);
710 return rc;
711 }
712
/*
 * Create the /sys/firmware/uv hierarchy: the prot_virt_* files, the "query"
 * directory, and (if the query-keys UVC is installed) the "keys" directory.
 */
static int __init uv_sysfs_init(void)
{
	int rc = -ENOMEM;

	/* without facility 158 there is no Ultravisor; nothing to expose */
	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	/* uv_sysfs_dir_init() cleans up its own kset on failure */
	rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
	if (rc)
		goto out_ind_files;

	/* Get installed key hashes if available, ignore any errors */
	if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
		uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");

	return 0;

out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	/* undo kobject_create_and_add(): remove from sysfs, then drop the ref */
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_sysfs_init);
746
747 /*
748 * Locate a secret in the list by its id.
749 * @secret_id: search pattern.
750 * @list: ephemeral buffer space
751 * @secret: output data, containing the secret's metadata.
752 *
753 * Search for a secret with the given secret_id in the Ultravisor secret store.
754 *
755 * Context: might sleep.
756 */
find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],const struct uv_secret_list * list,struct uv_secret_list_item_hdr * secret)757 static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
758 const struct uv_secret_list *list,
759 struct uv_secret_list_item_hdr *secret)
760 {
761 u16 i;
762
763 for (i = 0; i < list->total_num_secrets; i++) {
764 if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
765 *secret = list->secrets[i].hdr;
766 return 0;
767 }
768 }
769 return -ENOENT;
770 }
771
/**
 * uv_find_secret() - search secret metadata for a given secret id.
 * @secret_id: search pattern.
 * @list: ephemeral buffer space
 * @secret: output data, containing the secret's metadata.
 *
 * Walks the Ultravisor secret store page by page until the id is found or
 * the list is exhausted.
 *
 * Return: 0 if found (with *@secret filled in); -ENOENT if not present;
 *         -ENODEV if the list UVC is not available; -EIO on other UV errors.
 *
 * Context: might sleep.
 */
int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
		   struct uv_secret_list *list,
		   struct uv_secret_list_item_hdr *secret)
{
	u16 start_idx = 0;
	u16 list_rc;
	int ret;

	do {
		uv_list_secrets(list, start_idx, &list_rc, NULL);
		if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
			if (list_rc == UVC_RC_INV_CMD)
				return -ENODEV;
			else
				return -EIO;
		}
		ret = find_secret_in_page(secret_id, list, secret);
		if (ret == 0)
			return ret;
		/*
		 * Continue with the next page. The start_idx < next_secret_idx
		 * check guards against a non-advancing (or wrapping) index,
		 * which would otherwise loop forever.
		 */
		start_idx = list->next_secret_idx;
	} while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(uv_find_secret);
805
806 /**
807 * uv_retrieve_secret() - get the secret value for the secret index.
808 * @secret_idx: Secret index for which the secret should be retrieved.
809 * @buf: Buffer to store retrieved secret.
810 * @buf_size: Size of the buffer. The correct buffer size is reported as part of
811 * the result from `uv_get_secret_metadata`.
812 *
813 * Calls the Retrieve Secret UVC and translates the UV return code into an errno.
814 *
815 * Context: might sleep.
816 *
817 * Return:
818 * * %0 - Entry found; buffer contains a valid secret.
819 * * %ENOENT: - No entry found or secret at the index is non-retrievable.
820 * * %ENODEV: - Not supported: UV not available or command not available.
821 * * %EINVAL: - Buffer too small for content.
822 * * %EIO: - Other unexpected UV error.
823 */
uv_retrieve_secret(u16 secret_idx,u8 * buf,size_t buf_size)824 int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
825 {
826 struct uv_cb_retr_secr uvcb = {
827 .header.len = sizeof(uvcb),
828 .header.cmd = UVC_CMD_RETR_SECRET,
829 .secret_idx = secret_idx,
830 .buf_addr = (u64)buf,
831 .buf_size = buf_size,
832 };
833
834 uv_call_sched(0, (u64)&uvcb);
835
836 switch (uvcb.header.rc) {
837 case UVC_RC_EXECUTED:
838 return 0;
839 case UVC_RC_INV_CMD:
840 return -ENODEV;
841 case UVC_RC_RETR_SECR_STORE_EMPTY:
842 case UVC_RC_RETR_SECR_INV_SECRET:
843 case UVC_RC_RETR_SECR_INV_IDX:
844 return -ENOENT;
845 case UVC_RC_RETR_SECR_BUF_SMALL:
846 return -EINVAL;
847 default:
848 return -EIO;
849 }
850 }
851 EXPORT_SYMBOL_GPL(uv_retrieve_secret);
852