// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2024
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/pagewalk.h>
#include <linux/backing-dev.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);

/*
 * uv_info contains both host and guest information, but it's currently only
 * expected to be used within modules, namely by the KVM module or by any
 * PV guest module.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
        struct uv_cb_init uvcb = {
                .header.cmd = UVC_CMD_INIT_UV,
                .header.len = sizeof(uvcb),
                .stor_origin = stor_base,
                .stor_len = stor_len,
        };

        if (uv_call(0, (uint64_t)&uvcb)) {
                pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
                       uvcb.header.rc, uvcb.header.rrc);
                return -1;
        }
        return 0;
}

void __init setup_uv(void)
{
        void *uv_stor_base;

        if (!is_prot_virt_host())
                return;

        uv_stor_base = memblock_alloc_try_nid(
                uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
                MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
        if (!uv_stor_base) {
                pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
                        uv_info.uv_base_stor_len);
                goto fail;
        }

        if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
                memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
                goto fail;
        }

        pr_info("Reserving %luMB as ultravisor base storage\n",
                uv_info.uv_base_stor_len >> 20);
        return;
fail:
        pr_info("Disabling support for protected virtualization\n");
        prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_PIN_PAGE_SHARED,
                .header.len = sizeof(uvcb),
                .paddr = paddr,
        };

        if (uv_call(0, (u64)&uvcb))
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);
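
/*
 * Illustrative sketch only, compiled out: how a hypothetical caller that
 * already holds a folio reference might pin a small shared folio via
 * uv_pin_shared(). The helper name is made up for illustration.
 */
#if 0
static int example_pin_shared_folio(struct folio *folio)
{
        /* Secure/shared pages are never large folios on s390 */
        if (folio_test_large(folio))
                return -EINVAL;
        return uv_pin_shared(folio_to_phys(folio));
}
#endif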

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_DESTR_SEC_STOR,
                .header.len = sizeof(uvcb),
                .paddr = paddr
        };

        if (uv_call(0, (u64)&uvcb)) {
                /*
                 * Older firmware uses 107/d as an indication of a non-secure
                 * page. Let us emulate the newer variant (no-op).
                 */
                if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
                        return 0;
                return -EINVAL;
        }
        return 0;
}

/*
 * The caller must already hold a reference to the folio
 */
int uv_destroy_folio(struct folio *folio)
{
        int rc;

        /* Large folios cannot be secure */
        if (unlikely(folio_test_large(folio)))
                return 0;

        folio_get(folio);
        rc = uv_destroy(folio_to_phys(folio));
        if (!rc)
                clear_bit(PG_arch_1, &folio->flags);
        folio_put(folio);
        return rc;
}
EXPORT_SYMBOL(uv_destroy_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_destroy_pte(pte_t pte)
{
        VM_WARN_ON(!pte_present(pte));
        return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
                .header.len = sizeof(uvcb),
                .paddr = paddr
        };

        if (uv_call(0, (u64)&uvcb))
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL_GPL(uv_convert_from_secure);

/*
 * The caller must already hold a reference to the folio.
 */
int uv_convert_from_secure_folio(struct folio *folio)
{
        int rc;

        /* Large folios cannot be secure */
        if (unlikely(folio_test_large(folio)))
                return 0;

        folio_get(folio);
        rc = uv_convert_from_secure(folio_to_phys(folio));
        if (!rc)
                clear_bit(PG_arch_1, &folio->flags);
        folio_put(folio);
        return rc;
}
EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_convert_from_secure_pte(pte_t pte)
{
        VM_WARN_ON(!pte_present(pte));
        return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}

/**
 * should_export_before_import - Determine whether an export is needed
 *                               before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * An export is also not needed when there is only one protected VM, because
 * the page cannot belong to the wrong VM in that case (there is no "other
 * VM" it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
        /*
         * The misc feature indicates, among other things, that importing a
         * shared page from a different protected VM will automatically also
         * transfer its ownership.
         */
        if (uv_has_feature(BIT_UV_FEAT_MISC))
                return false;
        if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
                return false;
        return atomic_read(&mm->context.protected_count) > 1;
}
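
/*
 * Decision summary (illustrative, derived from the checks above): with the
 * misc UV feature the export is never needed, the Unpin Page Shared UVC
 * never needs it, and otherwise it is needed exactly when more than one
 * protected VM exists (mm->context.protected_count > 1).
 */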

/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio cannot be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
        int res;

        res = folio_mapcount(folio);
        if (folio_test_swapcache(folio)) {
                res++;
        } else if (folio_mapping(folio)) {
                res++;
                if (folio->private)
                        res++;
        }
        return res;
}
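
/*
 * Worked example (sketch): a small folio mapped by two PTEs that also sits
 * in the swap cache has folio_mapcount() == 2 plus one swap-cache
 * reference, so expected_folio_refs() returns 3; __make_folio_secure()
 * below adds one for the caller's extra reference and freezes the
 * refcount at 4.
 */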

/**
 * __make_folio_secure() - make a folio secure
 * @folio: the folio to make secure
 * @uvcb: the uvcb that describes the UVC to be used
 *
 * The folio @folio will be made secure if possible, @uvcb will be passed
 * as-is to the UVC.
 *
 * Return: 0 on success;
 *         -EBUSY if the folio is in writeback or has too many references;
 *         -EAGAIN if the UVC needs to be attempted again;
 *         -ENXIO if the address is not mapped;
 *         -EINVAL if the UVC failed for other reasons.
 *
 * Context: The caller must hold exactly one extra reference on the folio
 *          (it's the same logic as split_folio()), and the folio must be
 *          locked.
 */
static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
        int expected, cc = 0;

        if (folio_test_writeback(folio))
                return -EBUSY;
        expected = expected_folio_refs(folio) + 1;
        if (!folio_ref_freeze(folio, expected))
                return -EBUSY;
        set_bit(PG_arch_1, &folio->flags);
        /*
         * If the UVC does not succeed or fail immediately, we don't want to
         * loop for long, or we might get stall notifications.
         * On the other hand, this is a complex scenario and we are holding a lot of
         * locks, so we can't easily sleep and reschedule. We try only once,
         * and if the UVC returned busy or partial completion, we return
         * -EAGAIN and we let the callers deal with it.
         */
        cc = __uv_call(0, (u64)uvcb);
        folio_ref_unfreeze(folio, expected);
        /*
         * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
         * If busy or partially completed, return -EAGAIN.
         */
        if (cc == UVC_CC_OK)
                return 0;
        else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
                return -EAGAIN;
        return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

static int make_folio_secure(struct mm_struct *mm, struct folio *folio, struct uv_cb_header *uvcb)
{
        int rc;

        if (!folio_trylock(folio))
                return -EAGAIN;
        if (should_export_before_import(uvcb, mm))
                uv_convert_from_secure(folio_to_phys(folio));
        rc = __make_folio_secure(folio, uvcb);
        folio_unlock(folio);

        return rc;
}

/**
 * s390_wiggle_split_folio() - try to drain extra references to a folio and
 *                             split the folio if it is large.
 * @mm: the mm containing the folio to work on
 * @folio: the folio
 *
 * Context: Must be called while holding an extra reference to the folio;
 *          the mm lock should not be held.
 * Return: 0 if the operation was successful;
 *         -EAGAIN if splitting the large folio was not successful,
 *                 but another attempt can be made;
 *         -EINVAL in case of other folio splitting errors. See split_folio().
 */
static int s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio)
{
        int rc, tried_splits;

        lockdep_assert_not_held(&mm->mmap_lock);
        folio_wait_writeback(folio);
        lru_add_drain_all();

        if (!folio_test_large(folio))
                return 0;

        for (tried_splits = 0; tried_splits < 2; tried_splits++) {
                struct address_space *mapping;
                loff_t lstart, lend;
                struct inode *inode;

                folio_lock(folio);
                rc = split_folio(folio);
                if (rc != -EBUSY) {
                        folio_unlock(folio);
                        return rc;
                }

                /*
                 * Splitting with -EBUSY can fail for various reasons, but we
                 * have to handle one case explicitly for now: some mappings
                 * don't allow for splitting dirty folios; writeback will
                 * mark them clean again, including marking all page table
                 * entries mapping the folio read-only, to catch future write
                 * attempts.
                 *
                 * While the system should be writing back dirty folios in the
                 * background, we obtained this folio by looking up a writable
                 * page table entry. On these problematic mappings, writable
                 * page table entries imply dirty folios, preventing the
                 * split in the first place.
                 *
                 * To prevent a livelock when triggering writeback manually
                 * and letting the caller look up the folio again in the page
                 * table (turning it dirty), immediately try to split again.
                 *
                 * This is only a problem for some mappings (e.g., XFS);
                 * mappings that do not support writeback (e.g., shmem) do not
                 * apply.
                 */
                if (!folio_test_dirty(folio) || folio_test_anon(folio) ||
                    !folio->mapping || !mapping_can_writeback(folio->mapping)) {
                        folio_unlock(folio);
                        break;
                }

                /*
                 * Ideally, we'd only trigger writeback on this exact folio. But
                 * there is no easy way to do that, so we'll stabilize the
                 * mapping while we still hold the folio lock, so we can drop
                 * the folio lock to trigger writeback on the range currently
                 * covered by the folio instead.
                 */
                mapping = folio->mapping;
                lstart = folio_pos(folio);
                lend = lstart + folio_size(folio) - 1;
                inode = igrab(mapping->host);
                folio_unlock(folio);

                if (unlikely(!inode))
                        break;

                filemap_write_and_wait_range(mapping, lstart, lend);
                iput(mapping->host);
        }
        return -EAGAIN;
}

int make_hva_secure(struct mm_struct *mm, unsigned long hva, struct uv_cb_header *uvcb)
{
        struct vm_area_struct *vma;
        struct folio_walk fw;
        struct folio *folio;
        int rc;

        mmap_read_lock(mm);
        vma = vma_lookup(mm, hva);
        if (!vma) {
                mmap_read_unlock(mm);
                return -EFAULT;
        }
        folio = folio_walk_start(&fw, vma, hva, 0);
        if (!folio) {
                mmap_read_unlock(mm);
                return -ENXIO;
        }

        folio_get(folio);
        /*
         * Secure pages cannot be huge, and userspace should not combine both.
         * In case userspace does it anyway, this will result in an -EFAULT for
         * the unpack. The guest thus never reaches secure mode.
         * If userspace plays dirty tricks and decides to map huge pages at a
         * later point in time, it will receive a segmentation fault or
         * KVM_RUN will return -EFAULT.
         */
        if (folio_test_hugetlb(folio))
                rc = -EFAULT;
        else if (folio_test_large(folio))
                rc = -E2BIG;
        else if (!pte_write(fw.pte) || (pte_val(fw.pte) & _PAGE_INVALID))
                rc = -ENXIO;
        else
                rc = make_folio_secure(mm, folio, uvcb);
        folio_walk_end(&fw, vma);
        mmap_read_unlock(mm);

        if (rc == -E2BIG || rc == -EBUSY) {
                rc = s390_wiggle_split_folio(mm, folio);
                if (!rc)
                        rc = -EAGAIN;
        }
        folio_put(folio);

        return rc;
}
EXPORT_SYMBOL_GPL(make_hva_secure);
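
/*
 * Illustrative sketch only, compiled out: a hypothetical caller of
 * make_hva_secure() retrying while extra references are drained or a
 * large folio is split; a real caller would bound its retries similarly.
 */
#if 0
static int example_make_hva_secure(struct mm_struct *mm, unsigned long hva,
                                   struct uv_cb_header *uvcb)
{
        int i, rc = -EAGAIN;

        for (i = 0; i < 5 && rc == -EAGAIN; i++)
                rc = make_hva_secure(mm, hva, uvcb);
        return rc;
}
#endif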

/*
 * To be called with the folio locked or with an extra reference! This will
 * prevent kvm_s390_pv_make_secure() from touching the folio concurrently.
 * Having 2 parallel arch_make_folio_accessible is fine, as the UV calls will
 * become a no-op if the folio is already exported.
 */
int arch_make_folio_accessible(struct folio *folio)
{
        int rc = 0;

        /* Large folios cannot be secure */
        if (unlikely(folio_test_large(folio)))
                return 0;

        /*
         * PG_arch_1 is used in 2 places:
         * 1. for storage keys of hugetlb folios and KVM
         * 2. As an indication that this small folio might be secure. This can
         *    overindicate, e.g. we set the bit before calling
         *    convert_to_secure.
         * As secure pages are never large folios, both variants can co-exist.
         */
        if (!test_bit(PG_arch_1, &folio->flags))
                return 0;

        rc = uv_pin_shared(folio_to_phys(folio));
        if (!rc) {
                clear_bit(PG_arch_1, &folio->flags);
                return 0;
        }

        rc = uv_convert_from_secure(folio_to_phys(folio));
        if (!rc) {
                clear_bit(PG_arch_1, &folio->flags);
                return 0;
        }

        return rc;
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);

static ssize_t uv_query_facilities(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
                          uv_info.inst_calls_list[0],
                          uv_info.inst_calls_list[1],
                          uv_info.inst_calls_list[2],
                          uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
        __ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
        __ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
        __ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
        __ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
                                               struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
        __ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
                                          struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
        __ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
                                            struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
        __ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
        __ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
        __ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
        __ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
                                             struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
        __ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
        __ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
                                                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
        __ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
                                            struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
        __ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
                                          struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
        __ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n",
                          uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
        __ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
                                         struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_retr_secrets_attr =
        __ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);

static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
}

static struct kobj_attribute uv_query_max_assoc_secrets_attr =
        __ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
        &uv_query_facilities_attr.attr,
        &uv_query_feature_indications_attr.attr,
        &uv_query_max_guest_cpus_attr.attr,
        &uv_query_max_guest_vms_attr.attr,
        &uv_query_max_guest_addr_attr.attr,
        &uv_query_supp_se_hdr_ver_attr.attr,
        &uv_query_supp_se_hdr_pcf_attr.attr,
        &uv_query_dump_storage_state_len_attr.attr,
        &uv_query_dump_finalize_len_attr.attr,
        &uv_query_dump_cpu_len_attr.attr,
        &uv_query_supp_att_req_hdr_ver_attr.attr,
        &uv_query_supp_att_pflags_attr.attr,
        &uv_query_supp_add_secret_req_ver_attr.attr,
        &uv_query_supp_add_secret_pcf_attr.attr,
        &uv_query_supp_secret_types_attr.attr,
        &uv_query_max_secrets_attr.attr,
        &uv_query_max_assoc_secrets_attr.attr,
        &uv_query_max_retr_secrets_attr.attr,
        NULL,
};
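
/*
 * Illustrative: once uv_sysfs_init() below has run, the attributes above
 * surface as read-only files under /sys/firmware/uv/query/, e.g. (the
 * value shown is made up; it is uv_info.max_guest_cpu_id + 1):
 *
 *   $ cat /sys/firmware/uv/query/max_cpus
 *   248
 */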

static inline struct uv_cb_query_keys uv_query_keys(void)
{
        struct uv_cb_query_keys uvcb = {
                .header.cmd = UVC_CMD_QUERY_KEYS,
                .header.len = sizeof(uvcb)
        };

        uv_call(0, (uint64_t)&uvcb);
        return uvcb;
}

static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
{
        return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
                             hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
}

static ssize_t uv_keys_host_key(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
{
        struct uv_cb_query_keys uvcb = uv_query_keys();

        return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
}

static struct kobj_attribute uv_keys_host_key_attr =
        __ATTR(host_key, 0444, uv_keys_host_key, NULL);

static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        struct uv_cb_query_keys uvcb = uv_query_keys();

        return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
}

static struct kobj_attribute uv_keys_backup_host_key_attr =
        __ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);

static ssize_t uv_keys_all(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct uv_cb_query_keys uvcb = uv_query_keys();
        ssize_t len = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
                len += emit_hash(uvcb.key_hashes + i, buf, len);

        return len;
}

static struct kobj_attribute uv_keys_all_attr =
        __ATTR(all, 0444, uv_keys_all, NULL);

static struct attribute_group uv_query_attr_group = {
        .attrs = uv_query_attrs,
};

static struct attribute *uv_keys_attrs[] = {
        &uv_keys_host_key_attr.attr,
        &uv_keys_backup_host_key_attr.attr,
        &uv_keys_all_attr.attr,
        NULL,
};

static struct attribute_group uv_keys_attr_group = {
        .attrs = uv_keys_attrs,
};
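
/*
 * The key-hash attributes above appear under /sys/firmware/uv/keys/, and
 * only when the Query Keys UVC is installed (see uv_sysfs_init() below).
 */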

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", prot_virt_guest);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", prot_virt_host);
}

static struct kobj_attribute uv_prot_virt_guest =
        __ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
        __ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
        &uv_prot_virt_guest.attr,
        &uv_prot_virt_host.attr,
        NULL,
};

static struct kset *uv_query_kset;
static struct kset *uv_keys_kset;
static struct kobject *uv_kobj;

static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
                                    struct kset **uv_dir_kset, const char *name)
{
        struct kset *kset;
        int rc;

        kset = kset_create_and_add(name, NULL, uv_kobj);
        if (!kset)
                return -ENOMEM;
        *uv_dir_kset = kset;

        rc = sysfs_create_group(&kset->kobj, grp);
        if (rc)
                kset_unregister(kset);
        return rc;
}

static int __init uv_sysfs_init(void)
{
        int rc = -ENOMEM;

        if (!test_facility(158))
                return 0;

        uv_kobj = kobject_create_and_add("uv", firmware_kobj);
        if (!uv_kobj)
                return -ENOMEM;

        rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
        if (rc)
                goto out_kobj;

        rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
        if (rc)
                goto out_ind_files;

        /* Get installed key hashes if available, ignore any errors */
        if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
                uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");

        return 0;

out_ind_files:
        sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
        kobject_del(uv_kobj);
        kobject_put(uv_kobj);
        return rc;
}
device_initcall(uv_sysfs_init);

/*
 * Locate a secret in the list by its id.
 * @secret_id: search pattern.
 * @list: ephemeral buffer space
 * @secret: output data, containing the secret's metadata.
 *
 * Search for a secret with the given secret_id in the Ultravisor secret store.
 *
 * Context: might sleep.
 */
static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
                               const struct uv_secret_list *list,
                               struct uv_secret_list_item_hdr *secret)
{
        u16 i;

        for (i = 0; i < list->total_num_secrets; i++) {
                if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
                        *secret = list->secrets[i].hdr;
                        return 0;
                }
        }
        return -ENOENT;
}

/*
 * Do the actual search for `uv_get_secret_metadata`.
 * @secret_id: search pattern.
 * @list: ephemeral buffer space
 * @secret: output data, containing the secret's metadata.
 *
 * Context: might sleep.
 */
int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
                   struct uv_secret_list *list,
                   struct uv_secret_list_item_hdr *secret)
{
        u16 start_idx = 0;
        u16 list_rc;
        int ret;

        do {
                uv_list_secrets(list, start_idx, &list_rc, NULL);
                if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
                        if (list_rc == UVC_RC_INV_CMD)
                                return -ENODEV;
                        else
                                return -EIO;
                }
                ret = find_secret_in_page(secret_id, list, secret);
                if (ret == 0)
                        return ret;
                start_idx = list->next_secret_idx;
        } while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);

        return -ENOENT;
}
EXPORT_SYMBOL_GPL(uv_find_secret);
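
/*
 * Illustrative sketch only, compiled out: looking up a secret's metadata
 * by id. The page-sized list is scratch space for the UVC; kzalloc()
 * would additionally require <linux/slab.h>.
 */
#if 0
static int example_find_secret(const u8 id[UV_SECRET_ID_LEN])
{
        struct uv_secret_list *list;
        struct uv_secret_list_item_hdr hdr;
        int rc;

        list = kzalloc(sizeof(*list), GFP_KERNEL);
        if (!list)
                return -ENOMEM;
        rc = uv_find_secret(id, list, &hdr);
        kfree(list);
        return rc;
}
#endif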

/**
 * uv_retrieve_secret() - get the secret value for the secret index.
 * @secret_idx: Secret index for which the secret should be retrieved.
 * @buf: Buffer to store retrieved secret.
 * @buf_size: Size of the buffer. The correct buffer size is reported as part of
 *            the result from `uv_get_secret_metadata`.
 *
 * Calls the Retrieve Secret UVC and translates the UV return code into an errno.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0        - Entry found; buffer contains a valid secret.
 * * %ENOENT:  - No entry found or secret at the index is non-retrievable.
 * * %ENODEV:  - Not supported: UV not available or command not available.
 * * %EINVAL:  - Buffer too small for content.
 * * %EIO:     - Other unexpected UV error.
 */
int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
{
        struct uv_cb_retr_secr uvcb = {
                .header.len = sizeof(uvcb),
                .header.cmd = UVC_CMD_RETR_SECRET,
                .secret_idx = secret_idx,
                .buf_addr = (u64)buf,
                .buf_size = buf_size,
        };

        uv_call_sched(0, (u64)&uvcb);

        switch (uvcb.header.rc) {
        case UVC_RC_EXECUTED:
                return 0;
        case UVC_RC_INV_CMD:
                return -ENODEV;
        case UVC_RC_RETR_SECR_STORE_EMPTY:
        case UVC_RC_RETR_SECR_INV_SECRET:
        case UVC_RC_RETR_SECR_INV_IDX:
                return -ENOENT;
        case UVC_RC_RETR_SECR_BUF_SMALL:
                return -EINVAL;
        default:
                return -EIO;
        }
}
EXPORT_SYMBOL_GPL(uv_retrieve_secret);
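
/*
 * Illustrative sketch only, compiled out: combining uv_find_secret() and
 * uv_retrieve_secret(). The hdr.index field name and the buffer sizing
 * are assumptions of this sketch, not an API documented in this file.
 */
#if 0
static int example_retrieve_secret(const u8 id[UV_SECRET_ID_LEN],
                                   u8 *buf, size_t buf_size)
{
        struct uv_secret_list *list;
        struct uv_secret_list_item_hdr hdr;
        int rc;

        list = kzalloc(sizeof(*list), GFP_KERNEL);
        if (!list)
                return -ENOMEM;
        rc = uv_find_secret(id, list, &hdr);
        kfree(list);
        if (rc)
                return rc;
        /* hdr.index identifies the slot to retrieve (assumed field name) */
        return uv_retrieve_secret(hdr.index, buf, buf_size);
}
#endif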