// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include "x86.h"
#include "svm.h"

static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

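/*
 * Flush all in-use and reclaimed ASIDs from the SEV firmware.  WBINVD is
 * issued on all CPUs first because DF_FLUSH requires that caches have been
 * invalidated for every ASID being flushed.
 */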
static int sev_flush_asids(void)
{
	int ret, error = 0;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(void)
{
	int pos;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	pos = find_next_bit(sev_reclaim_asid_bitmap,
			    max_sev_asid, min_sev_asid - 1);
	if (pos >= max_sev_asid)
		return false;

	if (sev_flush_asids())
		return false;

	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   max_sev_asid);
	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

	return true;
}

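/*
 * Allocate a new SEV ASID from [min_sev_asid, max_sev_asid].  If the bitmap
 * is exhausted, try once to recycle ASIDs that were freed but not yet flushed
 * from the firmware.  Returns the ASID (1-based) or -EBUSY.
 */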
static int sev_asid_new(void)
{
	bool retry = true;
	int pos;

	mutex_lock(&sev_bitmap_lock);

	/*
	 * A SEV-enabled guest must use an ASID from min_sev_asid to
	 * max_sev_asid.
	 */
again:
	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
	if (pos >= max_sev_asid) {
		if (retry && __sev_recycle_asids()) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		return -EBUSY;
	}

	__set_bit(pos, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return pos + 1;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

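/*
 * Mark an ASID for reclamation rather than freeing it directly: the ASID is
 * only safe to reuse after a DF_FLUSH, which __sev_recycle_asids() performs
 * lazily when the free bitmap runs out.  Also drop any cached per-CPU VMCB
 * pointers for this ASID.
 */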
static void sev_asid_free(int asid)
{
	struct svm_cpu_data *sd;
	int cpu, pos;

	mutex_lock(&sev_bitmap_lock);

	pos = asid - 1;
	__set_bit(pos, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[pos] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);
}

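/*
 * Tear down the firmware association for a guest handle: DEACTIVATE detaches
 * the ASID from the handle and DECOMMISSION releases the handle itself.
 * DEACTIVATE is taken under sev_deactivate_lock so it cannot race with the
 * WBINVD/DF_FLUSH sequence used when recycling ASIDs.
 */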
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_decommission *decommission;
	struct sev_data_deactivate *data;

	if (!handle)
		return;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	/* deactivate handle */
	data->handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(data, NULL);
	up_read(&sev_deactivate_lock);

	kfree(data);

	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
	if (!decommission)
		return;

	/* decommission handle */
	decommission->handle = handle;
	sev_guest_decommission(decommission, NULL);

	kfree(decommission);
}

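/*
 * KVM_SEV_INIT: allocate an ASID for this VM and initialize the SEV platform
 * via the PSP driver.  The VM is marked active only after both steps succeed.
 */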
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	int asid, ret;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	asid = sev_asid_new();
	if (asid < 0)
		return ret;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	sev->active = true;
	sev->asid = asid;
	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(asid);
	return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate *data;
	int asid = sev_get_asid(kvm);
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* activate ASID on the given handle */
	data->handle = handle;
	data->asid = asid;
	ret = sev_guest_activate(data, error);
	kfree(data);

	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

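/*
 * KVM_SEV_LAUNCH_START: create the memory encryption context for the guest.
 * Optional DH certificate and session blobs are copied in from userspace,
 * LAUNCH_START is issued to obtain a firmware handle, and the guest's ASID
 * is then bound (activated) on that handle.
 */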
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start *start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
	if (!start)
		return -ENOMEM;

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob)) {
			ret = PTR_ERR(dh_blob);
			goto e_free;
		}

		start->dh_cert_address = __sme_set(__pa(dh_blob));
		start->dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start->session_address = __sme_set(__pa(session_blob));
		start->session_len = params.session_len;
	}

	start->handle = params.handle;
	start->policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start->handle, error);
	if (ret)
		goto e_free_session;

	/* return handle to userspace */
	params.handle = start->handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start->handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start->handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
e_free:
	kfree(start);
	return ret;
}

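/*
 * Pin a userspace address range and return the backing pages.  The pinned
 * pages are charged against RLIMIT_MEMLOCK (tracked in sev->pages_locked)
 * and the page-pointer array is allocated with kmalloc() or vmalloc()
 * depending on its size.
 */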
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
				struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

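/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt guest memory in place.  The user range
 * is pinned, caches are flushed on non-coherent parts, and LAUNCH_UPDATE_DATA
 * is issued once per physically contiguous run of pages.
 */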
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data *data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages)) {
		ret = PTR_ERR(inpages);
		goto e_free;
	}

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data->handle = sev->handle;
		data->len = len;
		data->address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
e_free:
	kfree(data);
	return ret;
}

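/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement.  If userspace
 * passes a zero length, only the required blob size is queried and returned;
 * otherwise the measurement is copied out through a kernel bounce buffer.
 */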
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure *data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE) {
			ret = -EINVAL;
			goto e_free;
		}

		ret = -ENOMEM;
		blob = kmalloc(params.len, GFP_KERNEL);
		if (!blob)
			goto e_free;

		data->address = __psp_pa(blob);
		data->len = params.len;
	}

cmd:
	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);

	/*
	 * If we only queried the measurement length, the firmware has filled
	 * in the expected length; return it to userspace below.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data->len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
	return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);

	kfree(data);
	return ret;
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
	if (ret)
		goto e_free;

	params.policy = data->policy;
	params.state = data->state;
	params.handle = data->handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;
e_free:
	kfree(data);
	return ret;
}

static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	data->dst_addr = dst;
	data->src_addr = src;
	data->len = size;

	ret = sev_issue_cmd(kvm,
			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			    data, error);
	kfree(data);
	return ret;
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than was asked for; the caller must ensure
	 * that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* If the inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
				 page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

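/*
 * DBG_ENCRYPT helper.  Unaligned sources are staged through a bounce page,
 * and an unaligned destination or length forces a read-modify-write: the
 * destination block is first decrypted into a scratch page, the new bytes
 * are merged in, and the whole 16-byte-aligned block is re-encrypted.
 */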
static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user vaddr,
				  unsigned long dst_paddr,
				  unsigned long __user dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If the source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED(vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage),
				   (void __user *)(uintptr_t)vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If the destination buffer or the length is not aligned then do a
	 * read-modify-write:
	 *   - decrypt the destination into an intermediate buffer
	 *   - copy the source buffer into the intermediate buffer
	 *   - use the intermediate buffer as the source buffer
	 */
	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If the source is a kernel buffer then use memcpy(),
		 * otherwise copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   (void __user *)(uintptr_t)vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

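/*
 * KVM_SEV_DBG_{DECRYPT,ENCRYPT}: debug copy between guest and userspace
 * memory.  The request is processed one source page at a time; both the
 * source and destination pages are pinned and flushed for each chunk.
 */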
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
		 * the pages; flush the destination too so that future accesses do not
		 * see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since the user buffer may not be page-aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

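/*
 * KVM_SEV_LAUNCH_SECRET: inject a secret into guest memory during launch.
 * The target guest pages are pinned and must be physically contiguous; the
 * packet header and the encrypted secret are copied in from userspace before
 * LAUNCH_UPDATE_SECRET is issued.
 */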
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret *data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n, i;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(pages, n);

	/*
	 * The secret must be copied into a contiguous memory region; verify
	 * that the userspace pages are contiguous before issuing the command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto e_unpin_memory;

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data->guest_address = __sme_page_pa(pages[0]) + offset;
	data->guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_free;
	}

	data->trans_address = __psp_pa(blob);
	data->trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data->hdr_address = __psp_pa(hdr);
	data->hdr_len = params.hdr_len;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
e_unpin_memory:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);
	}
	sev_unpin_memory(kvm, pages, n);
	return ret;
}

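/*
 * Entry point for the KVM_MEMORY_ENCRYPT_OP ioctl on the VM fd: copy in the
 * struct kvm_sev_cmd, dispatch to the sub-command handler under kvm->lock,
 * and copy the (possibly updated) command back so userspace sees the firmware
 * error code and any returned fields.
 *
 * Illustrative userspace usage (a rough sketch, not taken from this file):
 *
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_INIT,
 *		.sev_fd = sev_fd,	// fd of /dev/sev
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 */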
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!svm_sev_enabled())
		return -ENOTTY;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	switch (sev_cmd.id) {
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

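/*
 * KVM_MEMORY_ENCRYPT_REG_REGION: pin a userspace range for the lifetime of
 * the VM so that pages which may hold encrypted guest data are not migrated
 * or swapped out from under the guest.
 */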
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
	if (!region)
		return -ENOMEM;

	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (IS_ERR(region->pages)) {
		ret = PTR_ERR(region->pages);
		goto e_free;
	}

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Let's make sure caches are
	 * flushed to ensure that guest data gets written into memory with
	 * the correct C-bit.
	 */
	sev_clflush_pages(region->pages, region->npages);

	region->uaddr = range->addr;
	region->size = range->size;

	mutex_lock(&kvm->lock);
	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	return ret;

e_free:
	kfree(region);
	return ret;
}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{
	sev_unpin_memory(kvm, region->pages, region->npages);
	list_del(&region->list);
	kfree(region);
}

int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}

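/*
 * VM teardown for SEV guests: flush guest-tagged cache lines, unpin any
 * regions userspace left registered, then release the firmware handle and
 * mark the ASID for reclamation.
 */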
void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct list_head *pos, *q;

	if (!sev_guest(kvm))
		return;

	mutex_lock(&kvm->lock);

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	/*
	 * If userspace was terminated before unregistering the memory regions,
	 * then unpin all the registered memory.
	 */
	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
			__unregister_enc_region_locked(kvm,
				list_entry(pos, struct enc_region, list));
			cond_resched();
		}
	}

	mutex_unlock(&kvm->lock);

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(sev->asid);
}

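/*
 * Probe SEV support at module load: read the ASID range from CPUID leaf
 * 0x8000001F, allocate the ASID bitmaps, and confirm the PSP firmware is
 * alive via PLATFORM_STATUS.  Returns non-zero if SEV cannot be used.
 */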
int __init sev_hardware_setup(void)
{
	struct sev_user_data_status *status;
	int rc;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = cpuid_ecx(0x8000001F);

	if (!svm_sev_enabled())
		return 1;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = cpuid_edx(0x8000001F);

	/* Initialize SEV ASID bitmaps */
	sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_asid_bitmap)
		return 1;

	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_reclaim_asid_bitmap)
		return 1;

	status = kmalloc(sizeof(*status), GFP_KERNEL);
	if (!status)
		return 1;

	/*
	 * Check SEV platform status.
	 *
	 * PLATFORM_STATUS can be called in any state. If we fail to query the
	 * platform status then either the PSP firmware does not support the
	 * SEV feature or the SEV firmware is dead.
	 */
	rc = sev_platform_status(status, NULL);
	if (rc)
		goto err;

	pr_info("SEV supported\n");

err:
	kfree(status);
	return rc;
}

void sev_hardware_teardown(void)
{
	if (!svm_sev_enabled())
		return;

	bitmap_free(sev_asid_bitmap);
	bitmap_free(sev_reclaim_asid_bitmap);

	sev_flush_asids();
}

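/*
 * Called on the VMRUN path for SEV guests: program the guest's ASID into the
 * VMCB and request an ASID TLB flush whenever this ASID may have stale
 * translations on the current CPU (different VMCB, or the vCPU last ran on
 * another CPU).
 */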
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	int asid = sev_get_asid(svm->vcpu.kvm);

	/* Assign the asid allocated with this SEV guest */
	svm->vmcb->control.asid = asid;

	/*
	 * Flush guest TLB:
	 *
	 * 1) when a different VMCB for the same ASID is to be run on the same
	 *    host CPU, or
	 * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
	 */
	if (sd->sev_vmcbs[asid] == svm->vmcb &&
	    svm->vcpu.arch.last_vmentry_cpu == cpu)
		return;

	sd->sev_vmcbs[asid] = svm->vmcb;
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}