xref: /linux/drivers/virt/coco/sev-guest/sev-guest.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Secure Encrypted Virtualization (SEV) guest driver interface
4  *
5  * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
6  *
7  * Author: Brijesh Singh <brijesh.singh@amd.com>
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <linux/platform_device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/set_memory.h>
18 #include <linux/fs.h>
19 #include <linux/tsm.h>
20 #include <crypto/gcm.h>
21 #include <linux/psp-sev.h>
22 #include <linux/sockptr.h>
23 #include <linux/cleanup.h>
24 #include <linux/uuid.h>
25 #include <linux/configfs.h>
26 #include <linux/mm.h>
27 #include <uapi/linux/sev-guest.h>
28 #include <uapi/linux/psp-sev.h>
29 
30 #include <asm/svm.h>
31 #include <asm/sev.h>
32 
33 #define DEVICE_NAME	"sev-guest"
34 
35 #define SVSM_MAX_RETRIES		3
36 
37 struct snp_guest_dev {
38 	struct device *dev;
39 	struct miscdevice misc;
40 
41 	struct snp_msg_desc *msg_desc;
42 };
43 
44 /*
45  * The VMPCK ID represents the key used by the SNP guest to communicate with the
46  * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
47  * used will be the key associated with the VMPL at which the guest is running.
48  * Should the default key be wiped (see snp_disable_vmpck()), this parameter
49  * allows for using one of the remaining VMPCKs.
50  */
51 static int vmpck_id = -1;
52 module_param(vmpck_id, int, 0444);
53 MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
54 
55 static inline struct snp_guest_dev *to_snp_dev(struct file *file)
56 {
57 	struct miscdevice *dev = file->private_data;
58 
59 	return container_of(dev, struct snp_guest_dev, misc);
60 }
61 
62 struct snp_req_resp {
63 	sockptr_t req_data;
64 	sockptr_t resp_data;
65 };
66 
67 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
68 {
69 	struct snp_report_req *report_req __free(kfree) = NULL;
70 	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
71 	struct snp_report_resp *report_resp;
72 	struct snp_guest_req req = {};
73 	int rc, resp_len;
74 
75 	if (!arg->req_data || !arg->resp_data)
76 		return -EINVAL;
77 
78 	report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
79 	if (!report_req)
80 		return -ENOMEM;
81 
82 	if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
83 		return -EFAULT;
84 
85 	/*
86 	 * The intermediate response buffer is used while decrypting the
87 	 * response payload. Make sure that it has enough space to cover the
88 	 * authtag.
89 	 */
90 	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
91 	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
92 	if (!report_resp)
93 		return -ENOMEM;
94 
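	/*
	 * Describe the encrypted guest message: a MSG_REPORT_REQ payload in,
	 * the attestation report plus AEAD authtag out.
	 */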
95 	req.msg_version = arg->msg_version;
96 	req.msg_type = SNP_MSG_REPORT_REQ;
97 	req.vmpck_id = mdesc->vmpck_id;
98 	req.req_buf = report_req;
99 	req.req_sz = sizeof(*report_req);
100 	req.resp_buf = report_resp->data;
101 	req.resp_sz = resp_len;
102 	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;
103 
104 	rc = snp_send_guest_request(mdesc, &req);
105 	arg->exitinfo2 = req.exitinfo2;
106 	if (rc)
107 		goto e_free;
108 
109 	if (copy_to_user((void __user *)arg->resp_data, report_resp, sizeof(*report_resp)))
110 		rc = -EFAULT;
111 
112 e_free:
113 	kfree(report_resp);
114 	return rc;
115 }
116 
117 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
118 {
119 	struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
120 	struct snp_derived_key_resp derived_key_resp = {0};
121 	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
122 	struct snp_guest_req req = {};
123 	int rc, resp_len;
124 	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
125 	u8 buf[64 + 16];
126 
127 	if (!arg->req_data || !arg->resp_data)
128 		return -EINVAL;
129 
130 	/*
131 	 * The intermediate response buffer is used while decrypting the
132 	 * response payload. Make sure that it has enough space to cover the
133 	 * authtag.
134 	 */
135 	resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize;
136 	if (sizeof(buf) < resp_len)
137 		return -ENOMEM;
138 
139 	derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
140 	if (!derived_key_req)
141 		return -ENOMEM;
142 
143 	if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
144 			   sizeof(*derived_key_req)))
145 		return -EFAULT;
146 
147 	req.msg_version = arg->msg_version;
148 	req.msg_type = SNP_MSG_KEY_REQ;
149 	req.vmpck_id = mdesc->vmpck_id;
150 	req.req_buf = derived_key_req;
151 	req.req_sz = sizeof(*derived_key_req);
152 	req.resp_buf = buf;
153 	req.resp_sz = resp_len;
154 	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;
155 
156 	rc = snp_send_guest_request(mdesc, &req);
157 	arg->exitinfo2 = req.exitinfo2;
158 	if (rc)
159 		return rc;
160 
161 	memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
162 	if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
163 			 sizeof(derived_key_resp)))
164 		rc = -EFAULT;
165 
166 	/* The response buffer contains the sensitive data, explicitly clear it. */
167 	memzero_explicit(buf, sizeof(buf));
168 	memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
169 	return rc;
170 }
171 
172 static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
173 			  struct snp_req_resp *io)
174 
175 {
176 	struct snp_ext_report_req *report_req __free(kfree) = NULL;
177 	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
178 	struct snp_report_resp *report_resp;
179 	struct snp_guest_req req = {};
180 	int ret, npages = 0, resp_len;
181 	sockptr_t certs_address;
182 	struct page *page;
183 
184 	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
185 		return -EINVAL;
186 
187 	report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
188 	if (!report_req)
189 		return -ENOMEM;
190 
191 	if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
192 		return -EFAULT;
193 
194 	/* caller does not want certificate data */
195 	if (!report_req->certs_len || !report_req->certs_address)
196 		goto cmd;
197 
198 	if (report_req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
199 	    !IS_ALIGNED(report_req->certs_len, PAGE_SIZE))
200 		return -EINVAL;
201 
202 	if (sockptr_is_kernel(io->resp_data)) {
203 		certs_address = KERNEL_SOCKPTR((void *)report_req->certs_address);
204 	} else {
205 		certs_address = USER_SOCKPTR((void __user *)report_req->certs_address);
206 		if (!access_ok(certs_address.user, report_req->certs_len))
207 			return -EFAULT;
208 	}
209 
210 	/*
211 	 * Initialize the intermediate buffer with all zeros. This buffer
212 	 * is used in the guest request message to get the certs blob from
213 	 * the host. If host does not supply any certs in it, then copy
214 	 * zeros to indicate that certificate data was not provided.
215 	 */
216 	npages = report_req->certs_len >> PAGE_SHIFT;
217 	page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
218 			   get_order(report_req->certs_len));
219 	if (!page)
220 		return -ENOMEM;
221 
222 	req.certs_data = page_address(page);
223 	ret = set_memory_decrypted((unsigned long)req.certs_data, npages);
224 	if (ret) {
225 		pr_err("failed to mark page shared, ret=%d\n", ret);
226 		__free_pages(page, get_order(report_req->certs_len));
227 		return -EFAULT;
228 	}
229 
230 cmd:
231 	/*
232 	 * The intermediate response buffer is used while decrypting the
233 	 * response payload. Make sure that it has enough space to cover the
234 	 * authtag.
235 	 */
236 	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
237 	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
238 	if (!report_resp) {
239 		ret = -ENOMEM;
240 		goto e_free_data;
241 	}
242 
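	/*
	 * Tell the hypervisor how many shared pages back the certificate
	 * buffer (zero when the caller skipped certificate data).
	 */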
243 	req.input.data_npages = npages;
244 
245 	req.msg_version = arg->msg_version;
246 	req.msg_type = SNP_MSG_REPORT_REQ;
247 	req.vmpck_id = mdesc->vmpck_id;
248 	req.req_buf = &report_req->data;
249 	req.req_sz = sizeof(report_req->data);
250 	req.resp_buf = report_resp->data;
251 	req.resp_sz = resp_len;
252 	req.exit_code = SVM_VMGEXIT_EXT_GUEST_REQUEST;
253 
254 	ret = snp_send_guest_request(mdesc, &req);
255 	arg->exitinfo2 = req.exitinfo2;
256 
257 	/* If certs length is invalid then copy the returned length */
258 	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
259 		report_req->certs_len = req.input.data_npages << PAGE_SHIFT;
260 
261 		if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
262 			ret = -EFAULT;
263 	}
264 
265 	if (ret)
266 		goto e_free;
267 
268 	if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) {
269 		ret = -EFAULT;
270 		goto e_free;
271 	}
272 
273 	if (copy_to_sockptr(io->resp_data, report_resp, sizeof(*report_resp)))
274 		ret = -EFAULT;
275 
276 e_free:
277 	kfree(report_resp);
278 e_free_data:
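	/*
	 * Restore the encryption attribute on the shared certificate pages
	 * before freeing them. If that fails, deliberately leak the pages
	 * rather than hand decrypted memory back to the page allocator.
	 */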
279 	if (npages) {
280 		if (set_memory_encrypted((unsigned long)req.certs_data, npages))
281 			WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
282 		else
283 			__free_pages(page, get_order(report_req->certs_len));
284 	}
285 	return ret;
286 }
287 
288 static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
289 {
290 	struct snp_guest_dev *snp_dev = to_snp_dev(file);
291 	void __user *argp = (void __user *)arg;
292 	struct snp_guest_request_ioctl input;
293 	struct snp_req_resp io;
294 	int ret = -ENOTTY;
295 
296 	if (copy_from_user(&input, argp, sizeof(input)))
297 		return -EFAULT;
298 
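	/*
	 * Non-zero sentinel so that exitinfo2 is copied back to userspace
	 * even when the request never reaches the firmware.
	 */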
299 	input.exitinfo2 = 0xff;
300 
301 	/* Message version must be non-zero */
302 	if (!input.msg_version)
303 		return -EINVAL;
304 
305 	switch (ioctl) {
306 	case SNP_GET_REPORT:
307 		ret = get_report(snp_dev, &input);
308 		break;
309 	case SNP_GET_DERIVED_KEY:
310 		ret = get_derived_key(snp_dev, &input);
311 		break;
312 	case SNP_GET_EXT_REPORT:
313 		/*
314 		 * As get_ext_report() may be called from the ioctl() path and a
315 		 * kernel internal path (configfs-tsm), decorate the passed
316 		 * buffers as user pointers.
317 		 */
318 		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
319 		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
320 		ret = get_ext_report(snp_dev, &input, &io);
321 		break;
322 	default:
323 		break;
324 	}
325 
326 	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
327 		return -EFAULT;
328 
329 	return ret;
330 }
331 
332 static const struct file_operations snp_guest_fops = {
333 	.owner	= THIS_MODULE,
334 	.unlocked_ioctl = snp_guest_ioctl,
335 };
336 
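/*
 * Header of the decrypted report response: the firmware status code and
 * the size of the attestation report that follows it.
 */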
337 struct snp_msg_report_resp_hdr {
338 	u32 status;
339 	u32 report_size;
340 	u8 rsvd[24];
341 };
342 
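/*
 * One entry of the certificate table returned with an extended report:
 * a GUID identifying the certificate plus its offset and length within
 * the certificate blob.
 */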
343 struct snp_msg_cert_entry {
344 	guid_t guid;
345 	u32 offset;
346 	u32 length;
347 };
348 
349 static int sev_svsm_report_new(struct tsm_report *report, void *data)
350 {
351 	unsigned int rep_len, man_len, certs_len;
352 	struct tsm_report_desc *desc = &report->desc;
353 	struct svsm_attest_call ac = {};
354 	unsigned int retry_count;
355 	void *rep, *man, *certs;
356 	struct svsm_call call;
357 	unsigned int size;
358 	bool try_again;
359 	void *buffer;
360 	u64 call_id;
361 	int ret;
362 
363 	/*
364 	 * Allocate pages for the request:
365 	 * - Report blob (4K)
366 	 * - Manifest blob (4K)
367 	 * - Certificate blob (16K)
368 	 *
369 	 * Above addresses must be 4K aligned
370 	 */
371 	rep_len = SZ_4K;
372 	man_len = SZ_4K;
373 	certs_len = SEV_FW_BLOB_MAX_SIZE;
374 
375 	if (guid_is_null(&desc->service_guid)) {
376 		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES);
377 	} else {
378 		export_guid(ac.service_guid, &desc->service_guid);
379 		ac.service_manifest_ver = desc->service_manifest_version;
380 
381 		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE);
382 	}
383 
384 	retry_count = 0;
385 
386 retry:
387 	memset(&call, 0, sizeof(call));
388 
389 	size = rep_len + man_len + certs_len;
390 	buffer = alloc_pages_exact(size, __GFP_ZERO);
391 	if (!buffer)
392 		return -ENOMEM;
393 
394 	rep = buffer;
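	/*
	 * Carve the zeroed allocation into report, manifest and certificate
	 * regions and hand their physical addresses to the SVSM.
	 */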
395 	ac.report_buf.pa = __pa(rep);
396 	ac.report_buf.len = rep_len;
397 
398 	man = rep + rep_len;
399 	ac.manifest_buf.pa = __pa(man);
400 	ac.manifest_buf.len = man_len;
401 
402 	certs = man + man_len;
403 	ac.certificates_buf.pa = __pa(certs);
404 	ac.certificates_buf.len = certs_len;
405 
406 	ac.nonce.pa = __pa(desc->inblob);
407 	ac.nonce.len = desc->inblob_len;
408 
409 	ret = snp_issue_svsm_attest_req(call_id, &call, &ac);
410 	if (ret) {
411 		free_pages_exact(buffer, size);
412 
413 		switch (call.rax_out) {
414 		case SVSM_ERR_INVALID_PARAMETER:
415 			try_again = false;
416 
417 			if (ac.report_buf.len > rep_len) {
418 				rep_len = PAGE_ALIGN(ac.report_buf.len);
419 				try_again = true;
420 			}
421 
422 			if (ac.manifest_buf.len > man_len) {
423 				man_len = PAGE_ALIGN(ac.manifest_buf.len);
424 				try_again = true;
425 			}
426 
427 			if (ac.certificates_buf.len > certs_len) {
428 				certs_len = PAGE_ALIGN(ac.certificates_buf.len);
429 				try_again = true;
430 			}
431 
432 			/* If one of the buffers wasn't large enough, retry the request */
433 			if (try_again && retry_count < SVSM_MAX_RETRIES) {
434 				retry_count++;
435 				goto retry;
436 			}
437 
438 			return -EINVAL;
439 		default:
440 			pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n",
441 					   ret, call.rax_out);
442 			return -EINVAL;
443 		}
444 	}
445 
446 	/*
447 	 * Allocate all the blob memory buffers at once so that the cleanup is
448 	 * done for errors that occur after the first allocation (i.e. before
449 	 * using no_free_ptr()).
450 	 */
451 	rep_len = ac.report_buf.len;
452 	void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL);
453 
454 	man_len = ac.manifest_buf.len;
455 	void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL);
456 
457 	certs_len = ac.certificates_buf.len;
458 	void *cbuf __free(kvfree) = certs_len ? kvzalloc(certs_len, GFP_KERNEL) : NULL;
459 
460 	if (!rbuf || !mbuf || (certs_len && !cbuf)) {
461 		free_pages_exact(buffer, size);
462 		return -ENOMEM;
463 	}
464 
465 	memcpy(rbuf, rep, rep_len);
466 	report->outblob = no_free_ptr(rbuf);
467 	report->outblob_len = rep_len;
468 
469 	memcpy(mbuf, man, man_len);
470 	report->manifestblob = no_free_ptr(mbuf);
471 	report->manifestblob_len = man_len;
472 
473 	if (certs_len) {
474 		memcpy(cbuf, certs, certs_len);
475 		report->auxblob = no_free_ptr(cbuf);
476 		report->auxblob_len = certs_len;
477 	}
478 
479 	free_pages_exact(buffer, size);
480 
481 	return 0;
482 }
483 
484 static int sev_report_new(struct tsm_report *report, void *data)
485 {
486 	struct snp_msg_cert_entry *cert_table;
487 	struct tsm_report_desc *desc = &report->desc;
488 	struct snp_guest_dev *snp_dev = data;
489 	struct snp_msg_report_resp_hdr hdr;
490 	const u32 report_size = SZ_4K;
491 	const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
492 	u32 certs_size, i, size = report_size + ext_size;
493 	int ret;
494 
495 	if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
496 		return -EINVAL;
497 
498 	if (desc->service_provider) {
499 		if (strcmp(desc->service_provider, "svsm"))
500 			return -EINVAL;
501 
502 		return sev_svsm_report_new(report, data);
503 	}
504 
505 	void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
506 	if (!buf)
507 		return -ENOMEM;
508 
509 	cert_table = buf + report_size;
510 	struct snp_ext_report_req ext_req = {
511 		.data = { .vmpl = desc->privlevel },
512 		.certs_address = (__u64)cert_table,
513 		.certs_len = ext_size,
514 	};
515 	memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);
516 
517 	struct snp_guest_request_ioctl input = {
518 		.msg_version = 1,
519 		.req_data = (__u64)&ext_req,
520 		.resp_data = (__u64)buf,
521 		.exitinfo2 = 0xff,
522 	};
523 	struct snp_req_resp io = {
524 		.req_data = KERNEL_SOCKPTR(&ext_req),
525 		.resp_data = KERNEL_SOCKPTR(buf),
526 	};
527 
528 	ret = get_ext_report(snp_dev, &input, &io);
529 	if (ret)
530 		return ret;
531 
532 	memcpy(&hdr, buf, sizeof(hdr));
533 	if (hdr.status == SEV_RET_INVALID_PARAM)
534 		return -EINVAL;
535 	if (hdr.status == SEV_RET_INVALID_KEY)
536 		return -EINVAL;
537 	if (hdr.status)
538 		return -ENXIO;
539 	if ((hdr.report_size + sizeof(hdr)) > report_size)
540 		return -ENOMEM;
541 
542 	void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
543 	if (!rbuf)
544 		return -ENOMEM;
545 
546 	memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
547 	report->outblob = no_free_ptr(rbuf);
548 	report->outblob_len = hdr.report_size;
549 
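	/*
	 * Walk the certificate table (terminated by an all-zero entry) to
	 * determine how much certificate data the host actually provided.
	 */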
550 	certs_size = 0;
551 	for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
552 		struct snp_msg_cert_entry *ent = &cert_table[i];
553 
554 		if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
555 			break;
556 		certs_size = max(certs_size, ent->offset + ent->length);
557 	}
558 
559 	/* Suspicious that the response populated entries without populating size */
560 	if (!certs_size && i)
561 		dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");
562 
563 	/* No certs to report */
564 	if (!certs_size)
565 		return 0;
566 
567 	/* Suspicious that the certificate blob size contract was violated */
568 
569 	if (certs_size > ext_size) {
570 		dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
571 		certs_size = ext_size;
572 	}
573 
574 	void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
575 	if (!cbuf)
576 		return -ENOMEM;
577 
578 	memcpy(cbuf, cert_table, certs_size);
579 	report->auxblob = no_free_ptr(cbuf);
580 	report->auxblob_len = certs_size;
581 
582 	return 0;
583 }
584 
585 static bool sev_report_attr_visible(int n)
586 {
587 	switch (n) {
588 	case TSM_REPORT_GENERATION:
589 	case TSM_REPORT_PROVIDER:
590 	case TSM_REPORT_PRIVLEVEL:
591 	case TSM_REPORT_PRIVLEVEL_FLOOR:
592 		return true;
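	/* Service attributes only apply when running under an SVSM (snp_vmpl != 0). */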
593 	case TSM_REPORT_SERVICE_PROVIDER:
594 	case TSM_REPORT_SERVICE_GUID:
595 	case TSM_REPORT_SERVICE_MANIFEST_VER:
596 		return snp_vmpl;
597 	}
598 
599 	return false;
600 }
601 
602 static bool sev_report_bin_attr_visible(int n)
603 {
604 	switch (n) {
605 	case TSM_REPORT_INBLOB:
606 	case TSM_REPORT_OUTBLOB:
607 	case TSM_REPORT_AUXBLOB:
608 		return true;
609 	case TSM_REPORT_MANIFESTBLOB:
610 		return snp_vmpl;
611 	}
612 
613 	return false;
614 }
615 
616 static struct tsm_report_ops sev_tsm_report_ops = {
617 	.name = KBUILD_MODNAME,
618 	.report_new = sev_report_new,
619 	.report_attr_visible = sev_report_attr_visible,
620 	.report_bin_attr_visible = sev_report_bin_attr_visible,
621 };
622 
623 static void unregister_sev_tsm(void *data)
624 {
625 	tsm_report_unregister(&sev_tsm_report_ops);
626 }
627 
628 static int __init sev_guest_probe(struct platform_device *pdev)
629 {
630 	struct device *dev = &pdev->dev;
631 	struct snp_guest_dev *snp_dev;
632 	struct snp_msg_desc *mdesc;
633 	struct miscdevice *misc;
634 	int ret;
635 
636 	BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);
637 
638 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
639 		return -ENODEV;
640 
641 	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
642 	if (!snp_dev)
643 		return -ENOMEM;
644 
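	/* Set up the messaging context used for encrypted guest requests. */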
645 	mdesc = snp_msg_alloc();
646 	if (IS_ERR_OR_NULL(mdesc))
647 		return -ENOMEM;
648 
649 	ret = snp_msg_init(mdesc, vmpck_id);
650 	if (ret)
651 		goto e_msg_init;
652 
653 	platform_set_drvdata(pdev, snp_dev);
654 	snp_dev->dev = dev;
655 
656 	misc = &snp_dev->misc;
657 	misc->minor = MISC_DYNAMIC_MINOR;
658 	misc->name = DEVICE_NAME;
659 	misc->fops = &snp_guest_fops;
660 
661 	/* Set the privlevel_floor attribute based on the vmpck_id */
662 	sev_tsm_report_ops.privlevel_floor = mdesc->vmpck_id;
663 
664 	ret = tsm_report_register(&sev_tsm_report_ops, snp_dev);
665 	if (ret)
666 		goto e_msg_init;
667 
668 	ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
669 	if (ret)
670 		goto e_msg_init;
671 
672 	ret = misc_register(misc);
673 	if (ret)
674 		goto e_msg_init;
675 
676 	snp_dev->msg_desc = mdesc;
677 	dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n",
678 		 mdesc->vmpck_id);
679 	return 0;
680 
681 e_msg_init:
682 	snp_msg_free(mdesc);
683 
684 	return ret;
685 }
686 
687 static void __exit sev_guest_remove(struct platform_device *pdev)
688 {
689 	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
690 
691 	snp_msg_free(snp_dev->msg_desc);
692 	misc_deregister(&snp_dev->misc);
693 }
694 
695 /*
696  * This driver is meant to be a common SEV guest interface driver and to
697  * support any SEV guest API. As such, even though it has been introduced
698  * with the SEV-SNP support, it is named "sev-guest".
699  *
700  * sev_guest_remove() lives in .exit.text. For drivers registered via
701  * module_platform_driver_probe() this is ok because they cannot get unbound
702  * at runtime. So mark the driver struct with __refdata to prevent modpost
703  * triggering a section mismatch warning.
704  */
705 static struct platform_driver sev_guest_driver __refdata = {
706 	.remove		= __exit_p(sev_guest_remove),
707 	.driver		= {
708 		.name = "sev-guest",
709 	},
710 };
711 
712 module_platform_driver_probe(sev_guest_driver, sev_guest_probe);
713 
714 MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
715 MODULE_LICENSE("GPL");
716 MODULE_VERSION("1.0.0");
717 MODULE_DESCRIPTION("AMD SEV Guest Driver");
718 MODULE_ALIAS("platform:sev-guest");
719