// SPDX-License-Identifier: GPL-2.0-only
#include <stdint.h>
#include <stdbool.h>

#include "sev.h"

/*
 * sparsebit_next_clear() can return 0 if [x, 2**64-1] are all set, and the
 * -1 would then cause an underflow back to 2**64 - 1. This is expected and
 * correct.
 *
 * If the last range in the sparsebit is [x, y] and we try to iterate,
 * sparsebit_next_set() will return 0, and sparsebit_next_clear() will try
 * to find the first range, but that's correct because the condition
 * expression would cause us to quit the loop.
 */
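/*
 * Mark every protected page in @region as guest-private (when requested)
 * and add it to the initial encrypted guest image via the appropriate
 * SEV/SNP LAUNCH_UPDATE ioctl.
 */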
static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region,
			   uint8_t page_type, bool private)
{
	const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
	const vm_paddr_t gpa_base = region->region.guest_phys_addr;
	const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
	sparsebit_idx_t i, j;

	if (!sparsebit_any_set(protected_phy_pages))
		return;

	if (!is_sev_snp_vm(vm))
		sev_register_encrypted_memory(vm, region);

	sparsebit_for_each_set_range(protected_phy_pages, i, j) {
		const uint64_t size = (j - i + 1) * vm->page_size;
		const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;

		if (private)
			vm_mem_set_private(vm, gpa_base + offset, size);

		if (is_sev_snp_vm(vm))
			snp_launch_update_data(vm, gpa_base + offset,
					       (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
					       size, page_type);
		else
			sev_launch_update_data(vm, gpa_base + offset, size);
	}
}

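/*
 * Initialize SEV on @vm. A legacy KVM_X86_DEFAULT_VM needs an explicit
 * /dev/sev fd and the original KVM_SEV_INIT ioctl; a VM created with the
 * dedicated KVM_X86_SEV_VM type uses KVM_SEV_INIT2 instead.
 */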
void sev_vm_init(struct kvm_vm *vm)
{
	if (vm->type == KVM_X86_DEFAULT_VM) {
		TEST_ASSERT_EQ(vm->arch.sev_fd, -1);
		vm->arch.sev_fd = open_sev_dev_path_or_exit();
		vm_sev_ioctl(vm, KVM_SEV_INIT, NULL);
	} else {
		struct kvm_sev_init init = { 0 };

		TEST_ASSERT_EQ(vm->type, KVM_X86_SEV_VM);
		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
	}
}

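/*
 * Same as sev_vm_init(), but for SEV-ES: legacy VMs use KVM_SEV_ES_INIT,
 * while KVM_X86_SEV_ES_VM VMs use KVM_SEV_INIT2.
 */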
void sev_es_vm_init(struct kvm_vm *vm)
{
	if (vm->type == KVM_X86_DEFAULT_VM) {
		TEST_ASSERT_EQ(vm->arch.sev_fd, -1);
		vm->arch.sev_fd = open_sev_dev_path_or_exit();
		vm_sev_ioctl(vm, KVM_SEV_ES_INIT, NULL);
	} else {
		struct kvm_sev_init init = { 0 };

		TEST_ASSERT_EQ(vm->type, KVM_X86_SEV_ES_VM);
		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
	}
}

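/*
 * Initialize SEV-SNP on @vm. SNP has no legacy init path; the VM must have
 * been created with the KVM_X86_SNP_VM type and always uses KVM_SEV_INIT2.
 */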
void snp_vm_init(struct kvm_vm *vm)
{
	struct kvm_sev_init init = { 0 };

	TEST_ASSERT_EQ(vm->type, KVM_X86_SNP_VM);
	vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
}

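/*
 * Kick off the SEV launch sequence: LAUNCH_START with @policy, verify the
 * guest reached the LAUNCH_UPDATE state, encrypt all protected guest
 * memory, and, for SEV-ES policies, measure the initial VMSA. The guest's
 * page tables are swallowed into the encrypted image, so flag them as
 * protected to keep host-side code from touching them afterwards.
 */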
void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
{
	struct kvm_sev_launch_start launch_start = {
		.policy = policy,
	};
	struct userspace_mem_region *region;
	struct kvm_sev_guest_status status;
	int ctr;

	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start);
	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);

	TEST_ASSERT_EQ(status.policy, policy);
	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE);

	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
		encrypt_region(vm, region, KVM_SEV_PAGE_TYPE_INVALID, false);

	if (policy & SEV_POLICY_ES)
		vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

	vm->arch.is_pt_protected = true;
}

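/*
 * Fetch the launch measurement into @measurement, which must be able to
 * hold at least 256 bytes, and verify the guest advanced to the
 * LAUNCH_SECRET state.
 */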
void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
{
	struct kvm_sev_launch_measure launch_measure;
	struct kvm_sev_guest_status guest_status;

	launch_measure.len = 256;
	launch_measure.uaddr = (__u64)measurement;
	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure);

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &guest_status);
	TEST_ASSERT_EQ(guest_status.state, SEV_GUEST_STATE_LAUNCH_SECRET);
}

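/*
 * Finalize the launch. The guest must still be mid-launch (LAUNCH_UPDATE
 * or LAUNCH_SECRET) on entry, and must report RUNNING afterwards.
 */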
void sev_vm_launch_finish(struct kvm_vm *vm)
{
	struct kvm_sev_guest_status status;

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
	TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE ||
		    status.state == SEV_GUEST_STATE_LAUNCH_SECRET,
		    "Unexpected guest state: %d", status.state);

	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL);

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
}

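/* Begin the SNP launch sequence with the given guest @policy. */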
void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy)
{
	struct kvm_sev_snp_launch_start launch_start = {
		.policy = policy,
	};

	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_START, &launch_start);
}

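/*
 * Encrypt and measure all protected guest memory as private pages of type
 * KVM_SEV_SNP_PAGE_TYPE_NORMAL, and flag the (now guest-owned) page tables
 * as protected.
 */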
void snp_vm_launch_update(struct kvm_vm *vm)
{
	struct userspace_mem_region *region;
	int ctr;

	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
		encrypt_region(vm, region, KVM_SEV_SNP_PAGE_TYPE_NORMAL, true);

	vm->arch.is_pt_protected = true;
}

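/* Finalize the SNP launch with default (zeroed) parameters. */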
void snp_vm_launch_finish(struct kvm_vm *vm)
{
	struct kvm_sev_snp_launch_finish launch_finish = { 0 };

	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
}

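/*
 * Create a VM of the given @type with a single vCPU that will run
 * @guest_code, returning the vCPU in @cpu.
 */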
struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
					   struct kvm_vcpu **cpu)
{
	struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};
	struct kvm_vm *vm;
	struct kvm_vcpu *cpus[1];

	vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus);
	*cpu = cpus[0];

	return vm;
}

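/*
 * Run the full launch flow for a SEV, SEV-ES, or SNP VM. SNP guests use
 * the KVM_HC_MAP_GPA_RANGE hypercall to convert memory between shared and
 * private, so that hypercall must be enabled to exit to userspace. For
 * SEV/SEV-ES, the measurement is always retrieved; if the caller doesn't
 * provide a buffer, grab it into a throwaway buffer on the stack.
 */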
void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
{
	if (is_sev_snp_vm(vm)) {
		vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT(KVM_HC_MAP_GPA_RANGE));

		snp_vm_launch_start(vm, policy);

		snp_vm_launch_update(vm);

		snp_vm_launch_finish(vm);

		return;
	}

	sev_vm_launch(vm, policy);

	if (!measurement)
		measurement = alloca(256);

	sev_vm_launch_measure(vm, measurement);

	sev_vm_launch_finish(vm);
}
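
/*
 * Typical usage from a test, as an illustrative sketch only: "guest_main"
 * is a hypothetical guest entry point, and SEV_POLICY_NO_DBG stands in for
 * whatever policy bits the test actually wants.
 *
 *	struct kvm_vcpu *vcpu;
 *	uint8_t measurement[256];
 *	struct kvm_vm *vm;
 *
 *	vm = vm_sev_create_with_one_vcpu(KVM_X86_SEV_VM, guest_main, &vcpu);
 *	vm_sev_launch(vm, SEV_POLICY_NO_DBG, measurement);
 */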