1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2023, Microsoft Corporation.
4  *
5  * Hypercall helper functions used by the mshv_root module.
6  *
7  * Authors: Microsoft Linux virtualization team
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/mm.h>
12 #include <asm/mshyperv.h>
13 
14 #include "mshv_root.h"
15 
/* Determined empirically */
#define HV_INIT_PARTITION_DEPOSIT_PAGES 208
#define HV_MAP_GPA_DEPOSIT_PAGES	256
#define HV_UMAP_GPA_PAGES		512

/* True iff pg_count is a whole number of 2M large pages (512 x 4K pages) */
#define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))

/*
 * Rep-hypercall batch sizes: how many list entries fit in one hypercall
 * page after the fixed input header for the given call.
 */
#define HV_WITHDRAW_BATCH_SIZE	(HV_HYP_PAGE_SIZE / sizeof(u64))
#define HV_MAP_GPA_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_map_gpa_pages)) \
		/ sizeof(u64))
#define HV_GET_VP_STATE_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_vp_state)) \
		/ sizeof(u64))
#define HV_SET_VP_STATE_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_state)) \
		/ sizeof(u64))
#define HV_GET_GPA_ACCESS_STATES_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(union hv_gpa_page_access_state)) \
		/ sizeof(union hv_gpa_page_access_state))
#define HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT		       \
	((HV_HYP_PAGE_SIZE -						       \
	  sizeof(struct hv_input_modify_sparse_spa_page_host_access)) /        \
	 sizeof(u64))
40 
/**
 * hv_call_withdraw_memory() - reclaim pages previously deposited with the
 *			       hypervisor on behalf of a partition
 * @count: maximum number of pages to withdraw
 * @node: NUMA node hint (currently unused by this helper)
 * @partition_id: partition to withdraw the pages from
 *
 * Withdraws pages in batches and frees each returned PFN back to the
 * kernel page allocator. HV_STATUS_NO_RESOURCES means the hypervisor has
 * nothing left to return and is treated as success.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hv_call_withdraw_memory(u64 count, int node, u64 partition_id)
{
	struct hv_input_withdraw_memory *input_page;
	struct hv_output_withdraw_memory *output_page;
	struct page *page;
	u16 completed;
	unsigned long remaining = count;
	/*
	 * Initialize so that a zero count (loop never entered) reports
	 * success instead of evaluating an uninitialized status.
	 */
	u64 status = HV_STATUS_SUCCESS;
	int i;
	unsigned long flags;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	output_page = page_address(page);

	while (remaining) {
		/* Per-cpu hypercall input page is only valid with irqs off */
		local_irq_save(flags);

		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		input_page->partition_id = partition_id;
		status = hv_do_rep_hypercall(HVCALL_WITHDRAW_MEMORY,
					     min(remaining, HV_WITHDRAW_BATCH_SIZE),
					     0, input_page, output_page);

		local_irq_restore(flags);

		completed = hv_repcomp(status);

		/* Return every withdrawn page to the kernel allocator */
		for (i = 0; i < completed; i++)
			__free_page(pfn_to_page(output_page->gpa_page_list[i]));

		if (!hv_result_success(status)) {
			/* Nothing left to withdraw is not an error */
			if (hv_result(status) == HV_STATUS_NO_RESOURCES)
				status = HV_STATUS_SUCCESS;
			break;
		}

		remaining -= completed;
	}
	free_page((unsigned long)output_page);

	return hv_result_to_errno(status);
}
87 
/**
 * hv_call_create_partition() - ask the hypervisor to create a partition
 * @flags: partition creation flags
 * @creation_properties: by-value creation properties copied into the input
 * @isolation_properties: by-value isolation properties copied into the input
 * @partition_id: out parameter; written only on success
 *
 * Retries after depositing a page to the root partition whenever the
 * hypervisor reports HV_STATUS_INSUFFICIENT_MEMORY.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hv_call_create_partition(u64 flags,
			     struct hv_partition_creation_properties creation_properties,
			     union hv_partition_isolation_properties isolation_properties,
			     u64 *partition_id)
{
	struct hv_input_create_partition *input;
	struct hv_output_create_partition *output;
	u64 status;
	int ret;
	unsigned long irq_flags;

	do {
		/* Per-cpu hypercall pages require interrupts disabled */
		local_irq_save(irq_flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->flags = flags;
		input->compatibility_version = HV_COMPATIBILITY_21_H2;

		memcpy(&input->partition_creation_properties, &creation_properties,
		       sizeof(creation_properties));

		memcpy(&input->isolation_properties, &isolation_properties,
		       sizeof(isolation_properties));

		status = hv_do_hypercall(HVCALL_CREATE_PARTITION,
					 input, output);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			/*
			 * Copy the result out before re-enabling interrupts:
			 * the output page is per-cpu and may be reused.
			 */
			if (hv_result_success(status))
				*partition_id = output->partition_id;
			local_irq_restore(irq_flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(irq_flags);
		/* Hypervisor needs memory: donate one page and retry */
		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    hv_current_partition_id, 1);
	} while (!ret);

	return ret;
}
131 
/**
 * hv_call_initialize_partition() - move a created partition to the
 *				    initialized state
 * @partition_id: partition to initialize
 *
 * Pre-deposits an empirically sized pool of pages, then retries the
 * hypercall (depositing one more page each time) while the hypervisor
 * reports HV_STATUS_INSUFFICIENT_MEMORY.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hv_call_initialize_partition(u64 partition_id)
{
	struct hv_input_initialize_partition input;
	u64 hv_status;
	int err;

	input.partition_id = partition_id;

	err = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
				    HV_INIT_PARTITION_DEPOSIT_PAGES);
	if (err)
		return err;

	for (;;) {
		hv_status = hv_do_fast_hypercall8(HVCALL_INITIALIZE_PARTITION,
						  *(u64 *)&input);

		if (hv_result(hv_status) != HV_STATUS_INSUFFICIENT_MEMORY)
			return hv_result_to_errno(hv_status);

		/* Hypervisor is short on memory: donate a page and retry */
		err = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
		if (err)
			return err;
	}
}
158 
/* Transition @partition_id to the finalized state prior to deletion. */
int hv_call_finalize_partition(u64 partition_id)
{
	struct hv_input_finalize_partition args;
	u64 hv_status;

	args.partition_id = partition_id;

	/* Single-u64 input fits in a fast (register-based) hypercall */
	hv_status = hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION,
					  *(u64 *)&args);

	return hv_result_to_errno(hv_status);
}
170 
/* Ask the hypervisor to delete the (finalized) partition @partition_id. */
int hv_call_delete_partition(u64 partition_id)
{
	struct hv_input_delete_partition args;
	u64 hv_status;

	args.partition_id = partition_id;

	hv_status = hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION,
					  *(u64 *)&args);

	return hv_result_to_errno(hv_status);
}
181 
/*
 * Ask the hypervisor to map guest ram pages or the guest mmio space.
 *
 * Exactly one of @pages (RAM backing pages) or @mmio_spa (base system PFN
 * of an MMIO range) is used; passing both is rejected. With
 * HV_MAP_GPA_LARGE_PAGE the 4K page count must be 2M aligned and the loop
 * operates in large-page units (large_shift converts between the two).
 * On partial failure, everything already mapped is unmapped again.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
			       u32 flags, struct page **pages, u64 mmio_spa)
{
	struct hv_input_map_gpa_pages *input_page;
	u64 status, *pfnlist;
	unsigned long irq_flags, large_shift = 0;
	int ret = 0, done = 0;
	u64 page_count = page_struct_count;

	if (page_count == 0 || (pages && mmio_spa))
		return -EINVAL;

	if (flags & HV_MAP_GPA_LARGE_PAGE) {
		/* Large pages are only supported for RAM mappings */
		if (mmio_spa)
			return -EINVAL;

		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;

		/* Work in 2M units from here on */
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong i, completed, remain = page_count - done;
		int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);

		/* Per-cpu hypercall input page requires irqs off */
		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input_page->target_partition_id = partition_id;
		input_page->target_gpa_base = gfn + (done << large_shift);
		input_page->map_flags = flags;
		pfnlist = input_page->source_gpa_page_list;

		for (i = 0; i < rep_count; i++)
			if (flags & HV_MAP_GPA_NO_ACCESS) {
				/* No backing needed for inaccessible GPAs */
				pfnlist[i] = 0;
			} else if (pages) {
				u64 index = (done + i) << large_shift;

				if (index >= page_struct_count) {
					ret = -EINVAL;
					break;
				}
				pfnlist[i] = page_to_pfn(pages[index]);
			} else {
				pfnlist[i] = mmio_spa + done + i;
			}
		if (ret)
			break;

		status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
					     input_page, NULL);
		local_irq_restore(irq_flags);

		/* Rep hypercalls may complete partially; resume at 'done' */
		completed = hv_repcomp(status);

		if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
						    HV_MAP_GPA_DEPOSIT_PAGES);
			if (ret)
				break;

		} else if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	/* Roll back partial mappings so the caller sees all-or-nothing */
	if (ret && done) {
		u32 unmap_flags = 0;

		if (flags & HV_MAP_GPA_LARGE_PAGE)
			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
		hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
	}

	return ret;
}
265 
266 /* Ask the hypervisor to map guest ram pages */
hv_call_map_gpa_pages(u64 partition_id,u64 gpa_target,u64 page_count,u32 flags,struct page ** pages)267 int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
268 			  u32 flags, struct page **pages)
269 {
270 	return hv_do_map_gpa_hcall(partition_id, gpa_target, page_count,
271 				   flags, pages, 0);
272 }
273 
274 /* Ask the hypervisor to map guest mmio space */
hv_call_map_mmio_pages(u64 partition_id,u64 gfn,u64 mmio_spa,u64 numpgs)275 int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs)
276 {
277 	int i;
278 	u32 flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE |
279 		    HV_MAP_GPA_NOT_CACHED;
280 
281 	for (i = 0; i < numpgs; i++)
282 		if (page_is_ram(mmio_spa + i))
283 			return -EINVAL;
284 
285 	return hv_do_map_gpa_hcall(partition_id, gfn, numpgs, flags, NULL,
286 				   mmio_spa);
287 }
288 
/**
 * hv_call_unmap_gpa_pages() - unmap guest physical pages
 * @partition_id: target partition
 * @gfn: first guest page frame to unmap
 * @page_count_4k: number of 4K pages to unmap
 * @flags: HV_UNMAP_GPA_* flags; HV_UNMAP_GPA_LARGE_PAGE requires a
 *	   2M-aligned count and switches the loop to large-page units
 *
 * Return: 0 on success, negative errno on failure.
 */
int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
			    u32 flags)
{
	struct hv_input_unmap_gpa_pages *input_page;
	u64 status, page_count = page_count_4k;
	unsigned long irq_flags, large_shift = 0;
	int ret = 0, done = 0;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_UNMAP_GPA_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;

		/* Work in 2M units from here on */
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong completed, remain = page_count - done;
		int rep_count = min(remain, HV_UMAP_GPA_PAGES);

		/* Per-cpu hypercall input page requires irqs off */
		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input_page->target_partition_id = partition_id;
		input_page->target_gpa_base = gfn + (done << large_shift);
		input_page->unmap_flags = flags;
		status = hv_do_rep_hypercall(HVCALL_UNMAP_GPA_PAGES, rep_count,
					     0, input_page, NULL);
		local_irq_restore(irq_flags);

		/* Rep hypercalls may complete partially; resume at 'done' */
		completed = hv_repcomp(status);
		if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	return ret;
}
333 
/**
 * hv_call_get_gpa_access_states() - read per-page access/dirty state
 * @partition_id: partition whose GPA space is queried
 * @count: number of pages to query
 * @gpa_base_pfn: first guest PFN to query
 * @state_flags: which state bits to retrieve/clear
 * @written_total: out; number of state entries written to @states
 * @states: caller buffer receiving one entry per page
 *
 * Return: 0 on success, negative errno on failure. On partial failure
 * @written_total reflects how many entries were retrieved before the error.
 */
int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
				  union hv_gpa_page_access_state_flags state_flags,
				  int *written_total,
				  union hv_gpa_page_access_state *states)
{
	struct hv_input_get_gpa_pages_access_state *input_page;
	union hv_gpa_page_access_state *output_page;
	int completed = 0;
	unsigned long remaining = count;
	int rep_count, i;
	/* 0 == HV_STATUS_SUCCESS, so a zero count reports success */
	u64 status = 0;
	unsigned long flags;

	*written_total = 0;
	while (remaining) {
		/* Per-cpu hypercall pages require interrupts disabled */
		local_irq_save(flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);

		input_page->partition_id = partition_id;
		input_page->hv_gpa_page_number = gpa_base_pfn + *written_total;
		input_page->flags = state_flags;
		rep_count = min(remaining, HV_GET_GPA_ACCESS_STATES_BATCH_SIZE);

		status = hv_do_rep_hypercall(HVCALL_GET_GPA_PAGES_ACCESS_STATES, rep_count,
					     0, input_page, output_page);
		if (!hv_result_success(status)) {
			local_irq_restore(flags);
			break;
		}
		completed = hv_repcomp(status);
		/*
		 * Copy out of the per-cpu output page before re-enabling
		 * interrupts; the page may be reused afterwards.
		 */
		for (i = 0; i < completed; ++i)
			states[i].as_uint8 = output_page[i].as_uint8;

		local_irq_restore(flags);
		states += completed;
		*written_total += completed;
		remaining -= completed;
	}

	return hv_result_to_errno(status);
}
376 
/*
 * Assert (inject) a virtual interrupt into @partition_id with the given
 * @vector, destination @dest_addr, and delivery @control. Returns 0 on
 * success, negative errno on failure.
 */
int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
				     u64 dest_addr,
				     union hv_interrupt_control control)
{
	struct hv_input_assert_virtual_interrupt *in_page;
	unsigned long irq_flags;
	u64 hv_status;

	/* Per-cpu hypercall input page requires interrupts disabled */
	local_irq_save(irq_flags);

	in_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(in_page, 0, sizeof(*in_page));

	in_page->partition_id = partition_id;
	in_page->vector = vector;
	in_page->dest_addr = dest_addr;
	in_page->control = control;

	hv_status = hv_do_hypercall(HVCALL_ASSERT_VIRTUAL_INTERRUPT, in_page,
				    NULL);

	local_irq_restore(irq_flags);

	return hv_result_to_errno(hv_status);
}
397 
/* Delete virtual processor @vp_index from partition @partition_id. */
int hv_call_delete_vp(u64 partition_id, u32 vp_index)
{
	union hv_input_delete_vp args = {};
	u64 hv_status;

	args.partition_id = partition_id;
	args.vp_index = vp_index;

	/* 16-byte input fits in a fast (register-based) hypercall */
	hv_status = hv_do_fast_hypercall16(HVCALL_DELETE_VP,
					   args.as_uint64[0],
					   args.as_uint64[1]);

	return hv_result_to_errno(hv_status);
}
EXPORT_SYMBOL_GPL(hv_call_delete_vp);
412 
/**
 * hv_call_get_vp_state() - retrieve VP state from the hypervisor
 * @vp_index: virtual processor index
 * @partition_id: owning partition
 * @state_data: describes which state component to fetch
 * @page_count: number of output pages in @pages (0 if using @ret_output)
 * @pages: caller pages the hypervisor writes the state into, or NULL
 * @ret_output: small-state output copied from the hypercall page, or NULL
 *
 * Exactly one of @pages / @ret_output is used. Retries after depositing a
 * page whenever the hypervisor reports HV_STATUS_INSUFFICIENT_MEMORY.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
			 struct hv_vp_state_data state_data,
			 /* Choose between pages and ret_output */
			 u64 page_count, struct page **pages,
			 union hv_output_get_vp_state *ret_output)
{
	struct hv_input_get_vp_state *input;
	union hv_output_get_vp_state *output;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;

	/* All output PFNs must fit in a single hypercall input page */
	if (page_count > HV_GET_VP_STATE_BATCH_SIZE)
		return -EINVAL;

	if (!page_count && !ret_output)
		return -EINVAL;

	do {
		/* Per-cpu hypercall pages require interrupts disabled */
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);
		memset(input, 0, sizeof(*input));
		memset(output, 0, sizeof(*output));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		for (i = 0; i < page_count; i++)
			input->output_data_pfns[i] = page_to_pfn(pages[i]);

		/* Encode the variable-size PFN list length in the control */
		control = (HVCALL_GET_VP_STATE) |
			  (page_count << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, output);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			/*
			 * Copy from the per-cpu output page before
			 * re-enabling interrupts.
			 */
			if (hv_result_success(status) && ret_output)
				memcpy(ret_output, output, sizeof(*output));

			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		/* Hypervisor needs memory: donate one page and retry */
		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    partition_id, 1);
	} while (!ret);

	return ret;
}
467 
/**
 * hv_call_set_vp_state() - push VP state to the hypervisor
 * @vp_index: virtual processor index
 * @partition_id: owning partition
 * @state_data: describes which state component to set
 * @page_count: number of data pages in @pages (0 if using @bytes)
 * @pages: pages holding the state data, or NULL
 * @num_bytes: length of @bytes (0 if using @pages)
 * @bytes: inline state data copied into the hypercall page, or NULL
 *
 * Exactly one of @pages / @bytes is used. Retries after depositing a page
 * whenever the hypervisor reports HV_STATUS_INSUFFICIENT_MEMORY.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
			 /* Choose between pages and bytes */
			 struct hv_vp_state_data state_data, u64 page_count,
			 struct page **pages, u32 num_bytes, u8 *bytes)
{
	struct hv_input_set_vp_state *input;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;
	u16 varhead_sz;

	if (page_count > HV_SET_VP_STATE_BATCH_SIZE)
		return -EINVAL;
	/* Inline data must fit in the hypercall input page */
	if (sizeof(*input) + num_bytes > HV_HYP_PAGE_SIZE)
		return -EINVAL;

	/* varhead_sz is measured in 8-byte units */
	if (num_bytes)
		/* round up to 8 and divide by 8 */
		varhead_sz = (num_bytes + 7) >> 3;
	else if (page_count)
		varhead_sz = page_count;
	else
		return -EINVAL;

	do {
		/* Per-cpu hypercall input page requires interrupts disabled */
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		memset(input, 0, sizeof(*input));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		if (num_bytes) {
			memcpy((u8 *)input->data, bytes, num_bytes);
		} else {
			for (i = 0; i < page_count; i++)
				input->data[i].pfns = page_to_pfn(pages[i]);
		}

		control = (HVCALL_SET_VP_STATE) |
			  (varhead_sz << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, NULL);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		/* Hypervisor needs memory: donate one page and retry */
		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    partition_id, 1);
	} while (!ret);

	return ret;
}
527 
/**
 * hv_call_map_vp_state_page() - map a VP state page into the root
 * @partition_id: owning partition
 * @vp_index: virtual processor index
 * @type: which state page to map (HV_VP_STATE_PAGE_*)
 * @input_vtl: VTL the request targets
 * @state_page: out; struct page of the mapped location, set only on success
 *
 * Retries after depositing a page whenever the hypervisor reports
 * HV_STATUS_INSUFFICIENT_MEMORY.
 *
 * NOTE(review): the input page is not zeroed here, unlike most callers in
 * this file; assumes every field of hv_input_map_vp_state_page is assigned
 * below — confirm against the struct definition.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
			      union hv_input_vtl input_vtl,
			      struct page **state_page)
{
	struct hv_input_map_vp_state_page *input;
	struct hv_output_map_vp_state_page *output;
	u64 status;
	int ret;
	unsigned long flags;

	do {
		/* Per-cpu hypercall pages require interrupts disabled */
		local_irq_save(flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->type = type;
		input->input_vtl = input_vtl;

		status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input, output);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			/* Read the output before re-enabling interrupts */
			if (hv_result_success(status))
				*state_page = pfn_to_page(output->map_location);
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}

		local_irq_restore(flags);

		/* Hypervisor needs memory: donate one page and retry */
		ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
	} while (!ret);

	return ret;
}
566 
/*
 * Unmap a VP state page previously mapped with hv_call_map_vp_state_page().
 * Returns 0 on success, negative errno on failure.
 */
int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
				union hv_input_vtl input_vtl)
{
	struct hv_input_unmap_vp_state_page *in_page;
	unsigned long irq_flags;
	u64 hv_status;

	/* Per-cpu hypercall input page requires interrupts disabled */
	local_irq_save(irq_flags);

	in_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(in_page, 0, sizeof(*in_page));

	in_page->partition_id = partition_id;
	in_page->vp_index = vp_index;
	in_page->type = type;
	in_page->input_vtl = input_vtl;

	hv_status = hv_do_hypercall(HVCALL_UNMAP_VP_STATE_PAGE, in_page, NULL);

	local_irq_restore(irq_flags);

	return hv_result_to_errno(hv_status);
}
591 
592 int
hv_call_clear_virtual_interrupt(u64 partition_id)593 hv_call_clear_virtual_interrupt(u64 partition_id)
594 {
595 	int status;
596 
597 	status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
598 				       partition_id);
599 
600 	return hv_result_to_errno(status);
601 }
602 
603 int
hv_call_create_port(u64 port_partition_id,union hv_port_id port_id,u64 connection_partition_id,struct hv_port_info * port_info,u8 port_vtl,u8 min_connection_vtl,int node)604 hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
605 		    u64 connection_partition_id,
606 		    struct hv_port_info *port_info,
607 		    u8 port_vtl, u8 min_connection_vtl, int node)
608 {
609 	struct hv_input_create_port *input;
610 	unsigned long flags;
611 	int ret = 0;
612 	int status;
613 
614 	do {
615 		local_irq_save(flags);
616 		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
617 		memset(input, 0, sizeof(*input));
618 
619 		input->port_partition_id = port_partition_id;
620 		input->port_id = port_id;
621 		input->connection_partition_id = connection_partition_id;
622 		input->port_info = *port_info;
623 		input->port_vtl = port_vtl;
624 		input->min_connection_vtl = min_connection_vtl;
625 		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
626 		status = hv_do_hypercall(HVCALL_CREATE_PORT, input, NULL);
627 		local_irq_restore(flags);
628 		if (hv_result_success(status))
629 			break;
630 
631 		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
632 			ret = hv_result_to_errno(status);
633 			break;
634 		}
635 		ret = hv_call_deposit_pages(NUMA_NO_NODE, port_partition_id, 1);
636 
637 	} while (!ret);
638 
639 	return ret;
640 }
641 
642 int
hv_call_delete_port(u64 port_partition_id,union hv_port_id port_id)643 hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
644 {
645 	union hv_input_delete_port input = { 0 };
646 	int status;
647 
648 	input.port_partition_id = port_partition_id;
649 	input.port_id = port_id;
650 	status = hv_do_fast_hypercall16(HVCALL_DELETE_PORT,
651 					input.as_uint64[0],
652 					input.as_uint64[1]);
653 
654 	return hv_result_to_errno(status);
655 }
656 
657 int
hv_call_connect_port(u64 port_partition_id,union hv_port_id port_id,u64 connection_partition_id,union hv_connection_id connection_id,struct hv_connection_info * connection_info,u8 connection_vtl,int node)658 hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
659 		     u64 connection_partition_id,
660 		     union hv_connection_id connection_id,
661 		     struct hv_connection_info *connection_info,
662 		     u8 connection_vtl, int node)
663 {
664 	struct hv_input_connect_port *input;
665 	unsigned long flags;
666 	int ret = 0, status;
667 
668 	do {
669 		local_irq_save(flags);
670 		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
671 		memset(input, 0, sizeof(*input));
672 		input->port_partition_id = port_partition_id;
673 		input->port_id = port_id;
674 		input->connection_partition_id = connection_partition_id;
675 		input->connection_id = connection_id;
676 		input->connection_info = *connection_info;
677 		input->connection_vtl = connection_vtl;
678 		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
679 		status = hv_do_hypercall(HVCALL_CONNECT_PORT, input, NULL);
680 
681 		local_irq_restore(flags);
682 		if (hv_result_success(status))
683 			break;
684 
685 		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
686 			ret = hv_result_to_errno(status);
687 			break;
688 		}
689 		ret = hv_call_deposit_pages(NUMA_NO_NODE,
690 					    connection_partition_id, 1);
691 	} while (!ret);
692 
693 	return ret;
694 }
695 
696 int
hv_call_disconnect_port(u64 connection_partition_id,union hv_connection_id connection_id)697 hv_call_disconnect_port(u64 connection_partition_id,
698 			union hv_connection_id connection_id)
699 {
700 	union hv_input_disconnect_port input = { 0 };
701 	int status;
702 
703 	input.connection_partition_id = connection_partition_id;
704 	input.connection_id = connection_id;
705 	input.is_doorbell = 1;
706 	status = hv_do_fast_hypercall16(HVCALL_DISCONNECT_PORT,
707 					input.as_uint64[0],
708 					input.as_uint64[1]);
709 
710 	return hv_result_to_errno(status);
711 }
712 
713 int
hv_call_notify_port_ring_empty(u32 sint_index)714 hv_call_notify_port_ring_empty(u32 sint_index)
715 {
716 	union hv_input_notify_port_ring_empty input = { 0 };
717 	int status;
718 
719 	input.sint_index = sint_index;
720 	status = hv_do_fast_hypercall8(HVCALL_NOTIFY_PORT_RING_EMPTY,
721 				       input.as_uint64);
722 
723 	return hv_result_to_errno(status);
724 }
725 
/**
 * hv_call_map_stat_page() - map a hypervisor statistics page
 * @type: statistics object type
 * @identity: identifies the statistics object
 * @addr: out; kernel virtual address of the mapped page, set only on success
 *
 * Retries after depositing a page to the root partition whenever the
 * hypervisor reports HV_STATUS_INSUFFICIENT_MEMORY.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hv_call_map_stat_page(enum hv_stats_object_type type,
			  const union hv_stats_object_identity *identity,
			  void **addr)
{
	unsigned long flags;
	struct hv_input_map_stats_page *input;
	struct hv_output_map_stats_page *output;
	u64 status, pfn;
	int ret = 0;

	do {
		/* Per-cpu hypercall pages require interrupts disabled */
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->type = type;
		input->identity = *identity;

		status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
		/*
		 * Latch the output while irqs are still off; only consumed
		 * below if the hypercall succeeded.
		 */
		pfn = output->map_location;

		local_irq_restore(flags);
		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			if (hv_result_success(status))
				break;
			/* Hard failure: bail out without touching *addr */
			return ret;
		}

		/* Hypervisor needs memory: donate one page and retry */
		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    hv_current_partition_id, 1);
		if (ret)
			return ret;
	} while (!ret);

	*addr = page_address(pfn_to_page(pfn));

	return ret;
}
766 
hv_call_unmap_stat_page(enum hv_stats_object_type type,const union hv_stats_object_identity * identity)767 int hv_call_unmap_stat_page(enum hv_stats_object_type type,
768 			    const union hv_stats_object_identity *identity)
769 {
770 	unsigned long flags;
771 	struct hv_input_unmap_stats_page *input;
772 	u64 status;
773 
774 	local_irq_save(flags);
775 	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
776 
777 	memset(input, 0, sizeof(*input));
778 	input->type = type;
779 	input->identity = *identity;
780 
781 	status = hv_do_hypercall(HVCALL_UNMAP_STATS_PAGE, input, NULL);
782 	local_irq_restore(flags);
783 
784 	return hv_result_to_errno(status);
785 }
786 
/**
 * hv_call_modify_spa_host_access() - acquire or release host access to
 *				      sparse SPA pages
 * @partition_id: partition the pages belong to (used only when making
 *		  pages exclusive)
 * @pages: struct pages identifying the SPAs
 * @page_struct_count: number of entries in @pages
 * @host_access: requested host access rights
 * @flags: HV_MODIFY_SPA_PAGE_HOST_ACCESS_* flags; with the LARGE_PAGE
 *	   flag the count must be 2M aligned and is handled in 2M units
 * @acquire: nonzero to acquire access, zero to release
 *
 * Fix: the out-of-range index check previously returned -EINVAL from
 * inside the irq-off region, leaking the disabled-interrupts state; it
 * now restores interrupts before bailing out (same pattern as
 * hv_do_map_gpa_hcall()).
 *
 * Return: 0 on success, negative errno on failure.
 */
int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
				   u64 page_struct_count, u32 host_access,
				   u32 flags, u8 acquire)
{
	struct hv_input_modify_sparse_spa_page_host_access *input_page;
	u64 status;
	int done = 0;
	int ret = 0;
	unsigned long irq_flags, large_shift = 0;
	u64 page_count = page_struct_count;
	u16 code = acquire ? HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS :
			     HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;
		/* Work in 2M units from here on */
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong i, completed, remain = page_count - done;
		int rep_count = min(remain,
				    HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT);

		/* Per-cpu hypercall input page requires interrupts disabled */
		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		/* Only set the partition id if you are making the pages
		 * exclusive
		 */
		if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
			input_page->partition_id = partition_id;
		input_page->flags = flags;
		input_page->host_access = host_access;

		for (i = 0; i < rep_count; i++) {
			u64 index = (done + i) << large_shift;

			if (index >= page_struct_count) {
				ret = -EINVAL;
				break;
			}

			input_page->spa_page_list[i] =
						page_to_pfn(pages[index]);
		}
		if (ret) {
			/* Must not return with interrupts disabled */
			local_irq_restore(irq_flags);
			break;
		}

		status = hv_do_rep_hypercall(code, rep_count, 0, input_page,
					     NULL);
		local_irq_restore(irq_flags);

		/* Rep hypercalls may complete partially; resume at 'done' */
		completed = hv_repcomp(status);

		if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	return ret;
}
850