Lines Matching +full:partition (drivers/hv/mshv_root_main.c)

1 // SPDX-License-Identifier: GPL-2.0-only
27 #include <linux/page-flags.h>
38 MODULE_DESCRIPTION("Microsoft Hyper-V root partition VMM interface /dev/mshv");
80 static int mshv_init_async_handler(struct mshv_partition *partition);
124 * Only allow hypercalls that have a u64 partition id as the first member of the input structure.
155 static int mshv_ioctl_passthru_hvcall(struct mshv_partition *partition, in mshv_ioctl_passthru_hvcall() argument
169 return -EFAULT; in mshv_ioctl_passthru_hvcall()
173 return -EINVAL; in mshv_ioctl_passthru_hvcall()
176 return -EINVAL; in mshv_ioctl_passthru_hvcall()
183 return -EINVAL; in mshv_ioctl_passthru_hvcall()
187 /* async hypercalls can only be called from partition fd */ in mshv_ioctl_passthru_hvcall()
189 return -EINVAL; in mshv_ioctl_passthru_hvcall()
190 ret = mshv_init_async_handler(partition); in mshv_ioctl_passthru_hvcall()
198 return -ENOMEM; in mshv_ioctl_passthru_hvcall()
208 ret = -EFAULT; in mshv_ioctl_passthru_hvcall()
216 *(u64 *)input_pg = partition->pt_id; in mshv_ioctl_passthru_hvcall()
226 mshv_async_hvcall_handler(partition, &status); in mshv_ioctl_passthru_hvcall()
228 ret = -EBADFD; in mshv_ioctl_passthru_hvcall()
234 ret = hv_call_deposit_pages(NUMA_NO_NODE, partition->pt_id, 1); in mshv_ioctl_passthru_hvcall()
236 ret = -EAGAIN; in mshv_ioctl_passthru_hvcall()
250 ret = -EFAULT; in mshv_ioctl_passthru_hvcall()
254 ret = -EFAULT; in mshv_ioctl_passthru_hvcall()
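
The -EAGAIN fragment above belongs to a retry pattern: when the hypervisor rejects a pass-through hypercall with HV_STATUS_INSUFFICIENT_MEMORY, the root partition deposits a page into the child and reissues the call. A minimal sketch of that loop, reusing hv_call_deposit_pages() and input_pg from the fragments; args.code and output_pg are assumed names, and the exact upstream control flow may differ:

	u64 status;
	int ret = 0;

	do {
		status = hv_do_hypercall(args.code, input_pg, output_pg);
		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY)
			break;
		/* Top up the child partition's memory pool, then retry. */
		ret = hv_call_deposit_pages(NUMA_NO_NODE, partition->pt_id, 1);
		if (ret)
			ret = -EAGAIN;
	} while (!ret);
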
294 * 1. implicit suspend bit set -> explicit suspend bit set -> message sent
295 * 2. implicit suspend bit set -> message sent -> explicit suspend bit set
315 es->suspended = 1; in mshv_suspend_vp()
317 ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id, in mshv_suspend_vp()
324 ret = mshv_get_vp_registers(vp->vp_index, vp->vp_partition->pt_id, in mshv_suspend_vp()
331 *message_in_flight = is->suspended; in mshv_suspend_vp()
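
The mshv_suspend_vp() fragments implement the ordering rules above: first set the explicit suspend bit, then read the intercept suspend bit to learn whether a message is still in flight. A sketch under the assumption that the register unions live inside hv_register_assoc.value as in the hvgdk headers, and that the trailing arguments of mshv_set/get_vp_registers() are a count plus a register array:

	struct hv_register_assoc explicit_assoc = {
		.name = HV_REGISTER_EXPLICIT_SUSPEND,
	};
	struct hv_register_assoc intercept_assoc = {
		.name = HV_REGISTER_INTERCEPT_SUSPEND,
	};
	union hv_explicit_suspend_register *es =
		&explicit_assoc.value.explicit_suspend;
	union hv_intercept_suspend_register *is =
		&intercept_assoc.value.intercept_suspend;
	int ret;

	es->suspended = 1;
	ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
				    1, &explicit_assoc);
	if (ret)
		return ret;

	ret = mshv_get_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
				    1, &intercept_assoc);
	if (ret)
		return ret;

	/* Set in either ordering until the in-flight message lands. */
	*message_in_flight = is->suspended;
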
357 ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id, in mshv_run_vp_with_hyp_scheduler()
364 ret = wait_event_interruptible(vp->run.vp_suspend_queue, in mshv_run_vp_with_hyp_scheduler()
365 vp->run.kicked_by_hv == 1); in mshv_run_vp_with_hyp_scheduler()
379 return -EINTR; in mshv_run_vp_with_hyp_scheduler()
382 wait_event(vp->run.vp_suspend_queue, vp->run.kicked_by_hv == 1); in mshv_run_vp_with_hyp_scheduler()
389 vp->run.kicked_by_hv = 0; in mshv_run_vp_with_hyp_scheduler()
409 input->partition_id = vp->vp_partition->pt_id; in mshv_vp_dispatch()
410 input->vp_index = vp->vp_index; in mshv_vp_dispatch()
411 input->time_slice = 0; /* Run forever until something happens */ in mshv_vp_dispatch()
412 input->spec_ctrl = 0; /* TODO: set sensible flags */ in mshv_vp_dispatch()
413 input->flags = flags; in mshv_vp_dispatch()
415 vp->run.flags.root_sched_dispatched = 1; in mshv_vp_dispatch()
417 vp->run.flags.root_sched_dispatched = 0; in mshv_vp_dispatch()
438 ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id, in mshv_vp_clear_explicit_suspend()
450 if (!vp->vp_register_page) in mshv_vp_interrupt_pending()
452 return vp->vp_register_page->interrupt_vectors.as_uint64; in mshv_vp_interrupt_pending()
463 struct hv_stats_page **stats = vp->vp_stats_pages; in mshv_vp_dispatch_thread_blocked()
464 u64 *self_vp_cntrs = stats[HV_STATS_AREA_SELF]->vp_cntrs; in mshv_vp_dispatch_thread_blocked()
465 u64 *parent_vp_cntrs = stats[HV_STATS_AREA_PARENT]->vp_cntrs; in mshv_vp_dispatch_thread_blocked()
477 ret = wait_event_interruptible(vp->run.vp_suspend_queue, in mshv_vp_wait_for_hv_kick()
478 (vp->run.kicked_by_hv == 1 && in mshv_vp_wait_for_hv_kick()
482 return -EINTR; in mshv_vp_wait_for_hv_kick()
484 vp->run.flags.root_sched_blocked = 0; in mshv_vp_wait_for_hv_kick()
485 vp->run.kicked_by_hv = 0; in mshv_vp_wait_for_hv_kick()
517 if (vp->run.flags.root_sched_blocked) { in mshv_run_vp_with_root_scheduler()
536 if (vp->run.flags.intercept_suspend) in mshv_run_vp_with_root_scheduler()
546 vp->run.flags.intercept_suspend = 0; in mshv_run_vp_with_root_scheduler()
555 WARN_ONCE(atomic64_read(&vp->run.vp_signaled_count), in mshv_run_vp_with_root_scheduler()
557 __func__, vp->vp_index); in mshv_run_vp_with_root_scheduler()
562 * - set right after the first VP dispatch or in mshv_run_vp_with_root_scheduler()
563 * - set explicitly via hypercall in mshv_run_vp_with_root_scheduler()
575 vp->run.flags.root_sched_blocked = 1; in mshv_run_vp_with_root_scheduler()
584 vp->run.flags.intercept_suspend = 1; in mshv_run_vp_with_root_scheduler()
586 } while (!vp->run.flags.intercept_suspend); in mshv_run_vp_with_root_scheduler()
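
Pulling the fragments of mshv_run_vp_with_root_scheduler() together, the loop shape is: wait for a hypervisor kick if the dispatch thread is blocked, dispatch the VP, and go around until an intercept suspends it. A hedged skeleton; the helper argument lists beyond what the fragments show are assumptions:

	do {
		if (vp->run.flags.root_sched_blocked) {
			/* Dispatch thread blocked in the hypervisor; wait for a kick. */
			ret = mshv_vp_wait_for_hv_kick(vp);
			if (ret)
				return ret;
		}

		ret = mshv_vp_dispatch(vp, flags);
		if (ret)
			return ret;

		if (mshv_vp_dispatch_thread_blocked(vp))
			vp->run.flags.root_sched_blocked = 1;
		else
			vp->run.flags.intercept_suspend = 1;
	} while (!vp->run.flags.intercept_suspend);
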
606 if (copy_to_user(ret_msg, vp->vp_intercept_msg_page, in mshv_vp_ioctl_run_vp()
608 rc = -EFAULT; in mshv_vp_ioctl_run_vp()
624 return -EINVAL; in mshv_vp_ioctl_get_set_state_pfn()
628 * (user_pfn + (page_count - 1)) * PAGE_SIZE in mshv_vp_ioctl_get_set_state_pfn()
630 if (check_add_overflow(user_pfn, (page_count - 1), &check)) in mshv_vp_ioctl_get_set_state_pfn()
631 return -EOVERFLOW; in mshv_vp_ioctl_get_set_state_pfn()
633 return -EOVERFLOW; in mshv_vp_ioctl_get_set_state_pfn()
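
The comment and the two -EOVERFLOW returns are the standard linux/overflow.h pattern: prove that the last byte address, (user_pfn + (page_count - 1)) * PAGE_SIZE, is computable without wraparound. A self-contained sketch (the helper name is hypothetical):

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/overflow.h>

	/* Reject a pfn/count pair whose byte range would wrap around. */
	static int check_state_buf_range(u64 user_pfn, u64 page_count)
	{
		u64 check;

		if (check_add_overflow(user_pfn, page_count - 1, &check))
			return -EOVERFLOW;
		if (check_mul_overflow(check, (u64)PAGE_SIZE, &check))
			return -EOVERFLOW;
		return 0;
	}
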
638 return -ENOMEM; in mshv_vp_ioctl_get_set_state_pfn()
642 int remaining = page_count - completed; in mshv_vp_ioctl_get_set_state_pfn()
654 ret = hv_call_set_vp_state(vp->vp_index, in mshv_vp_ioctl_get_set_state_pfn()
655 vp->vp_partition->pt_id, in mshv_vp_ioctl_get_set_state_pfn()
659 ret = hv_call_get_vp_state(vp->vp_index, in mshv_vp_ioctl_get_set_state_pfn()
660 vp->vp_partition->pt_id, in mshv_vp_ioctl_get_set_state_pfn()
682 return -EFAULT; in mshv_vp_ioctl_get_set_state()
687 return -EINVAL; in mshv_vp_ioctl_get_set_state()
690 return -EFAULT; in mshv_vp_ioctl_get_set_state()
701 ret = hv_call_get_partition_property(vp->vp_partition->pt_id, in mshv_vp_ioctl_get_set_state()
707 ret = hv_call_get_partition_property(vp->vp_partition->pt_id, in mshv_vp_ioctl_get_set_state()
734 return -EINVAL; in mshv_vp_ioctl_get_set_state()
737 if (copy_to_user(&user_args->buf_sz, &data_sz, sizeof(user_args->buf_sz))) in mshv_vp_ioctl_get_set_state()
738 return -EFAULT; in mshv_vp_ioctl_get_set_state()
741 return -EINVAL; in mshv_vp_ioctl_get_set_state()
752 /* Paranoia check - this shouldn't happen! */ in mshv_vp_ioctl_get_set_state()
755 return -EINVAL; in mshv_vp_ioctl_get_set_state()
760 return -EFAULT; in mshv_vp_ioctl_get_set_state()
762 return hv_call_set_vp_state(vp->vp_index, in mshv_vp_ioctl_get_set_state()
763 vp->vp_partition->pt_id, in mshv_vp_ioctl_get_set_state()
768 ret = hv_call_get_vp_state(vp->vp_index, vp->vp_partition->pt_id, in mshv_vp_ioctl_get_set_state()
774 return -EFAULT; in mshv_vp_ioctl_get_set_state()
782 struct mshv_vp *vp = filp->private_data; in mshv_vp_ioctl()
783 long r = -ENOTTY; in mshv_vp_ioctl()
785 if (mutex_lock_killable(&vp->vp_mutex)) in mshv_vp_ioctl()
786 return -EINTR; in mshv_vp_ioctl()
799 r = mshv_ioctl_passthru_hvcall(vp->vp_partition, false, in mshv_vp_ioctl()
806 mutex_unlock(&vp->vp_mutex); in mshv_vp_ioctl()
813 struct mshv_vp *vp = vmf->vma->vm_file->private_data; in mshv_vp_fault()
815 switch (vmf->vma->vm_pgoff) { in mshv_vp_fault()
817 vmf->page = virt_to_page(vp->vp_register_page); in mshv_vp_fault()
820 vmf->page = virt_to_page(vp->vp_intercept_msg_page); in mshv_vp_fault()
823 vmf->page = virt_to_page(vp->vp_ghcb_page); in mshv_vp_fault()
829 get_page(vmf->page); in mshv_vp_fault()
836 struct mshv_vp *vp = file->private_data; in mshv_vp_mmap()
838 switch (vma->vm_pgoff) { in mshv_vp_mmap()
840 if (!vp->vp_register_page) in mshv_vp_mmap()
841 return -ENODEV; in mshv_vp_mmap()
844 if (!vp->vp_intercept_msg_page) in mshv_vp_mmap()
845 return -ENODEV; in mshv_vp_mmap()
848 if (!vp->vp_ghcb_page) in mshv_vp_mmap()
849 return -ENODEV; in mshv_vp_mmap()
852 return -EINVAL; in mshv_vp_mmap()
855 vma->vm_ops = &mshv_vp_vm_ops; in mshv_vp_mmap()
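
The fault and mmap fragments show the VP fd exposing up to three shared pages selected purely by vm_pgoff: the register page, the intercept-message page, and (for encrypted partitions) the GHCB page; mshv_vp_fault() hands back the matching kernel page and get_page() keeps it pinned for the mapping's lifetime. A hypothetical userspace sketch; the MSHV_VP_MMAP_OFFSET_* values stand in for whatever the mshv uapi header actually defines:

	#include <sys/mman.h>
	#include <unistd.h>

	/* Assumed page-offset constants; take the real ones from the uapi header. */
	#define MSHV_VP_MMAP_OFFSET_REGISTERS		0
	#define MSHV_VP_MMAP_OFFSET_INTERCEPT_MESSAGE	1

	static void *map_vp_page(int vp_fd, long pgoff)
	{
		long psz = sysconf(_SC_PAGESIZE);

		/* vm_pgoff picks which shared page the fault handler returns. */
		return mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED,
			    vp_fd, pgoff * psz);
	}
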
862 struct mshv_vp *vp = filp->private_data; in mshv_vp_release()
865 mshv_partition_put(vp->vp_partition); in mshv_vp_release()
913 mshv_partition_ioctl_create_vp(struct mshv_partition *partition, in mshv_partition_ioctl_create_vp() argument
923 return -EFAULT; in mshv_partition_ioctl_create_vp()
926 return -EINVAL; in mshv_partition_ioctl_create_vp()
928 if (partition->pt_vp_array[args.vp_index]) in mshv_partition_ioctl_create_vp()
929 return -EEXIST; in mshv_partition_ioctl_create_vp()
931 ret = hv_call_create_vp(NUMA_NO_NODE, partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
932 0 /* Only valid for root partition VPs */); in mshv_partition_ioctl_create_vp()
936 ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
943 if (!mshv_partition_encrypted(partition)) { in mshv_partition_ioctl_create_vp()
944 ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
952 if (mshv_partition_encrypted(partition) && in mshv_partition_ioctl_create_vp()
954 ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
963 ret = mshv_vp_stats_map(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
973 vp->vp_partition = mshv_partition_get(partition); in mshv_partition_ioctl_create_vp()
974 if (!vp->vp_partition) { in mshv_partition_ioctl_create_vp()
975 ret = -EBADF; in mshv_partition_ioctl_create_vp()
979 mutex_init(&vp->vp_mutex); in mshv_partition_ioctl_create_vp()
980 init_waitqueue_head(&vp->run.vp_suspend_queue); in mshv_partition_ioctl_create_vp()
981 atomic64_set(&vp->run.vp_signaled_count, 0); in mshv_partition_ioctl_create_vp()
983 vp->vp_index = args.vp_index; in mshv_partition_ioctl_create_vp()
984 vp->vp_intercept_msg_page = page_to_virt(intercept_message_page); in mshv_partition_ioctl_create_vp()
985 if (!mshv_partition_encrypted(partition)) in mshv_partition_ioctl_create_vp()
986 vp->vp_register_page = page_to_virt(register_page); in mshv_partition_ioctl_create_vp()
988 if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available()) in mshv_partition_ioctl_create_vp()
989 vp->vp_ghcb_page = page_to_virt(ghcb_page); in mshv_partition_ioctl_create_vp()
992 memcpy(vp->vp_stats_pages, stats_pages, sizeof(stats_pages)); in mshv_partition_ioctl_create_vp()
1003 /* already exclusive with the partition mutex for all ioctls */ in mshv_partition_ioctl_create_vp()
1004 partition->pt_vp_count++; in mshv_partition_ioctl_create_vp()
1005 partition->pt_vp_array[args.vp_index] = vp; in mshv_partition_ioctl_create_vp()
1010 mshv_partition_put(partition); in mshv_partition_ioctl_create_vp()
1015 mshv_vp_stats_unmap(partition->pt_id, args.vp_index); in mshv_partition_ioctl_create_vp()
1017 if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available()) { in mshv_partition_ioctl_create_vp()
1018 hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
1023 if (!mshv_partition_encrypted(partition)) { in mshv_partition_ioctl_create_vp()
1024 hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
1029 hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
1033 hv_call_delete_vp(partition->pt_id, args.vp_index); in mshv_partition_ioctl_create_vp()
1037 static int mshv_init_async_handler(struct mshv_partition *partition) in mshv_init_async_handler() argument
1039 if (completion_done(&partition->async_hypercall)) { in mshv_init_async_handler()
1040 pt_err(partition, in mshv_init_async_handler()
1042 return -EPERM; in mshv_init_async_handler()
1045 reinit_completion(&partition->async_hypercall); in mshv_init_async_handler()
1051 struct mshv_partition *partition = data; in mshv_async_hvcall_handler() local
1053 wait_for_completion(&partition->async_hypercall); in mshv_async_hvcall_handler()
1054 pt_dbg(partition, "Async hypercall completed!\n"); in mshv_async_hvcall_handler()
1056 *status = partition->async_hypercall_status; in mshv_async_hvcall_handler()
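
The async machinery above is a plain struct completion handshake: arming fails with -EPERM if a previous completion is still pending (signalled but not yet consumed), the waiter blocks in wait_for_completion(), and the completion path records the hypervisor status before signalling. A minimal generic sketch of the same pattern; names other than the completion API are illustrative:

	#include <linux/completion.h>
	#include <linux/errno.h>

	struct async_call {
		struct completion done;	/* init_completion() at setup */
		u64 status;
	};

	static int async_call_arm(struct async_call *c)
	{
		/* A still-pending completion means another call is in flight. */
		if (completion_done(&c->done))
			return -EPERM;
		reinit_completion(&c->done);
		return 0;
	}

	static u64 async_call_wait(struct async_call *c)
	{
		wait_for_completion(&c->done);
		return c->status;
	}

	static void async_call_finish(struct async_call *c, u64 status)
	{
		c->status = status;	/* publish before waking the waiter */
		complete(&c->done);
	}
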
1064 if (region->flags.large_pages) in mshv_partition_region_share()
1067 return hv_call_modify_spa_host_access(region->partition->pt_id, in mshv_partition_region_share()
1068 region->pages, region->nr_pages, in mshv_partition_region_share()
1078 if (region->flags.large_pages) in mshv_partition_region_unshare()
1081 return hv_call_modify_spa_host_access(region->partition->pt_id, in mshv_partition_region_unshare()
1082 region->pages, region->nr_pages, in mshv_partition_region_unshare()
1091 if (page_offset + page_count > region->nr_pages) in mshv_region_remap_pages()
1092 return -EINVAL; in mshv_region_remap_pages()
1094 if (region->flags.large_pages) in mshv_region_remap_pages()
1098 return hv_call_map_gpa_pages(region->partition->pt_id, in mshv_region_remap_pages()
1099 region->start_gfn + page_offset, in mshv_region_remap_pages()
1101 region->pages + page_offset); in mshv_region_remap_pages()
1107 u32 map_flags = region->hv_map_flags; in mshv_region_map()
1110 0, region->nr_pages); in mshv_region_map()
1117 if (region->flags.range_pinned) in mshv_region_evict_pages()
1118 unpin_user_pages(region->pages + page_offset, page_count); in mshv_region_evict_pages()
1120 memset(region->pages + page_offset, 0, in mshv_region_evict_pages()
1127 mshv_region_evict_pages(region, 0, region->nr_pages); in mshv_region_evict()
1139 if (page_offset + page_count > region->nr_pages) in mshv_region_populate_pages()
1140 return -EINVAL; in mshv_region_populate_pages()
1143 pages = region->pages + page_offset + done_count; in mshv_region_populate_pages()
1144 userspace_addr = region->start_uaddr + in mshv_region_populate_pages()
1147 nr_pages = min(page_count - done_count, in mshv_region_populate_pages()
1158 if (region->flags.range_pinned) in mshv_region_populate_pages()
1164 ret = -EOPNOTSUPP; in mshv_region_populate_pages()
1170 if (PageHuge(region->pages[page_offset])) in mshv_region_populate_pages()
1171 region->flags.large_pages = true; in mshv_region_populate_pages()
1183 return mshv_region_populate_pages(region, 0, region->nr_pages); in mshv_region_populate()
1187 mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn) in mshv_partition_region_by_gfn() argument
1191 hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) { in mshv_partition_region_by_gfn()
1192 if (gfn >= region->start_gfn && in mshv_partition_region_by_gfn()
1193 gfn < region->start_gfn + region->nr_pages) in mshv_partition_region_by_gfn()
1201 mshv_partition_region_by_uaddr(struct mshv_partition *partition, u64 uaddr) in mshv_partition_region_by_uaddr() argument
1205 hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) { in mshv_partition_region_by_uaddr()
1206 if (uaddr >= region->start_uaddr && in mshv_partition_region_by_uaddr()
1207 uaddr < region->start_uaddr + in mshv_partition_region_by_uaddr()
1208 (region->nr_pages << HV_HYP_PAGE_SHIFT)) in mshv_partition_region_by_uaddr()
1216 * NB: caller checks and makes sure mem->size is page aligned
1217 * Returns: 0 with regionpp updated on success, or -errno
1219 static int mshv_partition_create_region(struct mshv_partition *partition, in mshv_partition_create_region() argument
1225 u64 nr_pages = HVPFN_DOWN(mem->size); in mshv_partition_create_region()
1228 if (mshv_partition_region_by_gfn(partition, mem->guest_pfn) || in mshv_partition_create_region()
1229 mshv_partition_region_by_gfn(partition, mem->guest_pfn + nr_pages - 1) || in mshv_partition_create_region()
1230 mshv_partition_region_by_uaddr(partition, mem->userspace_addr) || in mshv_partition_create_region()
1231 mshv_partition_region_by_uaddr(partition, mem->userspace_addr + mem->size - 1)) in mshv_partition_create_region()
1232 return -EEXIST; in mshv_partition_create_region()
1236 return -ENOMEM; in mshv_partition_create_region()
1238 region->nr_pages = nr_pages; in mshv_partition_create_region()
1239 region->start_gfn = mem->guest_pfn; in mshv_partition_create_region()
1240 region->start_uaddr = mem->userspace_addr; in mshv_partition_create_region()
1241 region->hv_map_flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_ADJUSTABLE; in mshv_partition_create_region()
1242 if (mem->flags & BIT(MSHV_SET_MEM_BIT_WRITABLE)) in mshv_partition_create_region()
1243 region->hv_map_flags |= HV_MAP_GPA_WRITABLE; in mshv_partition_create_region()
1244 if (mem->flags & BIT(MSHV_SET_MEM_BIT_EXECUTABLE)) in mshv_partition_create_region()
1245 region->hv_map_flags |= HV_MAP_GPA_EXECUTABLE; in mshv_partition_create_region()
1249 region->flags.range_pinned = true; in mshv_partition_create_region()
1251 region->partition = partition; in mshv_partition_create_region()
1265 struct mshv_partition *partition = region->partition; in mshv_partition_mem_region_map() local
1270 pt_err(partition, "Failed to populate memory region: %d\n", in mshv_partition_mem_region_map()
1276 * For an SNP partition it is a requirement that for every memory region in mshv_partition_mem_region_map()
1277 * that we are going to map for this partition we should make sure that in mshv_partition_mem_region_map()
1282 if (mshv_partition_encrypted(partition)) { in mshv_partition_mem_region_map()
1285 pt_err(partition, in mshv_partition_mem_region_map()
1287 region->start_gfn, ret); in mshv_partition_mem_region_map()
1293 if (ret && mshv_partition_encrypted(partition)) { in mshv_partition_mem_region_map()
1300 pt_err(partition, in mshv_partition_mem_region_map()
1302 region->start_gfn, shrc); in mshv_partition_mem_region_map()
1322 * - vfio overloads vm_pgoff to store the mmio start pfn/spa.
1323 * - Two things need to happen for mapping mmio range: in mshv_map_user_memory()
1324 * 1. mapped in the uaddr so VMM can access it. in mshv_map_user_memory()
1325 * 2. mapped in the hwpt (gfn <-> mmio phys addr) so guest can access it. in mshv_map_user_memory()
1331 mshv_map_user_memory(struct mshv_partition *partition, in mshv_map_user_memory() argument
1342 return -EINVAL; in mshv_map_user_memory()
1344 mmap_read_lock(current->mm); in mshv_map_user_memory()
1345 vma = vma_lookup(current->mm, mem.userspace_addr); in mshv_map_user_memory()
1346 is_mmio = vma ? !!(vma->vm_flags & (VM_IO | VM_PFNMAP)) : 0; in mshv_map_user_memory()
1347 mmio_pfn = is_mmio ? vma->vm_pgoff : 0; in mshv_map_user_memory()
1348 mmap_read_unlock(current->mm); in mshv_map_user_memory()
1351 return -EINVAL; in mshv_map_user_memory()
1353 ret = mshv_partition_create_region(partition, &mem, &region, in mshv_map_user_memory()
1359 ret = hv_call_map_mmio_pages(partition->pt_id, mem.guest_pfn, in mshv_map_user_memory()
1368 hlist_add_head(&region->hnode, &partition->pt_mem_regions); in mshv_map_user_memory()
1379 mshv_unmap_user_memory(struct mshv_partition *partition, in mshv_unmap_user_memory() argument
1386 return -EINVAL; in mshv_unmap_user_memory()
1388 region = mshv_partition_region_by_gfn(partition, mem.guest_pfn); in mshv_unmap_user_memory()
1390 return -EINVAL; in mshv_unmap_user_memory()
1393 if (region->start_uaddr != mem.userspace_addr || in mshv_unmap_user_memory()
1394 region->start_gfn != mem.guest_pfn || in mshv_unmap_user_memory()
1395 region->nr_pages != HVPFN_DOWN(mem.size)) in mshv_unmap_user_memory()
1396 return -EINVAL; in mshv_unmap_user_memory()
1398 hlist_del(&region->hnode); in mshv_unmap_user_memory()
1400 if (region->flags.large_pages) in mshv_unmap_user_memory()
1404 hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn, in mshv_unmap_user_memory()
1405 region->nr_pages, unmap_flags); in mshv_unmap_user_memory()
1414 mshv_partition_ioctl_set_memory(struct mshv_partition *partition, in mshv_partition_ioctl_set_memory() argument
1420 return -EFAULT; in mshv_partition_ioctl_set_memory()
1427 return -EINVAL; in mshv_partition_ioctl_set_memory()
1430 return mshv_unmap_user_memory(partition, mem); in mshv_partition_ioctl_set_memory()
1432 return mshv_map_user_memory(partition, mem); in mshv_partition_ioctl_set_memory()
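
Seen from userspace, the whole set-memory path above is one partition-fd ioctl carrying a region descriptor, with an unmap bit selecting between the two branches. A hypothetical sketch; the struct and ioctl names are assumptions about the mshv uapi, while the MSHV_SET_MEM_BIT_* flags appear in the fragments:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/mshv.h>		/* assumed uapi header */

	static int map_guest_ram(int pt_fd, void *host_mem, uint64_t size,
				 uint64_t guest_pfn)
	{
		struct mshv_user_mem_region mem = {
			.size		= size,		/* must be page aligned */
			.guest_pfn	= guest_pfn,
			.userspace_addr	= (uintptr_t)host_mem,
			.flags		= (1 << MSHV_SET_MEM_BIT_WRITABLE) |
					  (1 << MSHV_SET_MEM_BIT_EXECUTABLE),
		};

		return ioctl(pt_fd, MSHV_SET_GUEST_MEMORY, &mem);
	}
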
1436 mshv_partition_ioctl_ioeventfd(struct mshv_partition *partition, in mshv_partition_ioctl_ioeventfd() argument
1442 return -EFAULT; in mshv_partition_ioctl_ioeventfd()
1444 return mshv_set_unset_ioeventfd(partition, &args); in mshv_partition_ioctl_ioeventfd()
1448 mshv_partition_ioctl_irqfd(struct mshv_partition *partition, in mshv_partition_ioctl_irqfd() argument
1454 return -EFAULT; in mshv_partition_ioctl_irqfd()
1456 return mshv_set_unset_irqfd(partition, &args); in mshv_partition_ioctl_irqfd()
1460 mshv_partition_ioctl_get_gpap_access_bitmap(struct mshv_partition *partition, in mshv_partition_ioctl_get_gpap_access_bitmap() argument
1472 return -EFAULT; in mshv_partition_ioctl_get_gpap_access_bitmap()
1478 return -EINVAL; in mshv_partition_ioctl_get_gpap_access_bitmap()
1481 return -E2BIG; in mshv_partition_ioctl_get_gpap_access_bitmap()
1488 return -EBADFD; in mshv_partition_ioctl_get_gpap_access_bitmap()
1515 return -ENOMEM; in mshv_partition_ioctl_get_gpap_access_bitmap()
1517 ret = hv_call_get_gpa_access_states(partition->pt_id, args.page_count, in mshv_partition_ioctl_get_gpap_access_bitmap()
1524 * Overwrite states buffer with bitmap - the bits in hv_type_mask in mshv_partition_ioctl_get_gpap_access_bitmap()
1536 ret = -EFAULT; in mshv_partition_ioctl_get_gpap_access_bitmap()
1544 mshv_partition_ioctl_set_msi_routing(struct mshv_partition *partition, in mshv_partition_ioctl_set_msi_routing() argument
1552 return -EFAULT; in mshv_partition_ioctl_set_msi_routing()
1556 return -EINVAL; in mshv_partition_ioctl_set_msi_routing()
1561 entries = vmemdup_user(urouting->entries, in mshv_partition_ioctl_set_msi_routing()
1567 ret = mshv_update_routing_table(partition, entries, args.nr); in mshv_partition_ioctl_set_msi_routing()
1574 mshv_partition_ioctl_initialize(struct mshv_partition *partition) in mshv_partition_ioctl_initialize() argument
1578 if (partition->pt_initialized) in mshv_partition_ioctl_initialize()
1581 ret = hv_call_initialize_partition(partition->pt_id); in mshv_partition_ioctl_initialize()
1585 partition->pt_initialized = true; in mshv_partition_ioctl_initialize()
1590 hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id); in mshv_partition_ioctl_initialize()
1598 struct mshv_partition *partition = filp->private_data; in mshv_partition_ioctl() local
1602 if (mutex_lock_killable(&partition->pt_mutex)) in mshv_partition_ioctl()
1603 return -EINTR; in mshv_partition_ioctl()
1607 ret = mshv_partition_ioctl_initialize(partition); in mshv_partition_ioctl()
1610 ret = mshv_partition_ioctl_set_memory(partition, uarg); in mshv_partition_ioctl()
1613 ret = mshv_partition_ioctl_create_vp(partition, uarg); in mshv_partition_ioctl()
1616 ret = mshv_partition_ioctl_irqfd(partition, uarg); in mshv_partition_ioctl()
1619 ret = mshv_partition_ioctl_ioeventfd(partition, uarg); in mshv_partition_ioctl()
1622 ret = mshv_partition_ioctl_set_msi_routing(partition, uarg); in mshv_partition_ioctl()
1625 ret = mshv_partition_ioctl_get_gpap_access_bitmap(partition, in mshv_partition_ioctl()
1629 ret = mshv_ioctl_passthru_hvcall(partition, true, uarg); in mshv_partition_ioctl()
1632 ret = -ENOTTY; in mshv_partition_ioctl()
1635 mutex_unlock(&partition->pt_mutex); in mshv_partition_ioctl()
1648 ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id, in disable_vp_dispatch()
1664 ret = mshv_get_vp_registers(vp->vp_index, vp->vp_partition->pt_id, in get_vp_signaled_count()
1686 vp_signal_count = atomic64_read(&vp->run.vp_signaled_count); in drain_vp_signals()
1693 WARN_ON(hv_signal_count - vp_signal_count != 1); in drain_vp_signals()
1695 if (wait_event_interruptible(vp->run.vp_suspend_queue, in drain_vp_signals()
1696 vp->run.kicked_by_hv == 1)) in drain_vp_signals()
1698 vp->run.kicked_by_hv = 0; in drain_vp_signals()
1699 vp_signal_count = atomic64_read(&vp->run.vp_signaled_count); in drain_vp_signals()
1703 static void drain_all_vps(const struct mshv_partition *partition) in drain_all_vps() argument
1709 * VPs are reachable from ISR. It is safe to not take the partition in drain_all_vps()
1710 * lock because nobody else can enter this function and drop the in drain_all_vps()
1711 * partition from the list. in drain_all_vps()
1714 vp = partition->pt_vp_array[i]; in drain_all_vps()
1728 remove_partition(struct mshv_partition *partition) in remove_partition() argument
1731 hlist_del_rcu(&partition->pt_hnode); in remove_partition()
1738 * Tear down a partition and remove it from the list.
1739 * Partition's refcount must be 0
1741 static void destroy_partition(struct mshv_partition *partition) in destroy_partition() argument
1748 if (refcount_read(&partition->pt_ref_count)) { in destroy_partition()
1749 pt_err(partition, in destroy_partition()
1750 "Attempt to destroy partition but refcount > 0\n"); in destroy_partition()
1754 if (partition->pt_initialized) { in destroy_partition()
1757 * done before removing the partition from the partition list. in destroy_partition()
1760 drain_all_vps(partition); in destroy_partition()
1764 vp = partition->pt_vp_array[i]; in destroy_partition()
1769 mshv_vp_stats_unmap(partition->pt_id, vp->vp_index); in destroy_partition()
1771 if (vp->vp_register_page) { in destroy_partition()
1772 (void)hv_call_unmap_vp_state_page(partition->pt_id, in destroy_partition()
1773 vp->vp_index, in destroy_partition()
1776 vp->vp_register_page = NULL; in destroy_partition()
1779 (void)hv_call_unmap_vp_state_page(partition->pt_id, in destroy_partition()
1780 vp->vp_index, in destroy_partition()
1783 vp->vp_intercept_msg_page = NULL; in destroy_partition()
1785 if (vp->vp_ghcb_page) { in destroy_partition()
1786 (void)hv_call_unmap_vp_state_page(partition->pt_id, in destroy_partition()
1787 vp->vp_index, in destroy_partition()
1790 vp->vp_ghcb_page = NULL; in destroy_partition()
1795 partition->pt_vp_array[i] = NULL; in destroy_partition()
1799 hv_call_finalize_partition(partition->pt_id); in destroy_partition()
1801 partition->pt_initialized = false; in destroy_partition()
1804 remove_partition(partition); in destroy_partition()
1807 hlist_for_each_entry_safe(region, n, &partition->pt_mem_regions, in destroy_partition()
1809 hlist_del(&region->hnode); in destroy_partition()
1811 if (mshv_partition_encrypted(partition)) { in destroy_partition()
1814 pt_err(partition, in destroy_partition()
1827 hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id); in destroy_partition()
1828 hv_call_delete_partition(partition->pt_id); in destroy_partition()
1830 mshv_free_routing_table(partition); in destroy_partition()
1831 kfree(partition); in destroy_partition()
1835 mshv_partition *mshv_partition_get(struct mshv_partition *partition) in mshv_partition_get() argument
1837 if (refcount_inc_not_zero(&partition->pt_ref_count)) in mshv_partition_get()
1838 return partition; in mshv_partition_get()
1850 if (p->pt_id == partition_id) in mshv_partition_find()
1857 mshv_partition_put(struct mshv_partition *partition) in mshv_partition_put() argument
1859 if (refcount_dec_and_test(&partition->pt_ref_count)) in mshv_partition_put()
1860 destroy_partition(partition); in mshv_partition_put()
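
Partition lifetime hangs off this pair: lookups take a reference with refcount_inc_not_zero(), which fails once the count has hit zero, so a racing mshv_partition_find() can never resurrect a partition whose last reference is already gone, and the final mshv_partition_put() funnels into destroy_partition(). The same pattern in isolation:

	#include <linux/refcount.h>

	static struct mshv_partition *pt_get(struct mshv_partition *pt)
	{
		/* Fails (returns NULL) once teardown owns the object. */
		return refcount_inc_not_zero(&pt->pt_ref_count) ? pt : NULL;
	}

	static void pt_put(struct mshv_partition *pt)
	{
		if (refcount_dec_and_test(&pt->pt_ref_count))
			destroy_partition(pt);
	}
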
1866 struct mshv_partition *partition = filp->private_data; in mshv_partition_release() local
1868 mshv_eventfd_release(partition); in mshv_partition_release()
1870 cleanup_srcu_struct(&partition->pt_irq_srcu); in mshv_partition_release()
1872 mshv_partition_put(partition); in mshv_partition_release()
1878 add_partition(struct mshv_partition *partition) in add_partition() argument
1882 hash_add_rcu(mshv_root.pt_htable, &partition->pt_hnode, in add_partition()
1883 partition->pt_id); in add_partition()
1897 struct mshv_partition *partition; in mshv_ioctl_create_partition() local
1903 return -EFAULT; in mshv_ioctl_create_partition()
1907 return -EINVAL; in mshv_ioctl_create_partition()
1927 partition = kzalloc(sizeof(*partition), GFP_KERNEL); in mshv_ioctl_create_partition()
1928 if (!partition) in mshv_ioctl_create_partition()
1929 return -ENOMEM; in mshv_ioctl_create_partition()
1931 partition->pt_module_dev = module_dev; in mshv_ioctl_create_partition()
1932 partition->isolation_type = isolation_properties.isolation_type; in mshv_ioctl_create_partition()
1934 refcount_set(&partition->pt_ref_count, 1); in mshv_ioctl_create_partition()
1936 mutex_init(&partition->pt_mutex); in mshv_ioctl_create_partition()
1938 mutex_init(&partition->pt_irq_lock); in mshv_ioctl_create_partition()
1940 init_completion(&partition->async_hypercall); in mshv_ioctl_create_partition()
1942 INIT_HLIST_HEAD(&partition->irq_ack_notifier_list); in mshv_ioctl_create_partition()
1944 INIT_HLIST_HEAD(&partition->pt_devices); in mshv_ioctl_create_partition()
1946 INIT_HLIST_HEAD(&partition->pt_mem_regions); in mshv_ioctl_create_partition()
1948 mshv_eventfd_init(partition); in mshv_ioctl_create_partition()
1950 ret = init_srcu_struct(&partition->pt_irq_srcu); in mshv_ioctl_create_partition()
1957 &partition->pt_id); in mshv_ioctl_create_partition()
1961 ret = add_partition(partition); in mshv_ioctl_create_partition()
1965 ret = mshv_init_async_handler(partition); in mshv_ioctl_create_partition()
1976 partition, O_RDWR); in mshv_ioctl_create_partition()
1989 remove_partition(partition); in mshv_ioctl_create_partition()
1991 hv_call_delete_partition(partition->pt_id); in mshv_ioctl_create_partition()
1993 cleanup_srcu_struct(&partition->pt_irq_srcu); in mshv_ioctl_create_partition()
1995 kfree(partition); in mshv_ioctl_create_partition()
2003 struct miscdevice *misc = filp->private_data; in mshv_dev_ioctl()
2008 misc->this_device); in mshv_dev_ioctl()
2011 return -ENOTTY; in mshv_dev_ioctl()
2059 input->property_id = HV_SYSTEM_PROPERTY_SCHEDULER_TYPE; in hv_retrieve_scheduler_type()
2068 *out = output->scheduler_type; in hv_retrieve_scheduler_type()
2096 return -EOPNOTSUPP; in mshv_retrieve_scheduler_type()
2112 return -ENOMEM; in mshv_root_scheduler_init()
2151 ret = -ENOMEM; in root_scheduler_init()
2207 return -ENODEV; in mshv_root_partition_init()
2231 return -ENODEV; in mshv_parent_partition_init()
2234 return -ENODEV; in mshv_parent_partition_init()
2244 dev_err(dev, "Running on unvalidated Hyper-V version\n"); in mshv_parent_partition_init()
2253 ret = -ENOMEM; in mshv_parent_partition_init()