Lines matching the full identifier "partition"

38 MODULE_DESCRIPTION("Microsoft Hyper-V root partition VMM interface /dev/mshv");
80 static int mshv_init_async_handler(struct mshv_partition *partition);
124 * Only allow hypercalls that have a u64 partition id as the first member of
155 static int mshv_ioctl_passthru_hvcall(struct mshv_partition *partition, in mshv_ioctl_passthru_hvcall() argument
187 /* async hypercalls can only be called from partition fd */ in mshv_ioctl_passthru_hvcall()
190 ret = mshv_init_async_handler(partition); in mshv_ioctl_passthru_hvcall()
216 *(u64 *)input_pg = partition->pt_id; in mshv_ioctl_passthru_hvcall()
226 mshv_async_hvcall_handler(partition, &status); in mshv_ioctl_passthru_hvcall()
234 ret = hv_call_deposit_pages(NUMA_NO_NODE, partition->pt_id, 1); in mshv_ioctl_passthru_hvcall()
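The matches above show the passthru path copying the caller's hypercall input and then stamping the owning partition's id over its first u64 (line 216), which is why the comment at line 124 restricts passthru to hypercalls whose input begins with a u64 partition id. A minimal sketch of that pinning step, with an illustrative input layout (the real layout belongs to the hypervisor ABI, not to these matches):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative input page: the only property the driver relies on is
     * that a u64 partition id sits at offset 0. */
    struct hvcall_input {
        uint64_t partition_id;
        uint8_t payload[4088];
    };

    /* Mirrors "*(u64 *)input_pg = partition->pt_id;" from line 216: the id
     * supplied by userspace is ignored and overwritten, so a passthru call
     * can only ever target the partition that owns the fd. */
    static void pin_input_to_partition(void *input_pg, uint64_t pt_id)
    {
        memcpy(input_pg, &pt_id, sizeof(pt_id));
    }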
913 mshv_partition_ioctl_create_vp(struct mshv_partition *partition, in mshv_partition_ioctl_create_vp() argument
928 if (partition->pt_vp_array[args.vp_index]) in mshv_partition_ioctl_create_vp()
931 ret = hv_call_create_vp(NUMA_NO_NODE, partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
932 0 /* Only valid for root partition VPs */); in mshv_partition_ioctl_create_vp()
936 ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
943 if (!mshv_partition_encrypted(partition)) { in mshv_partition_ioctl_create_vp()
944 ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
952 if (mshv_partition_encrypted(partition) && in mshv_partition_ioctl_create_vp()
954 ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
963 ret = mshv_vp_stats_map(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
973 vp->vp_partition = mshv_partition_get(partition); in mshv_partition_ioctl_create_vp()
985 if (!mshv_partition_encrypted(partition)) in mshv_partition_ioctl_create_vp()
988 if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available()) in mshv_partition_ioctl_create_vp()
1003 /* already exclusive with the partition mutex for all ioctls */ in mshv_partition_ioctl_create_vp()
1004 partition->pt_vp_count++; in mshv_partition_ioctl_create_vp()
1005 partition->pt_vp_array[args.vp_index] = vp; in mshv_partition_ioctl_create_vp()
1010 mshv_partition_put(partition); in mshv_partition_ioctl_create_vp()
1015 mshv_vp_stats_unmap(partition->pt_id, args.vp_index); in mshv_partition_ioctl_create_vp()
1017 if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available()) { in mshv_partition_ioctl_create_vp()
1018 hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
1023 if (!mshv_partition_encrypted(partition)) { in mshv_partition_ioctl_create_vp()
1024 hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
1029 hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index, in mshv_partition_ioctl_create_vp()
1033 hv_call_delete_vp(partition->pt_id, args.vp_index); in mshv_partition_ioctl_create_vp()
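Lines 931-1005 plus the unwind at 1015-1033 form a reverse-order error path: every mapping established on the way in (VP state pages, GHCB page on encrypted partitions, stats pages) has a matching unmap, ending with hv_call_delete_vp(). A compact sketch of that shape, with stand-in steps rather than the driver's hv_call_* helpers:

    #include <errno.h>

    static int create_vp(void)      { return 0; }       /* ~ hv_call_create_vp() */
    static int map_state_page(void) { return 0; }       /* ~ hv_call_map_vp_state_page() */
    static int map_stats(void)      { return -ENOMEM; } /* ~ mshv_vp_stats_map() */
    static void unmap_state_page(void) { }
    static void delete_vp(void)        { }

    static int create_vp_with_unwind(void)
    {
        int ret;

        ret = create_vp();
        if (ret)
            return ret;

        ret = map_state_page();
        if (ret)
            goto destroy_vp;

        ret = map_stats();
        if (ret)
            goto unmap_state;

        return 0;

    unmap_state:                 /* teardown runs in reverse setup order */
        unmap_state_page();
    destroy_vp:
        delete_vp();
        return ret;
    }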
1037 static int mshv_init_async_handler(struct mshv_partition *partition) in mshv_init_async_handler() argument
1039 if (completion_done(&partition->async_hypercall)) { in mshv_init_async_handler()
1040 pt_err(partition, in mshv_init_async_handler()
1045 reinit_completion(&partition->async_hypercall); in mshv_init_async_handler()
1051 struct mshv_partition *partition = data; in mshv_async_hvcall_handler() local
1053 wait_for_completion(&partition->async_hypercall); in mshv_async_hvcall_handler()
1054 pt_dbg(partition, "Async hypercall completed!\n"); in mshv_async_hvcall_handler()
1056 *status = partition->async_hypercall_status; in mshv_async_hvcall_handler()
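Lines 1037-1056 describe a one-shot handshake: the initiator refuses to start a second async hypercall while one is outstanding (the completion_done() check at 1039), re-arms the completion, and later blocks in wait_for_completion() until the interrupt side publishes the status. A user-space analogue, assuming a POSIX semaphore in place of struct completion:

    #include <semaphore.h>
    #include <stdint.h>

    static sem_t async_done;           /* ~ partition->async_hypercall */
    static uint64_t async_status;      /* ~ partition->async_hypercall_status */

    /* Interrupt side: publish the status, then wake the waiter; sem_post()
     * has release semantics, so the status store is visible to the waker. */
    static void async_complete(uint64_t status)
    {
        async_status = status;
        sem_post(&async_done);         /* ~ complete() */
    }

    /* Caller side: ~ mshv_async_hvcall_handler() at lines 1051-1056. */
    static uint64_t async_wait(void)
    {
        sem_wait(&async_done);         /* ~ wait_for_completion() */
        return async_status;
    }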
1067 return hv_call_modify_spa_host_access(region->partition->pt_id, in mshv_partition_region_share()
1081 return hv_call_modify_spa_host_access(region->partition->pt_id, in mshv_partition_region_unshare()
1098 return hv_call_map_gpa_pages(region->partition->pt_id, in mshv_region_remap_pages()
1187 mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn) in mshv_partition_region_by_gfn() argument
1191 hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) { in mshv_partition_region_by_gfn()
1201 mshv_partition_region_by_uaddr(struct mshv_partition *partition, u64 uaddr) in mshv_partition_region_by_uaddr() argument
1205 hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) { in mshv_partition_region_by_uaddr()
1219 static int mshv_partition_create_region(struct mshv_partition *partition, in mshv_partition_create_region() argument
1228 if (mshv_partition_region_by_gfn(partition, mem->guest_pfn) || in mshv_partition_create_region()
1229 mshv_partition_region_by_gfn(partition, mem->guest_pfn + nr_pages - 1) || in mshv_partition_create_region()
1230 mshv_partition_region_by_uaddr(partition, mem->userspace_addr) || in mshv_partition_create_region()
1231 mshv_partition_region_by_uaddr(partition, mem->userspace_addr + mem->size - 1)) in mshv_partition_create_region()
1251 region->partition = partition; in mshv_partition_create_region()
1265 struct mshv_partition *partition = region->partition; in mshv_partition_mem_region_map() local
1270 pt_err(partition, "Failed to populate memory region: %d\n", in mshv_partition_mem_region_map()
1276 * For an SNP partition it is a requirement that for every memory region in mshv_partition_mem_region_map()
1277 * that we are going to map for this partition we should make sure that in mshv_partition_mem_region_map()
1282 if (mshv_partition_encrypted(partition)) { in mshv_partition_mem_region_map()
1285 pt_err(partition, in mshv_partition_mem_region_map()
1293 if (ret && mshv_partition_encrypted(partition)) { in mshv_partition_mem_region_map()
1300 pt_err(partition, in mshv_partition_mem_region_map()
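The comment at lines 1276-1277 (cut short by the match filter) and the calls around it state the SNP constraint: before an encrypted partition's region is mapped into the guest, host access to those pages must be released, and a failed map restores it. A stand-in sketch of that ordering, using the share/unshare wrappers matched at lines 1067 and 1081:

    static int unshare_from_host(void) { return 0; } /* ~ mshv_partition_region_unshare() */
    static int share_with_host(void)   { return 0; } /* ~ mshv_partition_region_share() */
    static int map_into_guest(void)    { return 0; }

    static int map_encrypted_region(void)
    {
        int ret;

        ret = unshare_from_host();  /* host gives up SPA access first */
        if (ret)
            return ret;

        ret = map_into_guest();
        if (ret)
            share_with_host();      /* undo: hand the pages back to the host */

        return ret;
    }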
1331 mshv_map_user_memory(struct mshv_partition *partition, in mshv_map_user_memory() argument
1353 ret = mshv_partition_create_region(partition, &mem, &region, in mshv_map_user_memory()
1359 ret = hv_call_map_mmio_pages(partition->pt_id, mem.guest_pfn, in mshv_map_user_memory()
1368 hlist_add_head(&region->hnode, &partition->pt_mem_regions); in mshv_map_user_memory()
1379 mshv_unmap_user_memory(struct mshv_partition *partition, in mshv_unmap_user_memory() argument
1388 region = mshv_partition_region_by_gfn(partition, mem.guest_pfn); in mshv_unmap_user_memory()
1404 hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn, in mshv_unmap_user_memory()
1414 mshv_partition_ioctl_set_memory(struct mshv_partition *partition, in mshv_partition_ioctl_set_memory() argument
1430 return mshv_unmap_user_memory(partition, mem); in mshv_partition_ioctl_set_memory()
1432 return mshv_map_user_memory(partition, mem); in mshv_partition_ioctl_set_memory()
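Lines 1430-1432 show one ioctl serving both directions, keyed on a flag in the user-supplied struct. The flag name and struct layout below are hypothetical; neither appears among the matched lines.

    #include <stdint.h>

    struct user_mem {                 /* illustrative uapi-style struct */
        uint64_t guest_pfn;
        uint64_t userspace_addr;
        uint64_t size;
        uint32_t flags;
    };

    #define MEM_FLAG_UNMAP (1u << 0)  /* hypothetical flag bit */

    static int map_user_memory(const struct user_mem *m)   { (void)m; return 0; }
    static int unmap_user_memory(const struct user_mem *m) { (void)m; return 0; }

    static int set_memory(const struct user_mem *m)
    {
        if (m->flags & MEM_FLAG_UNMAP)
            return unmap_user_memory(m);  /* line 1430 */
        return map_user_memory(m);        /* line 1432 */
    }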
1436 mshv_partition_ioctl_ioeventfd(struct mshv_partition *partition, in mshv_partition_ioctl_ioeventfd() argument
1444 return mshv_set_unset_ioeventfd(partition, &args); in mshv_partition_ioctl_ioeventfd()
1448 mshv_partition_ioctl_irqfd(struct mshv_partition *partition, in mshv_partition_ioctl_irqfd() argument
1456 return mshv_set_unset_irqfd(partition, &args); in mshv_partition_ioctl_irqfd()
1460 mshv_partition_ioctl_get_gpap_access_bitmap(struct mshv_partition *partition, in mshv_partition_ioctl_get_gpap_access_bitmap() argument
1517 ret = hv_call_get_gpa_access_states(partition->pt_id, args.page_count, in mshv_partition_ioctl_get_gpap_access_bitmap()
1544 mshv_partition_ioctl_set_msi_routing(struct mshv_partition *partition, in mshv_partition_ioctl_set_msi_routing() argument
1567 ret = mshv_update_routing_table(partition, entries, args.nr); in mshv_partition_ioctl_set_msi_routing()
1574 mshv_partition_ioctl_initialize(struct mshv_partition *partition) in mshv_partition_ioctl_initialize() argument
1578 if (partition->pt_initialized) in mshv_partition_ioctl_initialize()
1581 ret = hv_call_initialize_partition(partition->pt_id); in mshv_partition_ioctl_initialize()
1585 partition->pt_initialized = true; in mshv_partition_ioctl_initialize()
1590 hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id); in mshv_partition_ioctl_initialize()
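Lines 1574-1590 suggest an idempotent initialize: a partition that is already initialized is left alone, and a failed hv_call_initialize_partition() withdraws any pages deposited on its behalf. A sketch under those assumptions (the early-return value for the already-initialized case is not among the matched lines; 0 is assumed):

    static int hv_initialize(void)    { return 0; } /* ~ hv_call_initialize_partition() */
    static void hv_withdraw_all(void) { }           /* ~ hv_call_withdraw_memory(U64_MAX, ...) */
    static int initialized;

    static int initialize_once(void)
    {
        int ret;

        if (initialized)
            return 0;               /* assumed no-op success */

        ret = hv_initialize();
        if (ret) {
            hv_withdraw_all();      /* failure path at line 1590 */
            return ret;
        }

        initialized = 1;            /* line 1585 */
        return 0;
    }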
1598 struct mshv_partition *partition = filp->private_data; in mshv_partition_ioctl() local
1602 if (mutex_lock_killable(&partition->pt_mutex)) in mshv_partition_ioctl()
1607 ret = mshv_partition_ioctl_initialize(partition); in mshv_partition_ioctl()
1610 ret = mshv_partition_ioctl_set_memory(partition, uarg); in mshv_partition_ioctl()
1613 ret = mshv_partition_ioctl_create_vp(partition, uarg); in mshv_partition_ioctl()
1616 ret = mshv_partition_ioctl_irqfd(partition, uarg); in mshv_partition_ioctl()
1619 ret = mshv_partition_ioctl_ioeventfd(partition, uarg); in mshv_partition_ioctl()
1622 ret = mshv_partition_ioctl_set_msi_routing(partition, uarg); in mshv_partition_ioctl()
1625 ret = mshv_partition_ioctl_get_gpap_access_bitmap(partition, in mshv_partition_ioctl()
1629 ret = mshv_ioctl_passthru_hvcall(partition, true, uarg); in mshv_partition_ioctl()
1635 mutex_unlock(&partition->pt_mutex); in mshv_partition_ioctl()
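Lines 1598-1635 serialize every partition ioctl behind a single mutex, taken with the killable variant so a fatally signalled task does not wait forever. A user-space analogue of the contract (pthread has no killable lock, so the stub below only models the return-value convention):

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t pt_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for mutex_lock_killable(): zero on success, nonzero when
     * the acquire is abandoned. */
    static int lock_killable(pthread_mutex_t *m)
    {
        return pthread_mutex_lock(m);
    }

    static long partition_ioctl(unsigned int cmd)
    {
        long ret;

        if (lock_killable(&pt_mutex))
            return -EINTR;          /* task was fatally signalled */

        switch (cmd) {              /* ~ the dispatch at lines 1607-1629 */
        default:
            ret = -ENOTTY;
            break;
        }

        pthread_mutex_unlock(&pt_mutex);
        return ret;
    }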
1703 static void drain_all_vps(const struct mshv_partition *partition) in drain_all_vps() argument
1709 * VPs are reachable from ISR. It is safe to not take the partition in drain_all_vps()
1711 * partition from the list. in drain_all_vps()
1714 vp = partition->pt_vp_array[i]; in drain_all_vps()
1728 remove_partition(struct mshv_partition *partition) in remove_partition() argument
1731 hlist_del_rcu(&partition->pt_hnode); in remove_partition()
1738 * Tear down a partition and remove it from the list.
1739 * Partition's refcount must be 0
1741 static void destroy_partition(struct mshv_partition *partition) in destroy_partition() argument
1748 if (refcount_read(&partition->pt_ref_count)) { in destroy_partition()
1749 pt_err(partition, in destroy_partition()
1750 "Attempt to destroy partition but refcount > 0\n"); in destroy_partition()
1754 if (partition->pt_initialized) { in destroy_partition()
1757 * done before removing the partition from the partition list. in destroy_partition()
1760 drain_all_vps(partition); in destroy_partition()
1764 vp = partition->pt_vp_array[i]; in destroy_partition()
1769 mshv_vp_stats_unmap(partition->pt_id, vp->vp_index); in destroy_partition()
1772 (void)hv_call_unmap_vp_state_page(partition->pt_id, in destroy_partition()
1779 (void)hv_call_unmap_vp_state_page(partition->pt_id, in destroy_partition()
1786 (void)hv_call_unmap_vp_state_page(partition->pt_id, in destroy_partition()
1795 partition->pt_vp_array[i] = NULL; in destroy_partition()
1799 hv_call_finalize_partition(partition->pt_id); in destroy_partition()
1801 partition->pt_initialized = false; in destroy_partition()
1804 remove_partition(partition); in destroy_partition()
1807 hlist_for_each_entry_safe(region, n, &partition->pt_mem_regions, in destroy_partition()
1811 if (mshv_partition_encrypted(partition)) { in destroy_partition()
1814 pt_err(partition, in destroy_partition()
1827 hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id); in destroy_partition()
1828 hv_call_delete_partition(partition->pt_id); in destroy_partition()
1830 mshv_free_routing_table(partition); in destroy_partition()
1831 kfree(partition); in destroy_partition()
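Lines 1741-1831 impose a strict teardown order, and the comment at line 1757 pins the one hard constraint: VP work is drained before the partition leaves the list. An outline with stand-in phases (the real calls are the ones matched above); the ordering between the later phases is read off the listing, not independently verified:

    static void drain_vps(void)           { } /* lines 1754-1760 */
    static void unmap_vp_pages(void)      { } /* stats + state pages, lines 1764-1795 */
    static void finalize_partition(void)  { } /* hv_call_finalize_partition() */
    static void unhash_partition(void)    { } /* remove_partition() -> hlist_del_rcu() */
    static void release_regions(void)     { } /* re-share first on encrypted partitions */
    static void withdraw_and_delete(void) { } /* lines 1827-1828 */

    static void destroy_partition_outline(void)
    {
        drain_vps();          /* must run while VPs are still reachable */
        unmap_vp_pages();
        finalize_partition();
        unhash_partition();
        release_regions();
        withdraw_and_delete();
    }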
1835 mshv_partition *mshv_partition_get(struct mshv_partition *partition) in mshv_partition_get() argument
1837 if (refcount_inc_not_zero(&partition->pt_ref_count)) in mshv_partition_get()
1838 return partition; in mshv_partition_get()
1857 mshv_partition_put(struct mshv_partition *partition) in mshv_partition_put() argument
1859 if (refcount_dec_and_test(&partition->pt_ref_count)) in mshv_partition_put()
1860 destroy_partition(partition); in mshv_partition_put()
1866 struct mshv_partition *partition = filp->private_data; in mshv_partition_release() local
1868 mshv_eventfd_release(partition); in mshv_partition_release()
1870 cleanup_srcu_struct(&partition->pt_irq_srcu); in mshv_partition_release()
1872 mshv_partition_put(partition); in mshv_partition_release()
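Lines 1835-1872 show the lifetime rules: a get succeeds only while the count is nonzero, the last put runs destroy_partition() exactly once, and mshv_partition_release() drops the reference set to 1 at creation (line 1934). A user-space analogue with C11 atomics standing in for refcount_t:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* ~ mshv_partition_get(): pin the object only while it is still live,
     * so a partition that has begun teardown can no longer be acquired. */
    static bool partition_get(atomic_uint *ref)
    {
        unsigned int old = atomic_load(ref);

        while (old != 0) {
            if (atomic_compare_exchange_weak(ref, &old, old + 1))
                return true;   /* ~ refcount_inc_not_zero() */
        }
        return false;
    }

    /* ~ mshv_partition_put(): true means this caller dropped the count to
     * zero and must run the destructor, exactly once. */
    static bool partition_put(atomic_uint *ref)
    {
        return atomic_fetch_sub(ref, 1) == 1;
    }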
1878 add_partition(struct mshv_partition *partition) in add_partition() argument
1882 hash_add_rcu(mshv_root.pt_htable, &partition->pt_hnode, in add_partition()
1883 partition->pt_id); in add_partition()
1897 struct mshv_partition *partition; in mshv_ioctl_create_partition() local
1927 partition = kzalloc(sizeof(*partition), GFP_KERNEL); in mshv_ioctl_create_partition()
1928 if (!partition) in mshv_ioctl_create_partition()
1931 partition->pt_module_dev = module_dev; in mshv_ioctl_create_partition()
1932 partition->isolation_type = isolation_properties.isolation_type; in mshv_ioctl_create_partition()
1934 refcount_set(&partition->pt_ref_count, 1); in mshv_ioctl_create_partition()
1936 mutex_init(&partition->pt_mutex); in mshv_ioctl_create_partition()
1938 mutex_init(&partition->pt_irq_lock); in mshv_ioctl_create_partition()
1940 init_completion(&partition->async_hypercall); in mshv_ioctl_create_partition()
1942 INIT_HLIST_HEAD(&partition->irq_ack_notifier_list); in mshv_ioctl_create_partition()
1944 INIT_HLIST_HEAD(&partition->pt_devices); in mshv_ioctl_create_partition()
1946 INIT_HLIST_HEAD(&partition->pt_mem_regions); in mshv_ioctl_create_partition()
1948 mshv_eventfd_init(partition); in mshv_ioctl_create_partition()
1950 ret = init_srcu_struct(&partition->pt_irq_srcu); in mshv_ioctl_create_partition()
1957 &partition->pt_id); in mshv_ioctl_create_partition()
1961 ret = add_partition(partition); in mshv_ioctl_create_partition()
1965 ret = mshv_init_async_handler(partition); in mshv_ioctl_create_partition()
1976 partition, O_RDWR); in mshv_ioctl_create_partition()
1989 remove_partition(partition); in mshv_ioctl_create_partition()
1991 hv_call_delete_partition(partition->pt_id); in mshv_ioctl_create_partition()
1993 cleanup_srcu_struct(&partition->pt_irq_srcu); in mshv_ioctl_create_partition()
1995 kfree(partition); in mshv_ioctl_create_partition()
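Taken together, mshv_ioctl_create_partition() (lines 1897-1995) and the partition ioctl table imply the user-space lifecycle: open the module device, ask it to create a partition, and drive everything else through the anonymous partition fd it returns. The ioctl request name and argument struct below are assumptions, not taken from the matched lines, so the call is left commented out:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int mshv = open("/dev/mshv", O_RDWR | O_CLOEXEC);

        if (mshv < 0) {
            perror("open /dev/mshv");
            return 1;
        }

        /* Hypothetical, suggested by mshv_ioctl_create_partition():
         * int pt_fd = ioctl(mshv, MSHV_CREATE_PARTITION, &args);
         * pt_fd would then accept the partition ioctls dispatched at
         * lines 1598-1635 (set memory, create VP, irqfd, ...). */

        close(mshv);
        return 0;
    }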