1 // SPDX-License-Identifier: GPL-2.0
13 #define KMSG_COMPONENT "kvm-s390"
37 #include <asm/access-regs.h>
38 #include <asm/asm-offsets.h>
52 #include "kvm-s390.h"
59 #include "trace-s390.h"
226 * the feature is opt-in anyway
241 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
262 /* available subfunctions indicated via query / "test bit" */
282 * -delta to the epoch. in kvm_clock_sync_scb()
284 delta = -delta; in kvm_clock_sync_scb()
286 /* sign-extension - we're adding to signed values below */ in kvm_clock_sync_scb()
288 delta_idx = -1; in kvm_clock_sync_scb()
290 scb->epoch += delta; in kvm_clock_sync_scb()
291 if (scb->ecd & ECD_MEF) { in kvm_clock_sync_scb()
292 scb->epdx += delta_idx; in kvm_clock_sync_scb()
293 if (scb->epoch < delta) in kvm_clock_sync_scb()
294 scb->epdx += 1; in kvm_clock_sync_scb()
314 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
316 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
317 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
319 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
320 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
321 if (vcpu->arch.vsie_block) in kvm_clock_sync()
322 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
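The epoch handling in kvm_clock_sync_scb() above is a 128-bit addition split across epdx (high word) and epoch (low word): the delta is sign-extended into the high word, and an unsigned wrap of the low word is turned into a carry. A minimal user-space sketch of the same carry rule (illustrative names, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Add a signed 64-bit delta to a 128-bit value kept as (high:low),
 * mirroring the epdx/epoch handling in kvm_clock_sync_scb(). */
static void add_delta_128(uint64_t *high, uint64_t *low, int64_t delta)
{
	uint64_t d = (uint64_t)delta;

	*low += d;
	*high += (delta < 0) ? (uint64_t)-1 : 0;	/* sign-extend the delta */
	if (*low < d)					/* low word wrapped: carry */
		*high += 1;
}

int main(void)
{
	uint64_t high = 0, low = UINT64_MAX;

	add_delta_128(&high, &low, 1);			/* carries into the high word */
	printf("%llx:%llx\n", (unsigned long long)high, (unsigned long long)low);
	return 0;
}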
354 static __always_inline void pfcr_query(u8 (*query)[16]) in pfcr_query()
358 " .insn rsy,0xeb0000000016,0,0,%[query]\n" in pfcr_query()
359 : [query] "=QS" (*query) in pfcr_query()
364 static __always_inline void __sortl_query(u8 (*query)[32]) in __sortl_query()
368 " la 1,%[query]\n" in __sortl_query()
371 : [query] "=R" (*query) in __sortl_query()
376 static __always_inline void __dfltcc_query(u8 (*query)[32]) in __dfltcc_query()
380 " la 1,%[query]\n" in __dfltcc_query()
383 : [query] "=R" (*query) in __dfltcc_query()
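These helpers only fill the raw query block of the respective instruction; whether an individual function code is available is then decided by testing one bit of that block, counted from the most significant bit of byte 0 (the "query / test bit" convention mentioned in the comment at line 262 above). An illustrative helper, not taken from the kernel:

#include <stdbool.h>
#include <stdint.h>

/* Return true if function code 'nr' is indicated in a query result;
 * bits are numbered from the MSB of byte 0, as is usual on s390. */
static inline bool query_bit_set(const uint8_t *query, unsigned int nr)
{
	return (query[nr >> 3] >> (7 - (nr & 7))) & 1;
}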
397 if (test_facility(28)) /* TOD-clock steering */ in kvm_s390_cpu_feat_init()
496 int rc = -ENOMEM; in __kvm_s390_init()
498 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
500 return -ENOMEM; in __kvm_s390_init()
502 kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
572 return -EINVAL; in kvm_arch_dev_ioctl()
700 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
704 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
705 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
740 return -EINVAL; in kvm_vm_ioctl_get_dirty_log()
742 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
744 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log()
745 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log()
755 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
759 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
777 if (cap->flags) in kvm_vm_ioctl_enable_cap()
778 return -EINVAL; in kvm_vm_ioctl_enable_cap()
780 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
783 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
788 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
792 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
793 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
794 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
796 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
797 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
799 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
800 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
803 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
804 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
807 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
808 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
811 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
812 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
815 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
816 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
819 set_kvm_facility(kvm->arch.model.fac_mask, 198); in kvm_vm_ioctl_enable_cap()
820 set_kvm_facility(kvm->arch.model.fac_list, 198); in kvm_vm_ioctl_enable_cap()
823 set_kvm_facility(kvm->arch.model.fac_mask, 199); in kvm_vm_ioctl_enable_cap()
824 set_kvm_facility(kvm->arch.model.fac_list, 199); in kvm_vm_ioctl_enable_cap()
828 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
829 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
834 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
835 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
836 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
837 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
839 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
840 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
843 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
848 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
849 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
850 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
852 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
853 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
856 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
861 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
862 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
863 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
864 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
866 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
867 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
870 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
875 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
876 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
877 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
878 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
879 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
882 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
883 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
884 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
890 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
891 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
893 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
899 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
904 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
909 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
910 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
911 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
912 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
914 set_kvm_facility(kvm->arch.model.fac_mask, 11); in kvm_vm_ioctl_enable_cap()
915 set_kvm_facility(kvm->arch.model.fac_list, 11); in kvm_vm_ioctl_enable_cap()
918 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
923 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
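Each branch of kvm_vm_ioctl_enable_cap() above is reached from user space via the KVM_ENABLE_CAP ioctl on the VM file descriptor, and the facility-setting cases must run before the first vCPU is created. A hedged sketch (vm_fd and the helper name are assumptions, error handling omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable guest vector registers (the facility 129 branch above). */
static int enable_vector_regs(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_VECTOR_REGISTERS;
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}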
933 switch (attr->attr) { in kvm_s390_get_mem_control()
936 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
937 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
938 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
939 ret = -EFAULT; in kvm_s390_get_mem_control()
942 ret = -ENXIO; in kvm_s390_get_mem_control()
952 switch (attr->attr) { in kvm_s390_set_mem_control()
954 ret = -ENXIO; in kvm_s390_set_mem_control()
959 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
960 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
961 ret = -EBUSY; in kvm_s390_set_mem_control()
962 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
963 ret = -EINVAL; in kvm_s390_set_mem_control()
965 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
967 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
970 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
973 ret = -ENXIO; in kvm_s390_set_mem_control()
976 ret = -EINVAL; in kvm_s390_set_mem_control()
977 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
981 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
982 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
983 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
984 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
985 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
992 return -EINVAL; in kvm_s390_set_mem_control()
994 if (get_user(new_limit, (u64 __user *)attr->addr)) in kvm_s390_set_mem_control()
995 return -EFAULT; in kvm_s390_set_mem_control()
997 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
998 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
999 return -E2BIG; in kvm_s390_set_mem_control()
1002 return -EINVAL; in kvm_s390_set_mem_control()
1006 new_limit -= 1; in kvm_s390_set_mem_control()
1008 ret = -EBUSY; in kvm_s390_set_mem_control()
1009 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
1010 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
1012 struct gmap *new = gmap_create(current->mm, new_limit); in kvm_s390_set_mem_control()
1015 ret = -ENOMEM; in kvm_s390_set_mem_control()
1017 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
1018 new->private = kvm; in kvm_s390_set_mem_control()
1019 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
1023 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
1026 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
1030 ret = -ENXIO; in kvm_s390_set_mem_control()
1056 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
1057 switch (attr->attr) { in kvm_s390_vm_set_crypto()
1060 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1061 return -EINVAL; in kvm_s390_vm_set_crypto()
1064 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1065 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1066 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
1071 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1072 return -EINVAL; in kvm_s390_vm_set_crypto()
1075 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1076 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1077 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
1082 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1083 return -EINVAL; in kvm_s390_vm_set_crypto()
1085 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
1086 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1087 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1092 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1093 return -EINVAL; in kvm_s390_vm_set_crypto()
1095 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
1096 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1097 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1102 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1103 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1105 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1109 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1110 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1112 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1115 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1116 return -ENXIO; in kvm_s390_vm_set_crypto()
1120 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
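The AES/DEA wrapping-key and APIE switches in kvm_s390_vm_set_crypto() are driven through the KVM_SET_DEVICE_ATTR ioctl on the VM fd with group KVM_S390_VM_CRYPTO. A minimal sketch on an s390 host, assuming vm_fd and omitting error handling:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in the s390 KVM_S390_VM_CRYPTO* attributes */

/* Enable AES key wrapping; KVM then generates a fresh wrapping key mask. */
static int enable_aes_key_wrapping(int vm_fd)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_S390_VM_CRYPTO;
	attr.attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}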
1127 if (!vcpu->kvm->arch.use_zpci_interp) in kvm_s390_vcpu_pci_setup()
1130 vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI; in kvm_s390_vcpu_pci_setup()
1131 vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI; in kvm_s390_vcpu_pci_setup()
1139 lockdep_assert_held(&kvm->lock); in kvm_s390_vcpu_pci_enable_interp()
1148 kvm->arch.use_zpci_interp = 1; in kvm_s390_vcpu_pci_enable_interp()
1170 * Must be called with kvm->srcu held to avoid races on memslots, and with
1171 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1181 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1185 return -EINVAL; in kvm_s390_vm_start_migration()
1187 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1188 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1193 if (!ms->dirty_bitmap) in kvm_s390_vm_start_migration()
1194 return -EINVAL; in kvm_s390_vm_start_migration()
1202 ram_pages += ms->npages; in kvm_s390_vm_start_migration()
1204 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1205 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1211 * Must be called with kvm->slots_lock to avoid races with ourselves and
1217 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1219 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1220 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1228 int res = -ENXIO; in kvm_s390_vm_set_migration()
1230 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1231 switch (attr->attr) { in kvm_s390_vm_set_migration()
1241 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1249 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1251 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS) in kvm_s390_vm_get_migration()
1252 return -ENXIO; in kvm_s390_vm_get_migration()
1254 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig))) in kvm_s390_vm_get_migration()
1255 return -EFAULT; in kvm_s390_vm_get_migration()
1265 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) in kvm_s390_set_tod_ext()
1266 return -EFAULT; in kvm_s390_set_tod_ext()
1269 return -EINVAL; in kvm_s390_set_tod_ext()
1282 if (copy_from_user(&gtod_high, (void __user *)attr->addr, in kvm_s390_set_tod_high()
1284 return -EFAULT; in kvm_s390_set_tod_high()
1287 return -EINVAL; in kvm_s390_set_tod_high()
1297 if (copy_from_user(&gtod.tod, (void __user *)attr->addr, in kvm_s390_set_tod_low()
1299 return -EFAULT; in kvm_s390_set_tod_low()
1310 if (attr->flags) in kvm_s390_set_tod()
1311 return -EINVAL; in kvm_s390_set_tod()
1313 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1319 ret = -EOPNOTSUPP; in kvm_s390_set_tod()
1323 switch (attr->attr) { in kvm_s390_set_tod()
1334 ret = -ENXIO; in kvm_s390_set_tod()
1339 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1352 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1353 gtod->epoch_idx = 0; in kvm_s390_get_tod_clock()
1355 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1356 if (gtod->tod < clk.tod) in kvm_s390_get_tod_clock()
1357 gtod->epoch_idx += 1; in kvm_s390_get_tod_clock()
1369 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_ext()
1370 return -EFAULT; in kvm_s390_get_tod_ext()
1372 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1381 if (copy_to_user((void __user *)attr->addr, &gtod_high, in kvm_s390_get_tod_high()
1383 return -EFAULT; in kvm_s390_get_tod_high()
1384 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1394 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_low()
1395 return -EFAULT; in kvm_s390_get_tod_low()
1396 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1405 if (attr->flags) in kvm_s390_get_tod()
1406 return -EINVAL; in kvm_s390_get_tod()
1408 switch (attr->attr) { in kvm_s390_get_tod()
1419 ret = -ENXIO; in kvm_s390_get_tod()
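The set/get paths above belong to the KVM_S390_VM_TOD attribute group; with the multiple-epoch facility the extended format carries an 8-bit epoch index in addition to the 64-bit TOD base. A hedged read sketch, same assumptions as before:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the extended guest TOD clock (epoch index + TOD base). */
static int read_guest_tod_ext(int vm_fd, struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_S390_VM_TOD;
	attr.attr = KVM_S390_VM_TOD_EXT;
	attr.addr = (unsigned long)gtod;
	return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
}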
1431 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1432 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1433 ret = -EBUSY; in kvm_s390_set_processor()
1438 ret = -ENOMEM; in kvm_s390_set_processor()
1441 if (!copy_from_user(proc, (void __user *)attr->addr, in kvm_s390_set_processor()
1443 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1446 if (lowest_ibc && proc->ibc) { in kvm_s390_set_processor()
1447 if (proc->ibc > unblocked_ibc) in kvm_s390_set_processor()
1448 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1449 else if (proc->ibc < lowest_ibc) in kvm_s390_set_processor()
1450 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1452 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1454 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1457 kvm->arch.model.ibc, in kvm_s390_set_processor()
1458 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1460 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1461 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1462 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1464 ret = -EFAULT; in kvm_s390_set_processor()
1467 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1476 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data))) in kvm_s390_set_processor_feat()
1477 return -EFAULT; in kvm_s390_set_processor_feat()
1481 return -EINVAL; in kvm_s390_set_processor_feat()
1483 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1484 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1485 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1486 return -EBUSY; in kvm_s390_set_processor_feat()
1488 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_set_processor_feat()
1489 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1500 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1501 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1502 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1503 return -EBUSY; in kvm_s390_set_processor_subfunc()
1506 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1508 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1509 return -EFAULT; in kvm_s390_set_processor_subfunc()
1511 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1514 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1515 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1516 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1517 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1519 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1520 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1522 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1523 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1525 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1526 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1528 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1529 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1531 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1534 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1537 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1538 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1540 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1541 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1546 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1549 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1552 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1555 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1558 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1561 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1563 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1566 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1568 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1569 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1588 struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr; in kvm_s390_set_uv_feat()
1592 if (get_user(data, &ptr->feat)) in kvm_s390_set_uv_feat()
1593 return -EFAULT; in kvm_s390_set_uv_feat()
1595 return -EINVAL; in kvm_s390_set_uv_feat()
1597 mutex_lock(&kvm->lock); in kvm_s390_set_uv_feat()
1598 if (kvm->created_vcpus) { in kvm_s390_set_uv_feat()
1599 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1600 return -EBUSY; in kvm_s390_set_uv_feat()
1602 kvm->arch.model.uv_feat_guest.feat = data; in kvm_s390_set_uv_feat()
1603 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1605 VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data); in kvm_s390_set_uv_feat()
1612 int ret = -ENXIO; in kvm_s390_set_cpu_model()
1614 switch (attr->attr) { in kvm_s390_set_cpu_model()
1638 ret = -ENOMEM; in kvm_s390_get_processor()
1641 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1642 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1643 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1646 kvm->arch.model.ibc, in kvm_s390_get_processor()
1647 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1649 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1650 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1651 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1652 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) in kvm_s390_get_processor()
1653 ret = -EFAULT; in kvm_s390_get_processor()
1666 ret = -ENOMEM; in kvm_s390_get_machine()
1669 get_cpu_id((struct cpuid *) &mach->cpuid); in kvm_s390_get_machine()
1670 mach->ibc = sclp.ibc; in kvm_s390_get_machine()
1671 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1673 memcpy((unsigned long *)&mach->fac_list, stfle_fac_list, in kvm_s390_get_machine()
1676 kvm->arch.model.ibc, in kvm_s390_get_machine()
1677 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1679 mach->fac_mask[0], in kvm_s390_get_machine()
1680 mach->fac_mask[1], in kvm_s390_get_machine()
1681 mach->fac_mask[2]); in kvm_s390_get_machine()
1683 mach->fac_list[0], in kvm_s390_get_machine()
1684 mach->fac_list[1], in kvm_s390_get_machine()
1685 mach->fac_list[2]); in kvm_s390_get_machine()
1686 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) in kvm_s390_get_machine()
1687 ret = -EFAULT; in kvm_s390_get_machine()
1698 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_get_processor_feat()
1699 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_processor_feat()
1700 return -EFAULT; in kvm_s390_get_processor_feat()
1714 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_machine_feat()
1715 return -EFAULT; in kvm_s390_get_machine_feat()
1726 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1728 return -EFAULT; in kvm_s390_get_processor_subfunc()
1731 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1732 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1733 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1734 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1736 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1737 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1739 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1740 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1742 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1743 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1745 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1746 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1748 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1749 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1751 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1752 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1754 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1755 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1757 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1758 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1760 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1761 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1763 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1764 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1766 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1767 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1769 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1770 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1772 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1773 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1775 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1776 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1778 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1779 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1780 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1781 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1783 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1784 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1785 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1786 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1797 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, in kvm_s390_get_machine_subfunc()
1799 return -EFAULT; in kvm_s390_get_machine_subfunc()
1867 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_processor_uv_feat()
1868 unsigned long feat = kvm->arch.model.uv_feat_guest.feat; in kvm_s390_get_processor_uv_feat()
1870 if (put_user(feat, &dst->feat)) in kvm_s390_get_processor_uv_feat()
1871 return -EFAULT; in kvm_s390_get_processor_uv_feat()
1872 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_processor_uv_feat()
1879 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_machine_uv_feat()
1885 if (put_user(feat, &dst->feat)) in kvm_s390_get_machine_uv_feat()
1886 return -EFAULT; in kvm_s390_get_machine_uv_feat()
1887 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_machine_uv_feat()
1894 int ret = -ENXIO; in kvm_s390_get_cpu_model()
1896 switch (attr->attr) { in kvm_s390_get_cpu_model()
1926 * kvm_s390_update_topology_change_report - update CPU topology change report
1930 * Updates the Multiprocessor Topology-Change-Report bit to signal
1941 read_lock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1942 sca = kvm->arch.sca; in kvm_s390_update_topology_change_report()
1943 old = READ_ONCE(sca->utility); in kvm_s390_update_topology_change_report()
1947 } while (!try_cmpxchg(&sca->utility.val, &old.val, new.val)); in kvm_s390_update_topology_change_report()
1948 read_unlock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1955 return -ENXIO; in kvm_s390_set_topo_change_indication()
1957 kvm_s390_update_topology_change_report(kvm, !!attr->attr); in kvm_s390_set_topo_change_indication()
1967 return -ENXIO; in kvm_s390_get_topo_change_indication()
1969 read_lock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1970 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; in kvm_s390_get_topo_change_indication()
1971 read_unlock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1973 return put_user(topo, (u8 __user *)attr->addr); in kvm_s390_get_topo_change_indication()
1980 switch (attr->group) { in kvm_s390_vm_set_attr()
2000 ret = -ENXIO; in kvm_s390_vm_set_attr()
2011 switch (attr->group) { in kvm_s390_vm_get_attr()
2028 ret = -ENXIO; in kvm_s390_vm_get_attr()
2039 switch (attr->group) { in kvm_s390_vm_has_attr()
2041 switch (attr->attr) { in kvm_s390_vm_has_attr()
2044 ret = sclp.has_cmma ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2050 ret = -ENXIO; in kvm_s390_vm_has_attr()
2055 switch (attr->attr) { in kvm_s390_vm_has_attr()
2061 ret = -ENXIO; in kvm_s390_vm_has_attr()
2066 switch (attr->attr) { in kvm_s390_vm_has_attr()
2078 ret = -ENXIO; in kvm_s390_vm_has_attr()
2083 switch (attr->attr) { in kvm_s390_vm_has_attr()
2092 ret = ap_instructions_available() ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2095 ret = -ENXIO; in kvm_s390_vm_has_attr()
2103 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2106 ret = -ENXIO; in kvm_s390_vm_has_attr()
2119 if (args->flags != 0) in kvm_s390_get_skeys()
2120 return -EINVAL; in kvm_s390_get_skeys()
2123 if (!mm_uses_skeys(current->mm)) in kvm_s390_get_skeys()
2127 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_get_skeys()
2128 return -EINVAL; in kvm_s390_get_skeys()
2130 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_get_skeys()
2132 return -ENOMEM; in kvm_s390_get_skeys()
2134 mmap_read_lock(current->mm); in kvm_s390_get_skeys()
2135 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
2136 for (i = 0; i < args->count; i++) { in kvm_s390_get_skeys()
2137 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
2139 r = -EFAULT; in kvm_s390_get_skeys()
2143 r = get_guest_storage_key(current->mm, hva, &keys[i]); in kvm_s390_get_skeys()
2147 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
2148 mmap_read_unlock(current->mm); in kvm_s390_get_skeys()
2151 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, in kvm_s390_get_skeys()
2152 sizeof(uint8_t) * args->count); in kvm_s390_get_skeys()
2154 r = -EFAULT; in kvm_s390_get_skeys()
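kvm_s390_get_skeys() backs the KVM_S390_GET_SKEYS VM ioctl: user space names a guest frame range and a buffer and gets back one storage key byte per page. A hedged sketch (vm_fd assumed, count limited to KVM_S390_SKEYS_MAX):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch 'count' storage keys starting at guest frame number 'start_gfn'. */
static int get_storage_keys(int vm_fd, uint64_t start_gfn, uint64_t count,
			    uint8_t *keys)
{
	struct kvm_s390_skeys args;

	memset(&args, 0, sizeof(args));
	args.start_gfn = start_gfn;
	args.count = count;
	args.skeydata_addr = (uint64_t)(unsigned long)keys;
	return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
}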
2168 if (args->flags != 0) in kvm_s390_set_skeys()
2169 return -EINVAL; in kvm_s390_set_skeys()
2172 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_set_skeys()
2173 return -EINVAL; in kvm_s390_set_skeys()
2175 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_set_skeys()
2177 return -ENOMEM; in kvm_s390_set_skeys()
2179 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, in kvm_s390_set_skeys()
2180 sizeof(uint8_t) * args->count); in kvm_s390_set_skeys()
2182 r = -EFAULT; in kvm_s390_set_skeys()
2192 mmap_read_lock(current->mm); in kvm_s390_set_skeys()
2193 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
2194 while (i < args->count) { in kvm_s390_set_skeys()
2196 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
2198 r = -EFAULT; in kvm_s390_set_skeys()
2204 r = -EINVAL; in kvm_s390_set_skeys()
2208 r = set_guest_storage_key(current->mm, hva, keys[i], 0); in kvm_s390_set_skeys()
2210 r = fixup_user_fault(current->mm, hva, in kvm_s390_set_skeys()
2218 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
2219 mmap_read_unlock(current->mm); in kvm_s390_set_skeys()
2237 unsigned long pgstev, hva, cur_gfn = args->start_gfn; in kvm_s390_peek_cmma()
2239 args->count = 0; in kvm_s390_peek_cmma()
2240 while (args->count < bufsize) { in kvm_s390_peek_cmma()
2247 return args->count ? 0 : -EFAULT; in kvm_s390_peek_cmma()
2248 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2250 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_peek_cmma()
2267 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
2268 struct rb_node *mnode = &ms->gfn_node[slots->node_idx]; in kvm_s390_next_dirty_cmma()
2270 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2274 mnode = rb_first(&slots->gfn_tree); in kvm_s390_next_dirty_cmma()
2276 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2280 if (cur_gfn < ms->base_gfn) in kvm_s390_next_dirty_cmma()
2283 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); in kvm_s390_next_dirty_cmma()
2284 while (ofs >= ms->npages && (mnode = rb_next(mnode))) { in kvm_s390_next_dirty_cmma()
2285 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2286 ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages); in kvm_s390_next_dirty_cmma()
2288 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
2301 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); in kvm_s390_get_cmma()
2303 args->count = 0; in kvm_s390_get_cmma()
2304 args->start_gfn = cur_gfn; in kvm_s390_get_cmma()
2310 while (args->count < bufsize) { in kvm_s390_get_cmma()
2315 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
2316 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2317 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2320 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_get_cmma()
2329 (next_gfn - args->start_gfn >= bufsize)) in kvm_s390_get_cmma()
2333 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
2357 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2358 return -ENXIO; in kvm_s390_get_cmma_bits()
2360 if (args->flags & ~KVM_S390_CMMA_PEEK) in kvm_s390_get_cmma_bits()
2361 return -EINVAL; in kvm_s390_get_cmma_bits()
2362 /* Migration mode query, and we are not doing a migration */ in kvm_s390_get_cmma_bits()
2363 peek = !!(args->flags & KVM_S390_CMMA_PEEK); in kvm_s390_get_cmma_bits()
2364 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2365 return -EINVAL; in kvm_s390_get_cmma_bits()
2367 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); in kvm_s390_get_cmma_bits()
2368 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2373 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2380 return -ENOMEM; in kvm_s390_get_cmma_bits()
2382 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2383 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2388 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2389 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2391 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2392 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2394 args->remaining = 0; in kvm_s390_get_cmma_bits()
2396 if (copy_to_user((void __user *)args->values, values, args->count)) in kvm_s390_get_cmma_bits()
2397 ret = -EFAULT; in kvm_s390_get_cmma_bits()
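kvm_s390_get_cmma_bits() implements the KVM_S390_GET_CMMA_BITS VM ioctl used during migration: without KVM_S390_CMMA_PEEK it walks the dirty-CMMA bitmap, with it it simply reads the PGSTE values starting at start_gfn. A hedged peek sketch (the kernel updates args->count to the number of values actually written):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Peek the CMMA values of 'count' pages starting at 'start_gfn'. */
static int peek_cmma(int vm_fd, struct kvm_s390_cmma_log *args,
		     uint64_t start_gfn, uint32_t count, uint8_t *values)
{
	memset(args, 0, sizeof(*args));
	args->start_gfn = start_gfn;
	args->count = count;			/* capped at KVM_S390_CMMA_SIZE_MAX */
	args->flags = KVM_S390_CMMA_PEEK;
	args->values = (uint64_t)(unsigned long)values;
	return ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, args);
}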
2406 * set and the mm->context.uses_cmm flag is set.
2415 mask = args->mask; in kvm_s390_set_cmma_bits()
2417 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2418 return -ENXIO; in kvm_s390_set_cmma_bits()
2420 if (args->flags != 0) in kvm_s390_set_cmma_bits()
2421 return -EINVAL; in kvm_s390_set_cmma_bits()
2423 if (args->count > KVM_S390_CMMA_SIZE_MAX) in kvm_s390_set_cmma_bits()
2424 return -EINVAL; in kvm_s390_set_cmma_bits()
2426 if (args->count == 0) in kvm_s390_set_cmma_bits()
2429 bits = vmalloc(array_size(sizeof(*bits), args->count)); in kvm_s390_set_cmma_bits()
2431 return -ENOMEM; in kvm_s390_set_cmma_bits()
2433 r = copy_from_user(bits, (void __user *)args->values, args->count); in kvm_s390_set_cmma_bits()
2435 r = -EFAULT; in kvm_s390_set_cmma_bits()
2439 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2440 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2441 for (i = 0; i < args->count; i++) { in kvm_s390_set_cmma_bits()
2442 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2444 r = -EFAULT; in kvm_s390_set_cmma_bits()
2451 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2453 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2454 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2456 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2457 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2458 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2459 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2467 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2477 * Return: 0 in case of success, otherwise -EIO
2495 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2499 ret = -EIO; in kvm_s390_cpus_from_pv()
2501 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2503 /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */ in kvm_s390_cpus_from_pv()
2510 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2518 * Return: 0 in case of success, otherwise -EIO
2533 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2535 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2545 * Here we provide user space with a direct interface to query UV
2556 switch (info->header.id) { in kvm_s390_handle_pv_info()
2558 len_min = sizeof(info->header) + sizeof(info->vm); in kvm_s390_handle_pv_info()
2560 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2561 return -EINVAL; in kvm_s390_handle_pv_info()
2563 memcpy(info->vm.inst_calls_list, in kvm_s390_handle_pv_info()
2568 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1; in kvm_s390_handle_pv_info()
2569 info->vm.max_guests = uv_info.max_num_sec_conf; in kvm_s390_handle_pv_info()
2570 info->vm.max_guest_addr = uv_info.max_sec_stor_addr; in kvm_s390_handle_pv_info()
2571 info->vm.feature_indication = uv_info.uv_feature_indications; in kvm_s390_handle_pv_info()
2576 len_min = sizeof(info->header) + sizeof(info->dump); in kvm_s390_handle_pv_info()
2578 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2579 return -EINVAL; in kvm_s390_handle_pv_info()
2581 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len; in kvm_s390_handle_pv_info()
2582 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len; in kvm_s390_handle_pv_info()
2583 info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len; in kvm_s390_handle_pv_info()
2587 return -EINVAL; in kvm_s390_handle_pv_info()
2594 int r = -EINVAL; in kvm_s390_pv_dmp()
2599 if (kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2609 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2611 cmd->rc, cmd->rrc); in kvm_s390_pv_dmp()
2613 kvm->arch.pv.dumping = true; in kvm_s390_pv_dmp()
2616 r = -EINVAL; in kvm_s390_pv_dmp()
2621 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2630 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2634 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2637 r = -EINVAL; in kvm_s390_pv_dmp()
2642 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2646 r = -ENOTTY; in kvm_s390_pv_dmp()
2655 const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM); in kvm_s390_handle_pv()
2656 void __user *argp = (void __user *)cmd->data; in kvm_s390_handle_pv()
2661 mutex_lock(&kvm->lock); in kvm_s390_handle_pv()
2663 switch (cmd->cmd) { in kvm_s390_handle_pv()
2665 r = -EINVAL; in kvm_s390_handle_pv()
2681 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2685 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2690 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2694 r = -EINVAL; in kvm_s390_handle_pv()
2698 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2706 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2709 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2712 r = -EINVAL; in kvm_s390_handle_pv()
2715 /* kvm->lock must not be held; this is asserted inside the function. */ in kvm_s390_handle_pv()
2716 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2719 r = -EINVAL; in kvm_s390_handle_pv()
2723 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2731 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2734 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2741 r = -EINVAL; in kvm_s390_handle_pv()
2745 r = -EFAULT; in kvm_s390_handle_pv()
2750 r = -EINVAL; in kvm_s390_handle_pv()
2754 r = -ENOMEM; in kvm_s390_handle_pv()
2759 r = -EFAULT; in kvm_s390_handle_pv()
2763 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2771 r = -EINVAL; in kvm_s390_handle_pv()
2772 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2775 r = -EFAULT; in kvm_s390_handle_pv()
2780 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2784 r = -EINVAL; in kvm_s390_handle_pv()
2789 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2790 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2791 cmd->rrc); in kvm_s390_handle_pv()
2795 r = -EINVAL; in kvm_s390_handle_pv()
2800 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2802 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2806 r = -EINVAL; in kvm_s390_handle_pv()
2811 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2813 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2823 * Maybe user space wants to query some of the data in kvm_s390_handle_pv()
2829 r = -EFAULT; in kvm_s390_handle_pv()
2833 r = -EINVAL; in kvm_s390_handle_pv()
2849 r = -EFAULT; in kvm_s390_handle_pv()
2859 r = -EINVAL; in kvm_s390_handle_pv()
2863 r = -EFAULT; in kvm_s390_handle_pv()
2872 r = -EFAULT; in kvm_s390_handle_pv()
2879 r = -ENOTTY; in kvm_s390_handle_pv()
2882 mutex_unlock(&kvm->lock); in kvm_s390_handle_pv()
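kvm_s390_handle_pv() is reached through the KVM_S390_PV_COMMAND VM ioctl. A heavily simplified enable sketch; it assumes KVM_CAP_S390_PROTECTED was enabled on the VM beforehand and ignores the rc/rrc Ultravisor diagnostics the real flow reports:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Convert the VM (and its vCPUs) to protected mode. */
static int make_vm_protected(int vm_fd)
{
	struct kvm_pv_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = KVM_PV_ENABLE;
	/* On failure, cmd.rc / cmd.rrc hold the Ultravisor return codes. */
	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
}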
2889 if (mop->flags & ~supported_flags || !mop->size) in mem_op_validate_common()
2890 return -EINVAL; in mem_op_validate_common()
2891 if (mop->size > MEM_OP_MAX_SIZE) in mem_op_validate_common()
2892 return -E2BIG; in mem_op_validate_common()
2893 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) { in mem_op_validate_common()
2894 if (mop->key > 0xf) in mem_op_validate_common()
2895 return -EINVAL; in mem_op_validate_common()
2897 mop->key = 0; in mem_op_validate_common()
2904 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_abs()
2914 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vm_mem_op_abs()
2915 tmpbuf = vmalloc(mop->size); in kvm_s390_vm_mem_op_abs()
2917 return -ENOMEM; in kvm_s390_vm_mem_op_abs()
2920 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_abs()
2922 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_abs()
2927 acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vm_mem_op_abs()
2928 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vm_mem_op_abs()
2929 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key); in kvm_s390_vm_mem_op_abs()
2933 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2934 mop->size, GACC_FETCH, mop->key); in kvm_s390_vm_mem_op_abs()
2937 if (copy_to_user(uaddr, tmpbuf, mop->size)) in kvm_s390_vm_mem_op_abs()
2938 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2940 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vm_mem_op_abs()
2941 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2944 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2945 mop->size, GACC_STORE, mop->key); in kvm_s390_vm_mem_op_abs()
2949 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_abs()
2957 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_cmpxchg()
2958 void __user *old_addr = (void __user *)mop->old_addr; in kvm_s390_vm_mem_op_cmpxchg()
2963 unsigned int off_in_quad = sizeof(new) - mop->size; in kvm_s390_vm_mem_op_cmpxchg()
2975 if (mop->size > sizeof(new)) in kvm_s390_vm_mem_op_cmpxchg()
2976 return -EINVAL; in kvm_s390_vm_mem_op_cmpxchg()
2977 if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2978 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2979 if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2980 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2982 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_cmpxchg()
2984 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_cmpxchg()
2989 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad, in kvm_s390_vm_mem_op_cmpxchg()
2990 new.quad, mop->key, &success); in kvm_s390_vm_mem_op_cmpxchg()
2991 if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2992 r = -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2995 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_cmpxchg()
3002 * This is technically a heuristic only, if the kvm->lock is not in kvm_s390_vm_mem_op()
3003 * taken, it is not guaranteed that the vm is/remains non-protected. in kvm_s390_vm_mem_op()
3005 * on the access, -EFAULT is returned and the vm may crash the in kvm_s390_vm_mem_op()
3011 return -EINVAL; in kvm_s390_vm_mem_op()
3013 switch (mop->op) { in kvm_s390_vm_mem_op()
3020 return -EINVAL; in kvm_s390_vm_mem_op()
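kvm_s390_vm_mem_op() serves the VM-scope KVM_S390_MEM_OP ioctl, which reads or writes guest absolute memory without going through a vCPU (the host is assumed to advertise KVM_CAP_S390_MEM_OP_EXTENSION for this VM-level variant). A hedged read sketch:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read 'size' bytes from guest absolute address 'gaddr' into 'buf'. */
static int read_guest_absolute(int vm_fd, uint64_t gaddr, void *buf,
			       uint32_t size)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.op = KVM_S390_MEMOP_ABSOLUTE_READ;
	op.gaddr = gaddr;
	op.size = size;
	op.buf = (uint64_t)(unsigned long)buf;
	return ioctl(vm_fd, KVM_S390_MEM_OP, &op);
}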
3026 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
3035 r = -EFAULT; in kvm_arch_vm_ioctl()
3042 r = -EINVAL; in kvm_arch_vm_ioctl()
3043 if (kvm->arch.use_irqchip) in kvm_arch_vm_ioctl()
3048 r = -EFAULT; in kvm_arch_vm_ioctl()
3055 r = -EFAULT; in kvm_arch_vm_ioctl()
3062 r = -EFAULT; in kvm_arch_vm_ioctl()
3071 r = -EFAULT; in kvm_arch_vm_ioctl()
3081 r = -EFAULT; in kvm_arch_vm_ioctl()
3091 r = -EFAULT; in kvm_arch_vm_ioctl()
3094 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3096 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3100 r = -EFAULT; in kvm_arch_vm_ioctl()
3107 r = -EFAULT; in kvm_arch_vm_ioctl()
3110 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3112 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3122 r = -EINVAL; in kvm_arch_vm_ioctl()
3126 r = -EFAULT; in kvm_arch_vm_ioctl()
3130 r = -EINVAL; in kvm_arch_vm_ioctl()
3133 /* must be called without kvm->lock */ in kvm_arch_vm_ioctl()
3136 r = -EFAULT; in kvm_arch_vm_ioctl()
3147 r = -EFAULT; in kvm_arch_vm_ioctl()
3153 r = -EINVAL; in kvm_arch_vm_ioctl()
3157 r = -EFAULT; in kvm_arch_vm_ioctl()
3164 r = -ENOTTY; in kvm_arch_vm_ioctl()
3192 kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb); in kvm_s390_set_crycb_format()
3194 /* Clear the CRYCB format bits - i.e., set format 0 by default */ in kvm_s390_set_crycb_format()
3195 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
3202 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
3204 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
3219 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3225 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
3229 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
3231 memcpy(crycb->apcb1.apm, apm, 32); in kvm_arch_crypto_set_masks()
3234 memcpy(crycb->apcb1.aqm, aqm, 32); in kvm_arch_crypto_set_masks()
3237 memcpy(crycb->apcb1.adm, adm, 32); in kvm_arch_crypto_set_masks()
3243 memcpy(crycb->apcb0.apm, apm, 8); in kvm_arch_crypto_set_masks()
3244 memcpy(crycb->apcb0.aqm, aqm, 2); in kvm_arch_crypto_set_masks()
3245 memcpy(crycb->apcb0.adm, adm, 2); in kvm_arch_crypto_set_masks()
3269 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3276 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
3277 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
3278 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
3279 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
3299 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
3301 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
3307 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
3308 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
3309 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
3310 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
3311 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
3312 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
3317 if (kvm->arch.use_esca) in sca_dispose()
3318 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
3320 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
3321 kvm->arch.sca = NULL; in sca_dispose()
3339 rc = -EINVAL; in kvm_arch_init_vm()
3354 rc = -ENOMEM; in kvm_arch_init_vm()
3358 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
3360 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
3361 if (!kvm->arch.sca) in kvm_arch_init_vm()
3367 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
3368 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
3371 sprintf(debug_name, "kvm-%u", current->pid); in kvm_arch_init_vm()
3373 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
3374 if (!kvm->arch.dbf) in kvm_arch_init_vm()
3378 kvm->arch.sie_page2 = in kvm_arch_init_vm()
3380 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
3383 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
3384 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
3387 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3390 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3393 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
3395 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
3396 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
3397 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
3399 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
3400 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
3402 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
3403 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
3407 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
3409 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
3410 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
3412 kvm->arch.model.uv_feat_guest.feat = 0; in kvm_arch_init_vm()
3417 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
3420 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
3423 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
3424 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
3426 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
3427 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
3428 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
3430 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
3442 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
3443 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
3444 /* one flat fake memslot covering the whole address-space */ in kvm_arch_init_vm()
3445 mutex_lock(&kvm->slots_lock); in kvm_arch_init_vm()
3447 mutex_unlock(&kvm->slots_lock); in kvm_arch_init_vm()
3450 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
3452 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
3454 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
3455 if (!kvm->arch.gmap) in kvm_arch_init_vm()
3457 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
3458 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
3461 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
3462 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
3463 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
3467 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); in kvm_arch_init_vm()
3468 kvm->arch.pv.set_aside = NULL; in kvm_arch_init_vm()
3469 KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
3473 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
3474 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
3485 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
3488 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3490 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_destroy()
3492 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3493 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
3495 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
3500 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
3511 * We are already at the end of life and kvm->lock is not taken. in kvm_arch_destroy_vm()
3522 if (kvm->arch.pv.mmu_notifier.ops) in kvm_arch_destroy_vm()
3523 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); in kvm_arch_destroy_vm()
3525 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
3526 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
3528 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
3538 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
3539 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
3540 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
3541 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
3550 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3551 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
3552 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3554 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_del_vcpu()
3555 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3557 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3559 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_del_vcpu()
3560 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3562 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3568 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); in sca_add_vcpu()
3571 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3572 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3575 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3576 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
3577 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3580 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3581 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3582 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK; in sca_add_vcpu()
3583 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
3584 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_add_vcpu()
3586 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3589 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3590 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3591 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3592 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_add_vcpu()
3594 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3600 d->sda = s->sda; in sca_copy_entry()
3601 d->sigp_ctrl.c = s->sigp_ctrl.c; in sca_copy_entry()
3602 d->sigp_ctrl.scn = s->sigp_ctrl.scn; in sca_copy_entry()
3609 d->ipte_control = s->ipte_control; in sca_copy_b_to_e()
3610 d->mcn[0] = s->mcn; in sca_copy_b_to_e()
3612 sca_copy_entry(&d->cpu[i], &s->cpu[i]); in sca_copy_b_to_e()
3617 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
3624 if (kvm->arch.use_esca) in sca_switch_to_extended()
3629 return -ENOMEM; in sca_switch_to_extended()
3636 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3641 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
3642 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
3643 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
3645 kvm->arch.sca = new_sca; in sca_switch_to_extended()
3646 kvm->arch.use_esca = 1; in sca_switch_to_extended()
3648 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3653 VM_EVENT(kvm, 2, "Switched to ESCA (0x%p -> 0x%p)", in sca_switch_to_extended()
3654 old_sca, kvm->arch.sca); in sca_switch_to_extended()
3672 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
3680 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
3681 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3682 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
3683 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3689 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
3690 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3691 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
3692 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
3693 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3699 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
3700 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
3707 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
3709 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
3726 /* set the cpu timer - may only be called from the VCPU thread itself */
3730 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3731 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
3732 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
3733 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
3734 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3738 /* update and get the cpu timer - can also be called from other VCPU threads */
3744 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
3745 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3749 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
3754 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); in kvm_s390_get_cpu_timer()
3755 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3757 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
3758 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
3759 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
3768 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
3770 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3775 vcpu->cpu = -1; in kvm_arch_vcpu_put()
3776 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
3784 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3786 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3787 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3789 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3790 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3791 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3794 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3795 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
3800 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3830 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3833 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3834 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
3835 vcpu->arch.sie_block->eca &= ~ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3836 vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC); in kvm_s390_vcpu_crypto_setup()
3838 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3839 vcpu->arch.sie_block->eca |= ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3842 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3843 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
3845 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3846 vcpu->arch.sie_block->ecd |= ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3847 if (kvm_has_pckmo_hmac(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3848 vcpu->arch.sie_block->ecd |= ECD_HMAC; in kvm_s390_vcpu_crypto_setup()
3851 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3852 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
3857 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo)); in kvm_s390_vcpu_unsetup_cmma()
3858 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
3866 return -ENOMEM; in kvm_s390_vcpu_setup_cmma()
3868 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page); in kvm_s390_vcpu_setup_cmma()
3874 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3876 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
3877 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3878 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list); in kvm_s390_vcpu_setup_model()
3886 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_s390_vcpu_setup()
3890 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3892 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3899 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_s390_vcpu_setup()
3900 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3901 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_s390_vcpu_setup()
3902 if (test_kvm_facility(vcpu->kvm, 11)) in kvm_s390_vcpu_setup()
3903 vcpu->arch.sie_block->ecb |= ECB_PTF; in kvm_s390_vcpu_setup()
3904 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3905 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_s390_vcpu_setup()
3906 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_s390_vcpu_setup()
3907 vcpu->arch.sie_block->ecb |= ECB_SPECI; in kvm_s390_vcpu_setup()
3909 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3910 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_s390_vcpu_setup()
3911 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3912 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_s390_vcpu_setup()
3913 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_s390_vcpu_setup()
3915 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_s390_vcpu_setup()
3917 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_s390_vcpu_setup()
3919 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_s390_vcpu_setup()
3921 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_s390_vcpu_setup()
3922 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3923 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_s390_vcpu_setup()
3924 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_s390_vcpu_setup()
3926 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3927 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_s390_vcpu_setup()
3928 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3929 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_s390_vcpu_setup()
3930 if (vcpu->arch.sie_block->gd) { in kvm_s390_vcpu_setup()
3931 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_s390_vcpu_setup()
3932 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", in kvm_s390_vcpu_setup()
3933 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_s390_vcpu_setup()
3935 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC; in kvm_s390_vcpu_setup()
3936 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb); in kvm_s390_vcpu_setup()
3941 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_vcpu_setup()
3943 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3948 hrtimer_setup(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, CLOCK_MONOTONIC, in kvm_s390_vcpu_setup()
3951 vcpu->arch.sie_block->hpid = HPID_KVM; in kvm_s390_vcpu_setup()
3957 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3958 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3963 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3971 return -EINVAL; in kvm_arch_vcpu_precreate()
3983 return -ENOMEM; in kvm_arch_vcpu_create()
3985 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
3986 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb); in kvm_arch_vcpu_create()
3989 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
3990 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
3992 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; in kvm_arch_vcpu_create()
3993 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
3994 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); in kvm_arch_vcpu_create()
3995 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
3997 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_create()
3999 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_create()
4006 vcpu->arch.acrs_loaded = false; in kvm_arch_vcpu_create()
4008 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
4009 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; in kvm_arch_vcpu_create()
4010 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
4011 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; in kvm_arch_vcpu_create()
4012 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
4013 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; in kvm_arch_vcpu_create()
4014 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
4015 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; in kvm_arch_vcpu_create()
4020 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_create()
4022 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; in kvm_arch_vcpu_create()
4024 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
4030 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p", in kvm_arch_vcpu_create()
4031 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
4032 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
4038 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_create()
4042 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
4043 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_create()
4045 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
4051 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
4057 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
4062 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
4068 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
4073 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
4079 return atomic_read(&vcpu->arch.sie_block->prog20) & in kvm_s390_vcpu_sie_inhibited()
4085 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
4096 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
4110 struct kvm *kvm = gmap->private; in kvm_gmap_notifier()
4125 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { in kvm_gmap_notifier()
4126 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", in kvm_gmap_notifier()
4136 if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >= in kvm_arch_no_poll()
4138 vcpu->stat.halt_no_poll_steal++; in kvm_arch_no_poll()
4154 int r = -EINVAL; in kvm_arch_vcpu_ioctl_get_one_reg()
4156 switch (reg->id) { in kvm_arch_vcpu_ioctl_get_one_reg()
4158 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
4159 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4162 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
4163 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4167 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4170 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
4171 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4174 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
4175 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4178 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
4179 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4182 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
4183 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4186 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
4187 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4190 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
4191 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4203 int r = -EINVAL; in kvm_arch_vcpu_ioctl_set_one_reg()
4206 switch (reg->id) { in kvm_arch_vcpu_ioctl_set_one_reg()
4208 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
4209 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4212 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
4213 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4216 r = get_user(val, (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4221 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
4222 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4225 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
4226 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4227 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
4231 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
4232 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4235 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
4236 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4239 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
4240 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4243 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
4244 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4255 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; in kvm_arch_vcpu_ioctl_normal_reset()
4256 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_ioctl_normal_reset()
4257 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); in kvm_arch_vcpu_ioctl_normal_reset()
4260 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
4274 vcpu->arch.sie_block->gpsw.mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4275 vcpu->arch.sie_block->gpsw.addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4278 vcpu->arch.sie_block->ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4279 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); in kvm_arch_vcpu_ioctl_initial_reset()
4280 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4281 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4284 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); in kvm_arch_vcpu_ioctl_initial_reset()
4285 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4286 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4287 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4288 vcpu->run->psw_addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4289 vcpu->run->psw_mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4290 vcpu->run->s.regs.todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4291 vcpu->run->s.regs.cputm = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4292 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4293 vcpu->run->s.regs.pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4294 vcpu->run->s.regs.gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4295 vcpu->run->s.regs.fpc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4302 vcpu->arch.sie_block->gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4303 vcpu->arch.sie_block->pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4304 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_arch_vcpu_ioctl_initial_reset()
4305 vcpu->arch.sie_block->todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4311 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_arch_vcpu_ioctl_clear_reset()
4316 memset(&regs->gprs, 0, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_clear_reset()
4317 memset(&regs->vrs, 0, sizeof(regs->vrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4318 memset(&regs->acrs, 0, sizeof(regs->acrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4319 memset(&regs->gscb, 0, sizeof(regs->gscb)); in kvm_arch_vcpu_ioctl_clear_reset()
4321 regs->etoken = 0; in kvm_arch_vcpu_ioctl_clear_reset()
4322 regs->etoken_extension = 0; in kvm_arch_vcpu_ioctl_clear_reset()
4328 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
4336 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
4346 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
4347 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
4358 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
4359 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
4371 vcpu->run->s.regs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
4373 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, in kvm_arch_vcpu_ioctl_set_fpu()
4374 (freg_t *) fpu->fprs); in kvm_arch_vcpu_ioctl_set_fpu()
4376 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
4387 convert_vx_to_fp((freg_t *) fpu->fprs, in kvm_arch_vcpu_ioctl_get_fpu()
4388 (__vector128 *) vcpu->run->s.regs.vrs); in kvm_arch_vcpu_ioctl_get_fpu()
4390 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
4391 fpu->fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
4402 rc = -EBUSY; in kvm_arch_vcpu_ioctl_set_initial_psw()
4404 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
4405 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
4413 return -EINVAL; /* not implemented yet */ in kvm_arch_vcpu_ioctl_translate()
4427 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4430 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4431 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4435 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4439 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4440 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
4444 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) in kvm_arch_vcpu_ioctl_set_guest_debug()
4448 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4452 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4484 /* user space knows about this interface - let it control the state */ in kvm_arch_vcpu_ioctl_set_mpstate()
4485 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_mpstate()
4487 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
4496 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
4504 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
4518 struct kvm *kvm = gmap->private; in __kvm_s390_fixup_fault_sync()
4531 rc = fixup_user_fault(gmap->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); in __kvm_s390_fixup_fault_sync()
4538 * __kvm_s390_mprotect_many() - Apply specified protection to guest pages
4545 * Returns: 0 in case of success, < 0 in case of error - see gmap_protect_one()
4547 * Context: kvm->srcu and gmap->mm need to be held in read mode
4558 if (rc == -EAGAIN) { in __kvm_s390_mprotect_many()
4574 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_mprotect_notify_prefix()
4575 mmap_read_lock(vcpu->arch.gmap->mm); in kvm_s390_mprotect_notify_prefix()
4577 rc = __kvm_s390_mprotect_many(vcpu->arch.gmap, gaddr, 2, PROT_WRITE, GMAP_NOTIFY_MPROT); in kvm_s390_mprotect_notify_prefix()
4579 mmap_read_unlock(vcpu->arch.gmap->mm); in kvm_s390_mprotect_notify_prefix()
4580 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_s390_mprotect_notify_prefix()
4592 * If the guest prefix changed, re-arm the ipte notifier for the in kvm_s390_handle_requests()
4610 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
4616 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
4624 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
4631 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
4641 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
4647 * Re-enable CMM virtualization if CMMA is available and in kvm_s390_handle_requests()
4650 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
4651 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
4652 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
4672 kvm->arch.epoch = gtod->tod - clk.tod; in __kvm_s390_set_tod_clock()
4673 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
4675 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in __kvm_s390_set_tod_clock()
4676 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
4677 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
4682 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
4683 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
4692 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
4695 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
4712 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4719 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4720 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4728 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
4729 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
4752 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
4754 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
4755 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
4761 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
4763 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
4766 hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr); in kvm_arch_setup_async_pf()
4767 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
4770 return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch); in kvm_arch_setup_async_pf()
4784 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
4785 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
4790 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4805 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4807 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
4808 current->thread.gmap_int_code = 0; in vcpu_pre_run()
4809 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
4835 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_addressing_exception()
4840 /* Instruction-Fetching Exceptions - we can't detect the ilen. in vcpu_post_run_addressing_exception()
4844 pgm_info = vcpu->arch.pgm; in vcpu_post_run_addressing_exception()
4854 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm, in kvm_s390_assert_primary_as()
4856 current->thread.gmap_int_code, current->thread.gmap_teid.val); in kvm_s390_assert_primary_as()
4860 * __kvm_s390_handle_dat_fault() - handle a dat fault for the gmap of a vcpu
4880 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) in __kvm_s390_handle_dat_fault()
4884 if (vcpu->arch.gmap->pfault_enabled) in __kvm_s390_handle_dat_fault()
4896 return -EAGAIN; in __kvm_s390_handle_dat_fault()
4903 vcpu->stat.pfault_sync++; in __kvm_s390_handle_dat_fault()
4910 return -EFAULT; in __kvm_s390_handle_dat_fault()
4913 mmap_read_lock(vcpu->arch.gmap->mm); in __kvm_s390_handle_dat_fault()
4915 rc = fixup_user_fault(vcpu->arch.gmap->mm, vmaddr, fault_flags, &unlocked); in __kvm_s390_handle_dat_fault()
4917 rc = __gmap_link(vcpu->arch.gmap, gaddr, vmaddr); in __kvm_s390_handle_dat_fault()
4918 scoped_guard(spinlock, &vcpu->kvm->mmu_lock) { in __kvm_s390_handle_dat_fault()
4919 kvm_release_faultin_page(vcpu->kvm, page, false, writable); in __kvm_s390_handle_dat_fault()
4921 mmap_read_unlock(vcpu->arch.gmap->mm); in __kvm_s390_handle_dat_fault()
4931 if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_dat_fault_handler()
4933 * This translates the per-vCPU guest address into a in vcpu_dat_fault_handler()
4939 mmap_read_lock(vcpu->arch.gmap->mm); in vcpu_dat_fault_handler()
4940 gaddr_tmp = __gmap_translate(vcpu->arch.gmap, gaddr); in vcpu_dat_fault_handler()
4941 mmap_read_unlock(vcpu->arch.gmap->mm); in vcpu_dat_fault_handler()
4942 if (gaddr_tmp == -EFAULT) { in vcpu_dat_fault_handler()
4943 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_dat_fault_handler()
4944 vcpu->run->s390_ucontrol.trans_exc_code = gaddr; in vcpu_dat_fault_handler()
4945 vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION; in vcpu_dat_fault_handler()
4946 return -EREMOTE; in vcpu_dat_fault_handler()
4959 gaddr = current->thread.gmap_teid.addr * PAGE_SIZE; in vcpu_post_run_handle_fault()
4963 switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) { in vcpu_post_run_handle_fault()
4965 vcpu->stat.exit_null++; in vcpu_post_run_handle_fault()
4976 if (gmap_destroy_page(vcpu->arch.gmap, gaddr)) { in vcpu_post_run_handle_fault()
4986 current->thread.gmap_int_code, current->comm, in vcpu_post_run_handle_fault()
4987 current->pid); in vcpu_post_run_handle_fault()
4998 rc = gmap_convert_to_secure(vcpu->arch.gmap, gaddr); in vcpu_post_run_handle_fault()
4999 if (rc == -EINVAL) in vcpu_post_run_handle_fault()
5001 if (rc != -ENXIO) in vcpu_post_run_handle_fault()
5015 KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx", in vcpu_post_run_handle_fault()
5016 current->thread.gmap_int_code, current->thread.gmap_teid.val); in vcpu_post_run_handle_fault()
5030 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
5031 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
5036 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
5037 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
5039 if (exit_reason == -EINTR) { in vcpu_post_run()
5041 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
5043 mcck_info = &sie_page->mcck_info; in vcpu_post_run()
5048 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
5051 if (rc != -EOPNOTSUPP) in vcpu_post_run()
5053 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; in vcpu_post_run()
5054 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
5055 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
5056 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
5057 return -EREMOTE; in vcpu_post_run()
5067 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; in __vcpu_run()
5070 * We try to hold kvm->srcu during most of vcpu_run (except when run- in __vcpu_run()
5090 memcpy(sie_page->pv_grregs, in __vcpu_run()
5091 vcpu->run->s.regs.gprs, in __vcpu_run()
5092 sizeof(sie_page->pv_grregs)); in __vcpu_run()
5094 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
5095 vcpu->run->s.regs.gprs, in __vcpu_run()
5096 vcpu->arch.gmap->asce); in __vcpu_run()
5098 memcpy(vcpu->run->s.regs.gprs, in __vcpu_run()
5099 sie_page->pv_grregs, in __vcpu_run()
5100 sizeof(sie_page->pv_grregs)); in __vcpu_run()
5103 * that leave the guest state in an "in-between" state in __vcpu_run()
5107 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || in __vcpu_run()
5108 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { in __vcpu_run()
5109 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in __vcpu_run()
5127 struct kvm_run *kvm_run = vcpu->run; in sync_regs_fmt2()
5131 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; in sync_regs_fmt2()
5132 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; in sync_regs_fmt2()
5133 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs_fmt2()
5134 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs_fmt2()
5135 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs_fmt2()
5136 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs_fmt2()
5137 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs_fmt2()
5138 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs_fmt2()
5140 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { in sync_regs_fmt2()
5141 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs_fmt2()
5142 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs_fmt2()
5143 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs_fmt2()
5144 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs_fmt2()
5147 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { in sync_regs_fmt2()
5148 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; in sync_regs_fmt2()
5149 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; in sync_regs_fmt2()
5150 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc); in sync_regs_fmt2()
5156 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && in sync_regs_fmt2()
5157 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
5158 riccb->v && in sync_regs_fmt2()
5159 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs_fmt2()
5161 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs_fmt2()
5164 * If userspace sets the gscb (e.g. after migration) to non-zero, in sync_regs_fmt2()
5167 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && in sync_regs_fmt2()
5168 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
5169 gscb->gssm && in sync_regs_fmt2()
5170 !vcpu->arch.gs_enabled) { in sync_regs_fmt2()
5172 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs_fmt2()
5173 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs_fmt2()
5174 vcpu->arch.gs_enabled = 1; in sync_regs_fmt2()
5176 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && in sync_regs_fmt2()
5177 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
5178 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs_fmt2()
5179 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs_fmt2()
5184 if (current->thread.gs_cb) { in sync_regs_fmt2()
5185 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs_fmt2()
5186 save_gs_cb(vcpu->arch.host_gscb); in sync_regs_fmt2()
5188 if (vcpu->arch.gs_enabled) { in sync_regs_fmt2()
5189 current->thread.gs_cb = (struct gs_cb *) in sync_regs_fmt2()
5190 &vcpu->run->s.regs.gscb; in sync_regs_fmt2()
5191 restore_gs_cb(current->thread.gs_cb); in sync_regs_fmt2()
5200 struct kvm_run *kvm_run = vcpu->run; in sync_regs()
5202 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) in sync_regs()
5203 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
5204 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { in sync_regs()
5205 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
5209 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs()
5210 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); in sync_regs()
5211 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
5213 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
5214 restore_access_regs(vcpu->run->s.regs.acrs); in sync_regs()
5215 vcpu->arch.acrs_loaded = true; in sync_regs()
5216 kvm_s390_fpu_load(vcpu->run); in sync_regs()
5230 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; in sync_regs()
5231 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & in sync_regs()
5235 kvm_run->kvm_dirty_regs = 0; in sync_regs()
5240 struct kvm_run *kvm_run = vcpu->run; in store_regs_fmt2()
5242 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs_fmt2()
5243 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs_fmt2()
5244 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs_fmt2()
5245 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs_fmt2()
5246 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; in store_regs_fmt2()
5250 if (vcpu->arch.gs_enabled) in store_regs_fmt2()
5251 save_gs_cb(current->thread.gs_cb); in store_regs_fmt2()
5252 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs_fmt2()
5253 restore_gs_cb(vcpu->arch.host_gscb); in store_regs_fmt2()
5254 if (!vcpu->arch.host_gscb) in store_regs_fmt2()
5256 vcpu->arch.host_gscb = NULL; in store_regs_fmt2()
5264 struct kvm_run *kvm_run = vcpu->run; in store_regs()
5266 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
5267 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
5268 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
5269 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
5270 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); in store_regs()
5271 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
5272 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
5273 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
5274 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
5275 save_access_regs(vcpu->run->s.regs.acrs); in store_regs()
5276 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
5277 vcpu->arch.acrs_loaded = false; in store_regs()
5278 kvm_s390_fpu_store(vcpu->run); in store_regs()
5285 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
5295 if (vcpu->kvm->arch.pv.dumping) in kvm_arch_vcpu_ioctl_run()
5296 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5298 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
5299 return -EINTR; in kvm_arch_vcpu_ioctl_run()
5301 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || in kvm_arch_vcpu_ioctl_run()
5302 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) in kvm_arch_vcpu_ioctl_run()
5303 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5319 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
5323 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
5324 rc = -EINVAL; in kvm_arch_vcpu_ioctl_run()
5336 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
5337 rc = -EINTR; in kvm_arch_vcpu_ioctl_run()
5345 if (rc == -EREMOTE) { in kvm_arch_vcpu_ioctl_run()
5356 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
5365 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5366 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5379 return -EFAULT; in kvm_s390_store_status_unloaded()
5383 return -EFAULT; in kvm_s390_store_status_unloaded()
5386 gpa -= __LC_FPREGS_SAVE_AREA; in kvm_s390_store_status_unloaded()
5390 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
5395 vcpu->run->s.regs.fprs, 128); in kvm_s390_store_status_unloaded()
5398 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
5400 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
5404 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
5406 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
5410 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
5414 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
5416 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
5417 return rc ? -EFAULT : 0; in kvm_s390_store_status_unloaded()
5427 kvm_s390_fpu_store(vcpu->run); in kvm_s390_vcpu_store_status()
5428 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
5464 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
5466 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5467 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
5473 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5479 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) in kvm_s390_vcpu_start()
5484 /* we're the only active VCPU -> speed it up */ in kvm_s390_vcpu_start()
5492 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
5502 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in kvm_s390_vcpu_start()
5508 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5520 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
5522 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5523 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
5529 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5546 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); in kvm_s390_vcpu_stop()
5562 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5571 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
5572 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5574 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
5576 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
5577 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
5578 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
5579 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
5584 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5593 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_sida_op()
5597 if (mop->flags || !mop->size) in kvm_s390_vcpu_sida_op()
5598 return -EINVAL; in kvm_s390_vcpu_sida_op()
5599 if (mop->size + mop->sida_offset < mop->size) in kvm_s390_vcpu_sida_op()
5600 return -EINVAL; in kvm_s390_vcpu_sida_op()
5601 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) in kvm_s390_vcpu_sida_op()
5602 return -E2BIG; in kvm_s390_vcpu_sida_op()
5604 return -EINVAL; in kvm_s390_vcpu_sida_op()
5606 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset; in kvm_s390_vcpu_sida_op()
5608 switch (mop->op) { in kvm_s390_vcpu_sida_op()
5610 if (copy_to_user(uaddr, sida_addr, mop->size)) in kvm_s390_vcpu_sida_op()
5611 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5615 if (copy_from_user(sida_addr, uaddr, mop->size)) in kvm_s390_vcpu_sida_op()
5616 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5625 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_mem_op()
5635 if (mop->ar >= NUM_ACRS) in kvm_s390_vcpu_mem_op()
5636 return -EINVAL; in kvm_s390_vcpu_mem_op()
5638 return -EINVAL; in kvm_s390_vcpu_mem_op()
5639 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vcpu_mem_op()
5640 tmpbuf = vmalloc(mop->size); in kvm_s390_vcpu_mem_op()
5642 return -ENOMEM; in kvm_s390_vcpu_mem_op()
5645 acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vcpu_mem_op()
5646 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vcpu_mem_op()
5647 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, in kvm_s390_vcpu_mem_op()
5648 acc_mode, mop->key); in kvm_s390_vcpu_mem_op()
5652 r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5653 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5656 if (copy_to_user(uaddr, tmpbuf, mop->size)) { in kvm_s390_vcpu_mem_op()
5657 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5661 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vcpu_mem_op()
5662 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5665 r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5666 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5670 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) in kvm_s390_vcpu_mem_op()
5671 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_vcpu_mem_op()
5683 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_vcpu_memsida_op()
5685 switch (mop->op) { in kvm_s390_vcpu_memsida_op()
5692 /* we are locked against sida going away by the vcpu->mutex */ in kvm_s390_vcpu_memsida_op()
5696 r = -EINVAL; in kvm_s390_vcpu_memsida_op()
5699 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_vcpu_memsida_op()
5706 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
5715 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5724 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5726 return -EINVAL; in kvm_arch_vcpu_async_ioctl()
5731 rc = -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
5736 * To simplify single stepping of userspace-emulated instructions, in kvm_arch_vcpu_async_ioctl()
5743 vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING; in kvm_arch_vcpu_async_ioctl()
5756 if (!vcpu->kvm->arch.pv.dumping) in kvm_s390_handle_pv_vcpu_dump()
5757 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5759 if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp))) in kvm_s390_handle_pv_vcpu_dump()
5760 return -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5764 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5768 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5772 return -ENOMEM; in kvm_s390_handle_pv_vcpu_dump()
5774 ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5777 vcpu->vcpu_id, cmd->rc, cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5780 ret = -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5784 ret = -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5793 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
5803 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5805 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5810 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5850 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5853 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5867 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5871 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5872 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5876 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
5884 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5888 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5889 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5893 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
5899 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5901 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5907 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5919 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5925 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5931 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5943 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5947 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5959 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5963 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5967 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5980 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5984 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
5994 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
5995 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
5996 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
5997 get_page(vmf->page); in kvm_arch_vcpu_fault()
6017 if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS) in kvm_arch_prepare_memory_region()
6018 return -EINVAL; in kvm_arch_prepare_memory_region()
6022 return -EINVAL; in kvm_arch_prepare_memory_region()
6032 if (new->userspace_addr & 0xffffful) in kvm_arch_prepare_memory_region()
6033 return -EINVAL; in kvm_arch_prepare_memory_region()
6035 size = new->npages * PAGE_SIZE; in kvm_arch_prepare_memory_region()
6037 return -EINVAL; in kvm_arch_prepare_memory_region()
6039 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
6040 return -EINVAL; in kvm_arch_prepare_memory_region()
6043 if (!kvm->arch.migration_mode) in kvm_arch_prepare_memory_region()
6048 * - userspace creates a new memslot with dirty logging off, in kvm_arch_prepare_memory_region()
6049 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and in kvm_arch_prepare_memory_region()
6055 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_arch_prepare_memory_region()
6074 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6075 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
6078 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6079 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
6084 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, in kvm_arch_commit_memory_region()
6085 new->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6086 new->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
6111 return -ENODEV; in kvm_s390_init()
6116 return -EINVAL; in kvm_s390_init()