/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Migration Test for s390x
 *
 * Verifies that guest state (vector registers, guarded-storage control
 * block, and the associated control-register bits) survives a live
 * migration. CPU0 drives the test harness/console; CPU1 holds the state
 * under test across the migration point.
 *
 * Copyright IBM Corp. 2022
 *
 * Authors:
 *  Nico Boehr <nrb@linux.ibm.com>
 */
#include <libcflat.h>
#include <asm/arch_def.h>
#include <asm/vector.h>
#include <asm/barrier.h>
#include <asm/facility.h>
#include <gs.h>
#include <bitops.h>
#include <smp.h>

/* Guarded-storage state written before migration and compared after. */
static struct gs_cb gs_cb;
static struct gs_epl gs_epl;

/*
 * Cross-CPU handshake flags, plain ints spun on with mb() (and, on CPU1's
 * vector path, polled directly from inline asm via clfhsi).
 */
/* set by CPU1 to signal it has completed */
static int flag_thread_complete;
/* set by CPU0 to signal migration has completed */
static int flag_migration_complete;

/*
 * Fill the guarded-storage control block with recognizable values and load
 * it, so check_gs_regs() can detect loss of state across migration.
 */
static void write_gs_regs(void)
{
	const unsigned long gs_area = 0x2000000;
	const unsigned long gsc = 25; /* align = 32 M, section size = 512K */

	/* designation: origin of the guarded-storage area plus characteristic */
	gs_cb.gsd = gs_area | gsc;
	/* arbitrary non-zero section mask pattern */
	gs_cb.gssm = 0xfeedc0ffe;
	gs_cb.gs_epl_a = (uint64_t) &gs_epl;

	load_gs_cb(&gs_cb);
}

/*
 * Store the current guarded-storage control block and report whether each
 * field still matches what write_gs_regs() loaded before migration.
 */
static void check_gs_regs(void)
{
	struct gs_cb gs_cb_after_migration;

	store_gs_cb(&gs_cb_after_migration);

	report_prefix_push("guarded-storage registers");

	report(gs_cb_after_migration.gsd == gs_cb.gsd, "gsd matches");
	report(gs_cb_after_migration.gssm == gs_cb.gssm, "gssm matches");
	report(gs_cb_after_migration.gs_epl_a == gs_cb.gs_epl_a, "gs_epl_a matches");

	report_prefix_pop();
}

/* Facility 129: vector facility for the guest */
static bool have_vector_facility(void)
{
	return test_facility(129);
}

/* Facility 133: guarded-storage facility */
static bool have_guarded_storage_facility(void)
{
	return test_facility(133);
}

/*
 * Runs on CPU1. Sets up guarded-storage and vector state, signals CPU0,
 * busy-waits until CPU0 reports the migration is done, then checks that
 * all of that state survived. Skips each part whose facility is absent.
 */
static void test_func(void)
{
	uint8_t expected_vec_contents[VEC_REGISTER_NUM][VEC_REGISTER_SIZE];
	uint8_t actual_vec_contents[VEC_REGISTER_NUM][VEC_REGISTER_SIZE];
	uint8_t *vec_reg;
	int i;
	int vec_result = 0;

	if (have_guarded_storage_facility()) {
		/* guarded-storage control must be enabled before load_gs_cb() */
		ctl_set_bit(2, CTL2_GUARDED_STORAGE);

		write_gs_regs();
	}

	if (have_vector_facility()) {
		for (i = 0; i < VEC_REGISTER_NUM; i++) {
			vec_reg = &expected_vec_contents[i][0];
			/* i+1 to avoid zero content */
			memset(vec_reg, i + 1, VEC_REGISTER_SIZE);
		}

		/* vector instructions trap unless CR0 vector bit is set */
		ctl_set_bit(0, CTL0_VECTOR);

		/*
		 * It is important loading the vector/floating point registers and
		 * comparing their contents occurs in the same inline assembly block.
		 * Otherwise, the compiler is allowed to re-use the registers for
		 * something else in between.
		 * For this very reason, this also runs on a second CPU, so all the
		 * complex console stuff can be done in C on the first CPU and here we
		 * just need to wait for it to set the flag.
		 *
		 * The flag operands are passed as "Q" (memory) inputs and stored
		 * to from inside the asm; the "memory" clobber makes the compiler
		 * treat their values as changed afterwards.
		 */
		asm inline(
			"	.machine z13\n"
			/* load vector registers: vlm handles at most 16 registers at a time */
			"	vlm 0,15, 0(%[expected_vec_reg])\n"
			"	vlm 16,31, 256(%[expected_vec_reg])\n"
			/* inform CPU0 we are done, it will request migration */
			"	mvhi %[flag_thread_complete], 1\n"
			/* wait for migration to finish */
			"0:	clfhsi %[flag_migration_complete], 1\n"
			"	jnz 0b\n"
			/*
			 * store vector register contents in actual_vec_reg: vstm
			 * handles at most 16 registers at a time
			 */
			"	vstm 0,15, 0(%[actual_vec_reg])\n"
			"	vstm 16,31, 256(%[actual_vec_reg])\n"
			/*
			 * compare the contents in expected_vec_reg with actual_vec_reg:
			 * clc handles at most 256 bytes at a time
			 */
			"	clc 0(256, %[expected_vec_reg]), 0(%[actual_vec_reg])\n"
			"	jnz 1f\n"
			"	clc 256(256, %[expected_vec_reg]), 256(%[actual_vec_reg])\n"
			"	jnz 1f\n"
			/* success */
			"	mvhi %[vec_result], 1\n"
			"1:"
			:
			: [expected_vec_reg] "a"(expected_vec_contents),
			  [actual_vec_reg] "a"(actual_vec_contents),
			  [flag_thread_complete] "Q"(flag_thread_complete),
			  [flag_migration_complete] "Q"(flag_migration_complete),
			  [vec_result] "Q"(vec_result)
			: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
			  "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
			  "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27",
			  "v28", "v29", "v30", "v31", "cc", "memory"
		);

		report(vec_result, "vector contents match");

		/* the vector enable bit itself must also survive migration */
		report(stctg(0) & BIT(CTL0_VECTOR), "ctl0 vector bit set");

		ctl_clear_bit(0, CTL0_VECTOR);
	} else {
		/* no vector facility: do the handshake in plain C instead */
		flag_thread_complete = 1;
		while(!flag_migration_complete)
			mb();
	}

	report_pass("Migrated");

	if (have_guarded_storage_facility()) {
		check_gs_regs();

		/* the guarded-storage enable bit must also survive migration */
		report(stctg(2) & BIT(CTL2_GUARDED_STORAGE), "ctl2 guarded-storage bit set");

		ctl_clear_bit(2, CTL2_GUARDED_STORAGE);
	}

	/* final signal: CPU0 may now tear this CPU down */
	flag_thread_complete = 1;
}

/*
 * Runs on CPU0: starts CPU1 on test_func(), waits for its setup, asks the
 * test harness to migrate the guest, then signals CPU1 and waits for it to
 * finish its assertions.
 */
int main(void)
{
	struct psw psw;

	/* don't say migrate here otherwise we will migrate right away */
	report_prefix_push("migration");

	if (smp_query_num_cpus() == 1) {
		report_skip("need at least 2 cpus for this test");
		goto done;
	}

	/* Second CPU does the actual tests */
	psw.mask = extract_psw_mask();
	psw.addr = (unsigned long)test_func;
	smp_cpu_setup(1, psw);

	/* wait for thread setup */
	while(!flag_thread_complete)
		mb();
	/* reset so the same flag can signal test completion later */
	flag_thread_complete = 0;

	/* ask migrate_cmd to migrate (it listens for 'migrate') */
	puts("Please migrate me, then press return\n");

	/* wait for migration to finish, we will read a newline */
	(void)getchar();

	flag_migration_complete = 1;

	/* wait for thread to complete assertions */
	while(!flag_thread_complete)
		mb();

	smp_cpu_destroy(1);

done:
	report_prefix_pop();
	return report_summary();
}