1# 2# Migration test main engine 3# 4# Copyright (c) 2016 Red Hat, Inc. 5# 6# This library is free software; you can redistribute it and/or 7# modify it under the terms of the GNU Lesser General Public 8# License as published by the Free Software Foundation; either 9# version 2.1 of the License, or (at your option) any later version. 10# 11# This library is distributed in the hope that it will be useful, 12# but WITHOUT ANY WARRANTY; without even the implied warranty of 13# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14# Lesser General Public License for more details. 15# 16# You should have received a copy of the GNU Lesser General Public 17# License along with this library; if not, see <http://www.gnu.org/licenses/>. 18# 19 20 21import os 22import re 23import sys 24import time 25 26from guestperf.progress import Progress, ProgressStats 27from guestperf.report import Report 28from guestperf.timings import TimingRecord, Timings 29 30sys.path.append(os.path.join(os.path.dirname(__file__), 31 '..', '..', '..', 'python')) 32from qemu.machine import QEMUMachine 33 34# multifd supported compression algorithms 35MULTIFD_CMP_ALGS = ("zlib", "zstd", "qpl", "uadk") 36 37class Engine(object): 38 39 def __init__(self, binary, dst_host, kernel, initrd, transport="tcp", 40 sleep=15, verbose=False, debug=False): 41 42 self._binary = binary # Path to QEMU binary 43 self._dst_host = dst_host # Hostname of target host 44 self._kernel = kernel # Path to kernel image 45 self._initrd = initrd # Path to stress initrd 46 self._transport = transport # 'unix' or 'tcp' or 'rdma' 47 self._sleep = sleep 48 self._verbose = verbose 49 self._debug = debug 50 51 if debug: 52 self._verbose = debug 53 54 def _vcpu_timing(self, pid, tid_list): 55 records = [] 56 now = time.time() 57 58 jiffies_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK']) 59 for tid in tid_list: 60 statfile = "/proc/%d/task/%d/stat" % (pid, tid) 61 with open(statfile, "r") as fh: 62 stat = fh.readline() 63 fields = stat.split(" ") 64 stime = int(fields[13]) 65 utime = int(fields[14]) 66 records.append(TimingRecord(tid, now, 1000 * (stime + utime) / jiffies_per_sec)) 67 return records 68 69 def _cpu_timing(self, pid): 70 now = time.time() 71 72 jiffies_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK']) 73 statfile = "/proc/%d/stat" % pid 74 with open(statfile, "r") as fh: 75 stat = fh.readline() 76 fields = stat.split(" ") 77 stime = int(fields[13]) 78 utime = int(fields[14]) 79 return TimingRecord(pid, now, 1000 * (stime + utime) / jiffies_per_sec) 80 81 def _migrate_progress(self, vm): 82 info = vm.cmd("query-migrate") 83 84 if "ram" not in info: 85 info["ram"] = {} 86 87 return Progress( 88 info.get("status", "active"), 89 ProgressStats( 90 info["ram"].get("transferred", 0), 91 info["ram"].get("remaining", 0), 92 info["ram"].get("total", 0), 93 info["ram"].get("duplicate", 0), 94 info["ram"].get("skipped", 0), 95 info["ram"].get("normal", 0), 96 info["ram"].get("normal-bytes", 0), 97 info["ram"].get("dirty-pages-rate", 0), 98 info["ram"].get("mbps", 0), 99 info["ram"].get("dirty-sync-count", 0) 100 ), 101 time.time(), 102 info.get("total-time", 0), 103 info.get("downtime", 0), 104 info.get("expected-downtime", 0), 105 info.get("setup-time", 0), 106 info.get("cpu-throttle-percentage", 0), 107 info.get("dirty-limit-throttle-time-per-round", 0), 108 info.get("dirty-limit-ring-full-time", 0), 109 ) 110 111 def _migrate(self, hardware, scenario, src, 112 dst, connect_uri, defer_migrate): 113 src_qemu_time = [] 114 src_vcpu_time = [] 115 src_pid = src.get_pid() 116 117 vcpus = src.cmd("query-cpus-fast") 118 src_threads = [] 119 for vcpu in vcpus: 120 src_threads.append(vcpu["thread-id"]) 121 122 # XXX how to get dst timings on remote host ? 123 124 if self._verbose: 125 print("Sleeping %d seconds for initial guest workload run" % self._sleep) 126 sleep_secs = self._sleep 127 while sleep_secs > 1: 128 src_qemu_time.append(self._cpu_timing(src_pid)) 129 src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads)) 130 time.sleep(1) 131 sleep_secs -= 1 132 133 if self._verbose: 134 print("Starting migration") 135 if scenario._auto_converge: 136 resp = src.cmd("migrate-set-capabilities", 137 capabilities = [ 138 { "capability": "auto-converge", 139 "state": True } 140 ]) 141 resp = src.cmd("migrate-set-parameters", 142 cpu_throttle_increment=scenario._auto_converge_step) 143 144 if scenario._post_copy: 145 resp = src.cmd("migrate-set-capabilities", 146 capabilities = [ 147 { "capability": "postcopy-ram", 148 "state": True } 149 ]) 150 resp = dst.cmd("migrate-set-capabilities", 151 capabilities = [ 152 { "capability": "postcopy-ram", 153 "state": True } 154 ]) 155 156 resp = src.cmd("migrate-set-parameters", 157 max_bandwidth=scenario._bandwidth * 1024 * 1024) 158 159 resp = src.cmd("migrate-set-parameters", 160 downtime_limit=scenario._downtime) 161 162 if scenario._compression_mt: 163 resp = src.cmd("migrate-set-capabilities", 164 capabilities = [ 165 { "capability": "compress", 166 "state": True } 167 ]) 168 resp = src.cmd("migrate-set-parameters", 169 compress_threads=scenario._compression_mt_threads) 170 resp = dst.cmd("migrate-set-capabilities", 171 capabilities = [ 172 { "capability": "compress", 173 "state": True } 174 ]) 175 resp = dst.cmd("migrate-set-parameters", 176 decompress_threads=scenario._compression_mt_threads) 177 178 if scenario._compression_xbzrle: 179 resp = src.cmd("migrate-set-capabilities", 180 capabilities = [ 181 { "capability": "xbzrle", 182 "state": True } 183 ]) 184 resp = dst.cmd("migrate-set-capabilities", 185 capabilities = [ 186 { "capability": "xbzrle", 187 "state": True } 188 ]) 189 resp = src.cmd("migrate-set-parameters", 190 xbzrle_cache_size=( 191 hardware._mem * 192 1024 * 1024 * 1024 / 100 * 193 scenario._compression_xbzrle_cache)) 194 195 if scenario._multifd: 196 if (scenario._multifd_compression and 197 (scenario._multifd_compression not in MULTIFD_CMP_ALGS)): 198 raise Exception("unsupported multifd compression " 199 "algorithm: %s" % 200 scenario._multifd_compression) 201 202 resp = src.cmd("migrate-set-capabilities", 203 capabilities = [ 204 { "capability": "multifd", 205 "state": True } 206 ]) 207 resp = src.cmd("migrate-set-parameters", 208 multifd_channels=scenario._multifd_channels) 209 resp = dst.cmd("migrate-set-capabilities", 210 capabilities = [ 211 { "capability": "multifd", 212 "state": True } 213 ]) 214 resp = dst.cmd("migrate-set-parameters", 215 multifd_channels=scenario._multifd_channels) 216 217 if scenario._multifd_compression: 218 resp = src.cmd("migrate-set-parameters", 219 multifd_compression=scenario._multifd_compression) 220 resp = dst.cmd("migrate-set-parameters", 221 multifd_compression=scenario._multifd_compression) 222 223 if scenario._dirty_limit: 224 if not hardware._dirty_ring_size: 225 raise Exception("dirty ring size must be configured when " 226 "testing dirty limit migration") 227 228 resp = src.cmd("migrate-set-capabilities", 229 capabilities = [ 230 { "capability": "dirty-limit", 231 "state": True } 232 ]) 233 resp = src.cmd("migrate-set-parameters", 234 x_vcpu_dirty_limit_period=scenario._x_vcpu_dirty_limit_period) 235 resp = src.cmd("migrate-set-parameters", 236 vcpu_dirty_limit=scenario._vcpu_dirty_limit) 237 238 if defer_migrate: 239 resp = dst.cmd("migrate-incoming", uri=connect_uri) 240 resp = src.cmd("migrate", uri=connect_uri) 241 242 post_copy = False 243 paused = False 244 245 progress_history = [] 246 247 start = time.time() 248 loop = 0 249 while True: 250 loop = loop + 1 251 time.sleep(0.05) 252 253 progress = self._migrate_progress(src) 254 if (loop % 20) == 0: 255 src_qemu_time.append(self._cpu_timing(src_pid)) 256 src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads)) 257 258 if (len(progress_history) == 0 or 259 (progress_history[-1]._ram._iterations < 260 progress._ram._iterations)): 261 progress_history.append(progress) 262 263 if progress._status in ("completed", "failed", "cancelled"): 264 if progress._status == "completed" and paused: 265 dst.cmd("cont") 266 if progress_history[-1] != progress: 267 progress_history.append(progress) 268 269 if progress._status == "completed": 270 if self._verbose: 271 print("Sleeping %d seconds for final guest workload run" % self._sleep) 272 sleep_secs = self._sleep 273 while sleep_secs > 1: 274 time.sleep(1) 275 src_qemu_time.append(self._cpu_timing(src_pid)) 276 src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads)) 277 sleep_secs -= 1 278 279 return [progress_history, src_qemu_time, src_vcpu_time] 280 281 if self._verbose and (loop % 20) == 0: 282 print("Iter %d: remain %5dMB of %5dMB (total %5dMB @ %5dMb/sec)" % ( 283 progress._ram._iterations, 284 progress._ram._remaining_bytes / (1024 * 1024), 285 progress._ram._total_bytes / (1024 * 1024), 286 progress._ram._transferred_bytes / (1024 * 1024), 287 progress._ram._transfer_rate_mbs, 288 )) 289 290 if progress._ram._iterations > scenario._max_iters: 291 if self._verbose: 292 print("No completion after %d iterations over RAM" % scenario._max_iters) 293 src.cmd("migrate_cancel") 294 continue 295 296 if time.time() > (start + scenario._max_time): 297 if self._verbose: 298 print("No completion after %d seconds" % scenario._max_time) 299 src.cmd("migrate_cancel") 300 continue 301 302 if (scenario._post_copy and 303 progress._ram._iterations >= scenario._post_copy_iters and 304 not post_copy): 305 if self._verbose: 306 print("Switching to post-copy after %d iterations" % scenario._post_copy_iters) 307 resp = src.cmd("migrate-start-postcopy") 308 post_copy = True 309 310 if (scenario._pause and 311 progress._ram._iterations >= scenario._pause_iters and 312 not paused): 313 if self._verbose: 314 print("Pausing VM after %d iterations" % scenario._pause_iters) 315 resp = src.cmd("stop") 316 paused = True 317 318 def _is_ppc64le(self): 319 _, _, _, _, machine = os.uname() 320 if machine == "ppc64le": 321 return True 322 return False 323 324 def _get_guest_console_args(self): 325 if self._is_ppc64le(): 326 return "console=hvc0" 327 else: 328 return "console=ttyS0" 329 330 def _get_qemu_serial_args(self): 331 if self._is_ppc64le(): 332 return ["-chardev", "stdio,id=cdev0", 333 "-device", "spapr-vty,chardev=cdev0"] 334 else: 335 return ["-chardev", "stdio,id=cdev0", 336 "-device", "isa-serial,chardev=cdev0"] 337 338 def _get_common_args(self, hardware, tunnelled=False): 339 args = [ 340 "noapic", 341 "edd=off", 342 "printk.time=1", 343 "noreplace-smp", 344 "cgroup_disable=memory", 345 "pci=noearly", 346 ] 347 348 args.append(self._get_guest_console_args()) 349 350 if self._debug: 351 args.append("debug") 352 else: 353 args.append("quiet") 354 355 args.append("ramsize=%s" % hardware._mem) 356 357 cmdline = " ".join(args) 358 if tunnelled: 359 cmdline = "'" + cmdline + "'" 360 361 argv = [ 362 "-cpu", "host", 363 "-kernel", self._kernel, 364 "-initrd", self._initrd, 365 "-append", cmdline, 366 "-m", str((hardware._mem * 1024) + 512), 367 "-smp", str(hardware._cpus), 368 ] 369 if hardware._dirty_ring_size: 370 argv.extend(["-accel", "kvm,dirty-ring-size=%s" % 371 hardware._dirty_ring_size]) 372 else: 373 argv.extend(["-accel", "kvm"]) 374 375 argv.extend(self._get_qemu_serial_args()) 376 377 if self._debug: 378 argv.extend(["-machine", "graphics=off"]) 379 380 if hardware._prealloc_pages: 381 argv_source += ["-mem-path", "/dev/shm", 382 "-mem-prealloc"] 383 if hardware._locked_pages: 384 argv_source += ["-overcommit", "mem-lock=on"] 385 if hardware._huge_pages: 386 pass 387 388 return argv 389 390 def _get_src_args(self, hardware): 391 return self._get_common_args(hardware) 392 393 def _get_dst_args(self, hardware, uri, defer_migrate): 394 tunnelled = False 395 if self._dst_host != "localhost": 396 tunnelled = True 397 argv = self._get_common_args(hardware, tunnelled) 398 399 if defer_migrate: 400 return argv + ["-incoming", "defer"] 401 return argv + ["-incoming", uri] 402 403 @staticmethod 404 def _get_common_wrapper(cpu_bind, mem_bind): 405 wrapper = [] 406 if len(cpu_bind) > 0 or len(mem_bind) > 0: 407 wrapper.append("numactl") 408 if cpu_bind: 409 wrapper.append("--physcpubind=%s" % ",".join(cpu_bind)) 410 if mem_bind: 411 wrapper.append("--membind=%s" % ",".join(mem_bind)) 412 413 return wrapper 414 415 def _get_src_wrapper(self, hardware): 416 return self._get_common_wrapper(hardware._src_cpu_bind, hardware._src_mem_bind) 417 418 def _get_dst_wrapper(self, hardware): 419 wrapper = self._get_common_wrapper(hardware._dst_cpu_bind, hardware._dst_mem_bind) 420 if self._dst_host != "localhost": 421 return ["ssh", 422 "-R", "9001:localhost:9001", 423 self._dst_host] + wrapper 424 else: 425 return wrapper 426 427 def _get_timings(self, vm): 428 log = vm.get_log() 429 if not log: 430 return [] 431 if self._debug: 432 print(log) 433 434 regex = r"[^\s]+\s\((\d+)\):\sINFO:\s(\d+)ms\scopied\s\d+\sGB\sin\s(\d+)ms" 435 matcher = re.compile(regex) 436 records = [] 437 for line in log.split("\n"): 438 match = matcher.match(line) 439 if match: 440 records.append(TimingRecord(int(match.group(1)), 441 int(match.group(2)) / 1000.0, 442 int(match.group(3)))) 443 return records 444 445 def run(self, hardware, scenario, result_dir=os.getcwd()): 446 abs_result_dir = os.path.join(result_dir, scenario._name) 447 defer_migrate = False 448 449 if self._transport == "tcp": 450 uri = "tcp:%s:9000" % self._dst_host 451 elif self._transport == "rdma": 452 uri = "rdma:%s:9000" % self._dst_host 453 elif self._transport == "unix": 454 if self._dst_host != "localhost": 455 raise Exception("Running use unix migration transport for non-local host") 456 uri = "unix:/var/tmp/qemu-migrate-%d.migrate" % os.getpid() 457 try: 458 os.remove(uri[5:]) 459 os.remove(monaddr) 460 except: 461 pass 462 463 if scenario._multifd: 464 defer_migrate = True 465 466 if self._dst_host != "localhost": 467 dstmonaddr = ("localhost", 9001) 468 else: 469 dstmonaddr = "/var/tmp/qemu-dst-%d-monitor.sock" % os.getpid() 470 srcmonaddr = "/var/tmp/qemu-src-%d-monitor.sock" % os.getpid() 471 472 src = QEMUMachine(self._binary, 473 args=self._get_src_args(hardware), 474 wrapper=self._get_src_wrapper(hardware), 475 name="qemu-src-%d" % os.getpid(), 476 monitor_address=srcmonaddr) 477 478 dst = QEMUMachine(self._binary, 479 args=self._get_dst_args(hardware, uri, defer_migrate), 480 wrapper=self._get_dst_wrapper(hardware), 481 name="qemu-dst-%d" % os.getpid(), 482 monitor_address=dstmonaddr) 483 484 try: 485 src.launch() 486 dst.launch() 487 488 ret = self._migrate(hardware, scenario, src, 489 dst, uri, defer_migrate) 490 progress_history = ret[0] 491 qemu_timings = ret[1] 492 vcpu_timings = ret[2] 493 if uri[0:5] == "unix:" and os.path.exists(uri[5:]): 494 os.remove(uri[5:]) 495 496 if os.path.exists(srcmonaddr): 497 os.remove(srcmonaddr) 498 499 if self._dst_host == "localhost" and os.path.exists(dstmonaddr): 500 os.remove(dstmonaddr) 501 502 if self._verbose: 503 print("Finished migration") 504 505 src.shutdown() 506 dst.shutdown() 507 508 return Report(hardware, scenario, progress_history, 509 Timings(self._get_timings(src) + self._get_timings(dst)), 510 Timings(qemu_timings), 511 Timings(vcpu_timings), 512 self._binary, self._dst_host, self._kernel, 513 self._initrd, self._transport, self._sleep) 514 except Exception as e: 515 if self._debug: 516 print("Failed: %s" % str(e)) 517 try: 518 src.shutdown() 519 except: 520 pass 521 try: 522 dst.shutdown() 523 except: 524 pass 525 526 if self._debug: 527 print(src.get_log()) 528 print(dst.get_log()) 529 raise 530 531