/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}
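
/**
 * Compute the maximum image size for a given cluster and table size
 *
 * The L1 and L2 tables are each table_size clusters of 64-bit entries, so a
 * table holds (table_size * cluster_size / 8) entries.  Each L2 entry maps
 * one cluster and each L1 entry maps one L2 table.  For example, with 64 KiB
 * clusters and a table_size of 4, a table holds 32768 entries, one L2 table
 * maps 32768 * 64 KiB = 2 GiB, and the image can grow to
 * 32768 * 2 GiB = 64 TiB.
 */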
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);
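
/* The QED_F_NEED_CHECK flag is cleared once allocating write requests have
 * been quiescent for a while.  The sequence, driven by callbacks, is:
 *
 *   qed_need_check_timer_cb()            plug allocating writes, then flush
 *   qed_clear_need_check()               clear the header flag, write header
 *   qed_flush_after_clear_need_check()   flush the header, unplug writes
 *
 * While plugged, new allocating writes queue up on allocating_write_reqs and
 * the first of them is restarted by the unplug.
 */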

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    s->bs = bs;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EMEDIUMTYPE;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;
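
    /* As an illustration, with 64 KiB clusters and table_size 4: l2_shift is
     * 16, table_nelems is 32768 and l1_shift is 31.  A byte position then
     * decomposes as l1 index = pos >> 31, l2 index = (pos >> 16) & 0x7fff,
     * with the low 16 bits giving the offset into the cluster.
     */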

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;
    s->need_check_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                       qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}
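
/* qed_create() below produces this initial layout (header_size is one
 * cluster and l1_table_offset equals cluster_size, so the header and the
 * backing filename share cluster 0):
 *
 *   cluster 0:                 QED header, then the backing filename string
 *   next table_size clusters:  L1 table, all zeroes (unallocated)
 *
 * L2 tables and data clusters are only allocated later, as writes occur.
 */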
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL, &local_err);
    if (ret < 0) {
        qerror_report_err(local_err);
        error_free(local_err);
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB,
                         &local_err);
    if (ret < 0) {
        qerror_report_err(local_err);
        error_free(local_err);
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options,
                           Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] "
                        "and power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] "
                        "and power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
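        /* Initialize status to a value that qed_is_allocated_cb() never
         * produces (its results are flags | offset, BDRV_BLOCK_ZERO, 0, or a
         * negative errno), so it can act as the "callback not invoked yet"
         * sentinel for the loop below.
         */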
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}
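
/* To illustrate qed_update_l2_table() below: with 64 KiB clusters, linking
 * n = 3 clusters starting at cluster offset 0x80000 stores 0x80000, 0x90000
 * and 0xa0000 into consecutive L2 entries.  The zero and unallocated markers
 * are repeated verbatim instead, since they are not byte offsets.
 */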

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}
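
/* An allocating write thus proceeds through the following stages:
 *
 *   1. qed_aio_write_prefill()/postfill() copy the untouched head and tail
 *      of the clusters from the backing file (no-ops without one).
 *   2. qed_aio_write_main() writes the guest data.
 *   3. With a backing file the data clusters are flushed before the L2
 *      update; without one, QED_F_NEED_CHECK covers crash recovery instead.
 *   4. qed_aio_write_l2_update() links the clusters, first allocating a new
 *      L2 table and linking it into the L1 table when needed.
 */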

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}
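
/* Regions of a write into freshly allocated clusters, with prefill and
 * postfill below filling the head and tail from the backing file:
 *
 *   cluster start                                          cluster end
 *   |<-- prefill -->|<-- guest data (cur_qiov) -->|<-- postfill -->|
 */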

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}
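
/* Each pass of qed_aio_next_io() below handles one run of contiguous
 * clusters sharing the same allocation state.  A request spanning, say, an
 * allocated run, an unallocated run and a zero run is processed in three
 * iterations, cur_pos advancing by cur_qiov.size each time until it reaches
 * end_pos.
 */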

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}
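
/* qed_co_write_zeroes_cb() may run before or after the coroutine below gets
 * a chance to yield.  If it has already run, done is set and the coroutine
 * never yields; otherwise the coroutine records itself in cb.co, yields, and
 * is re-entered by the callback.
 */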
static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors,
                                                 BdrvRequestFlags flags)
{
    BlockDriverAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing_hd) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}
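
/* Called when something else may have modified the image file behind our
 * back (e.g. after an incoming migration).  Reopening from scratch discards
 * the cached header, L1 table and L2 cache so they are re-read from disk.
 */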
static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_close(bs);
    memset(s, 0, sizeof(BDRVQEDState));
    bdrv_qed_open(bs, NULL, bs->open_flags, NULL);
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);