/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"
#include "trace.h"

#ifdef __linux
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
    OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
    OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
    void            (*update_sense)(SCSIRequest *r);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
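    /* An LBA in device logical blocks maps to sector = lba * (blocksize / 512);
     * see e.g. scsi_disk_emulate_write_same() below. */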
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0 || (r->status && *r->status)) {
        return scsi_handle_rw_error(r, -ret, acct_failed);
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
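    /* Completion callback for emulated commands that finish with a single
     * AIO operation (e.g. blk_aio_flush or blk_aio_pwrite_zeroes); it drops
     * the extra request reference taken when the AIO was submitted. */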
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}

static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Actually issue a read to the block device.  */
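/* Reached either directly from scsi_read_data() or, when FUA is emulated,
 * from scsi_do_read_cb() once the preceding flush has completed. */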
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
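 *
 * The chosen BlockErrorAction typically reflects the drive's rerror/werror
 * policy as reported by blk_get_error_action().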
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
                                                   is_read, error);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        switch (error) {
        case 0:
            /* A passthrough command has run and has produced sense data; check
             * whether the error has to be handled by the guest or should rather
             * pause the host.
             */
            assert(r->status && *r->status);
            if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
                /* These errors are handled by guest. */
                sdc->update_sense(&r->req);
                scsi_req_complete(&r->req, *r->status);
                return true;
            }
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
            break;
        case ENOMEDIUM:
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            break;
        case ENOMEM:
            scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
            break;
        case EINVAL:
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
            break;
        case ENOSPC:
            scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
            break;
        default:
            scsi_check_condition(r, SENSE_CODE(IO_ERROR));
            break;
        }
    }

    blk_error_action(s->qdev.conf.blk, action, is_read, error);
    if (action == BLOCK_ERROR_ACTION_IGNORE) {
        scsi_req_complete(&r->req, 0);
        return true;
    }

    if (action == BLOCK_ERROR_ACTION_STOP) {
        scsi_req_retry(&r->req);
    }
    return true;
}

static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2; /* ASCII */
            outbuf[buflen++] = 0;   /* not officially assigned */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}

static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
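    /* 'type' is the DATA TYPE field of READ DISC INFORMATION (byte 1,
     * bits 0..2); only type 0, standard disc information, is emulated. */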
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}

static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them. */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}

static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}

static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY]            = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING]                = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR]              = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL]              = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES]           = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned.  As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header.  This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}

static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / 512);
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / 512;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}

static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_scsi_disk_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}

static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
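        /* With the SPF bit (0x40) set, the header is 4 bytes: page code,
         * subpage code and a 16-bit length; otherwise it is 2 bytes: page
         * code and an 8-bit length. */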
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
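     *
     * For example, with max_lba == 999 (a 1000-block device):
     *   sector_num == 1000, nb_sectors == 0 -> last sector 1000 <= 1000, accepted
     *   sector_num == 999,  nb_sectors == 2 -> last sector 1001 >  1000, rejected
     *   sector_num == UINT64_MAX, nb_sectors == 1 -> sum wraps, first test fails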
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}

typedef struct UnmapCBData {
    SCSIDiskReq *r;
    uint8_t *inbuf;
    int count;
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t sector_num;
    uint32_t nb_sectors;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    if (data->count > 0) {
        sector_num = ldq_be_p(&data->inbuf[0]);
        nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        if (!check_lba_range(s, sector_num, nb_sectors)) {
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        sector_num * s->qdev.blocksize,
                                        nb_sectors * s->qdev.blocksize,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}

static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    scsi_unmap_complete_noio(data, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;
    int nb_sectors;
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_scsi_disk_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}

static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
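     *
     * (In practice this means that an ALLOCATION LENGTH above 64 KiB on an
     * emulated command is failed with an ILLEGAL REQUEST/INVALID FIELD sense.)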
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    buflen = req->cmd.xfer;
    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / 512;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
*/ 1992 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 1993 (req->cmd.buf[1] & 1) == 0); 1994 if (buflen < 0) { 1995 goto illegal_request; 1996 } 1997 break; 1998 case MECHANISM_STATUS: 1999 buflen = scsi_emulate_mechanism_status(s, outbuf); 2000 if (buflen < 0) { 2001 goto illegal_request; 2002 } 2003 break; 2004 case GET_CONFIGURATION: 2005 buflen = scsi_get_configuration(s, outbuf); 2006 if (buflen < 0) { 2007 goto illegal_request; 2008 } 2009 break; 2010 case GET_EVENT_STATUS_NOTIFICATION: 2011 buflen = scsi_get_event_status_notification(s, r, outbuf); 2012 if (buflen < 0) { 2013 goto illegal_request; 2014 } 2015 break; 2016 case READ_DISC_INFORMATION: 2017 buflen = scsi_read_disc_information(s, r, outbuf); 2018 if (buflen < 0) { 2019 goto illegal_request; 2020 } 2021 break; 2022 case READ_DVD_STRUCTURE: 2023 buflen = scsi_read_dvd_structure(s, r, outbuf); 2024 if (buflen < 0) { 2025 goto illegal_request; 2026 } 2027 break; 2028 case SERVICE_ACTION_IN_16: 2029 /* Service Action In subcommands. */ 2030 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2031 trace_scsi_disk_emulate_command_SAI_16(); 2032 memset(outbuf, 0, req->cmd.xfer); 2033 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2034 if (!nb_sectors) { 2035 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2036 return 0; 2037 } 2038 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2039 goto illegal_request; 2040 } 2041 nb_sectors /= s->qdev.blocksize / 512; 2042 /* Returned value is the address of the last sector. */ 2043 nb_sectors--; 2044 /* Remember the new size for read/write sanity checking. */ 2045 s->qdev.max_lba = nb_sectors; 2046 outbuf[0] = (nb_sectors >> 56) & 0xff; 2047 outbuf[1] = (nb_sectors >> 48) & 0xff; 2048 outbuf[2] = (nb_sectors >> 40) & 0xff; 2049 outbuf[3] = (nb_sectors >> 32) & 0xff; 2050 outbuf[4] = (nb_sectors >> 24) & 0xff; 2051 outbuf[5] = (nb_sectors >> 16) & 0xff; 2052 outbuf[6] = (nb_sectors >> 8) & 0xff; 2053 outbuf[7] = nb_sectors & 0xff; 2054 outbuf[8] = 0; 2055 outbuf[9] = 0; 2056 outbuf[10] = s->qdev.blocksize >> 8; 2057 outbuf[11] = 0; 2058 outbuf[12] = 0; 2059 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2060 2061 /* set TPE bit if the format supports discard */ 2062 if (s->qdev.conf.discard_granularity) { 2063 outbuf[14] = 0x80; 2064 } 2065 2066 /* Protection, exponent and lowest lba field left blank. */ 2067 break; 2068 } 2069 trace_scsi_disk_emulate_command_SAI_unsupported(); 2070 goto illegal_request; 2071 case SYNCHRONIZE_CACHE: 2072 /* The request is used as the AIO opaque value, so add a ref. 
*/ 2073 scsi_req_ref(&r->req); 2074 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2075 BLOCK_ACCT_FLUSH); 2076 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2077 return 0; 2078 case SEEK_10: 2079 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); 2080 if (r->req.cmd.lba > s->qdev.max_lba) { 2081 goto illegal_lba; 2082 } 2083 break; 2084 case MODE_SELECT: 2085 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); 2086 break; 2087 case MODE_SELECT_10: 2088 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); 2089 break; 2090 case UNMAP: 2091 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); 2092 break; 2093 case VERIFY_10: 2094 case VERIFY_12: 2095 case VERIFY_16: 2096 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); 2097 if (req->cmd.buf[1] & 6) { 2098 goto illegal_request; 2099 } 2100 break; 2101 case WRITE_SAME_10: 2102 case WRITE_SAME_16: 2103 trace_scsi_disk_emulate_command_WRITE_SAME( 2104 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); 2105 break; 2106 default: 2107 trace_scsi_disk_emulate_command_UNKNOWN(buf[0], 2108 scsi_command_name(buf[0])); 2109 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2110 return 0; 2111 } 2112 assert(!r->req.aiocb); 2113 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2114 if (r->iov.iov_len == 0) { 2115 scsi_req_complete(&r->req, GOOD); 2116 } 2117 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2118 assert(r->iov.iov_len == req->cmd.xfer); 2119 return -r->iov.iov_len; 2120 } else { 2121 return r->iov.iov_len; 2122 } 2123 2124 illegal_request: 2125 if (r->req.status == -1) { 2126 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2127 } 2128 return 0; 2129 2130 illegal_lba: 2131 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2132 return 0; 2133 } 2134 2135 /* Execute a scsi command. Returns the length of the data expected by the 2136 command. This will be Positive for data transfers from the device 2137 (eg. disk reads), negative for transfers to the device (eg. disk writes), 2138 and zero if the command does not transfer any data. */ 2139 2140 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2141 { 2142 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2143 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2144 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2145 uint32_t len; 2146 uint8_t command; 2147 2148 command = buf[0]; 2149 2150 if (!blk_is_available(s->qdev.conf.blk)) { 2151 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2152 return 0; 2153 } 2154 2155 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2156 switch (command) { 2157 case READ_6: 2158 case READ_10: 2159 case READ_12: 2160 case READ_16: 2161 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); 2162 /* Protection information is not supported. For SCSI versions 2 and 2163 * older (as determined by snooping the guest's INQUIRY commands), 2164 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 
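         * (RDPROTECT, WRPROTECT and VRPROTECT occupy bits 5..7 of CDB byte 1,
         * which is exactly what the 0xe0 mask in the check below tests.)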
2165 */ 2166 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2167 goto illegal_request; 2168 } 2169 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2170 goto illegal_lba; 2171 } 2172 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2173 r->sector_count = len * (s->qdev.blocksize / 512); 2174 break; 2175 case WRITE_6: 2176 case WRITE_10: 2177 case WRITE_12: 2178 case WRITE_16: 2179 case WRITE_VERIFY_10: 2180 case WRITE_VERIFY_12: 2181 case WRITE_VERIFY_16: 2182 if (blk_is_read_only(s->qdev.conf.blk)) { 2183 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2184 return 0; 2185 } 2186 trace_scsi_disk_dma_command_WRITE( 2187 (command & 0xe) == 0xe ? "And Verify " : "", 2188 r->req.cmd.lba, len); 2189 /* fall through */ 2190 case VERIFY_10: 2191 case VERIFY_12: 2192 case VERIFY_16: 2193 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2194 * As far as DMA is concerned, we can treat it the same as a write; 2195 * scsi_block_do_sgio will send VERIFY commands. 2196 */ 2197 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2198 goto illegal_request; 2199 } 2200 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2201 goto illegal_lba; 2202 } 2203 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2204 r->sector_count = len * (s->qdev.blocksize / 512); 2205 break; 2206 default: 2207 abort(); 2208 illegal_request: 2209 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2210 return 0; 2211 illegal_lba: 2212 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2213 return 0; 2214 } 2215 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2216 if (r->sector_count == 0) { 2217 scsi_req_complete(&r->req, GOOD); 2218 } 2219 assert(r->iov.iov_len == 0); 2220 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2221 return -r->sector_count * 512; 2222 } else { 2223 return r->sector_count * 512; 2224 } 2225 } 2226 2227 static void scsi_disk_reset(DeviceState *dev) 2228 { 2229 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2230 uint64_t nb_sectors; 2231 2232 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2233 2234 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2235 nb_sectors /= s->qdev.blocksize / 512; 2236 if (nb_sectors) { 2237 nb_sectors--; 2238 } 2239 s->qdev.max_lba = nb_sectors; 2240 /* reset tray statuses */ 2241 s->tray_locked = 0; 2242 s->tray_open = 0; 2243 2244 s->qdev.scsi_version = s->qdev.default_scsi_version; 2245 } 2246 2247 static void scsi_disk_resize_cb(void *opaque) 2248 { 2249 SCSIDiskState *s = opaque; 2250 2251 /* SPC lists this sense code as available only for 2252 * direct-access devices. 2253 */ 2254 if (s->qdev.type == TYPE_DISK) { 2255 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2256 } 2257 } 2258 2259 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2260 { 2261 SCSIDiskState *s = opaque; 2262 2263 /* 2264 * When a CD gets changed, we have to report an ejected state and 2265 * then a loaded state to guests so that they detect tray 2266 * open/close and media change events. Guests that do not use 2267 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2268 * states rely on this behavior. 2269 * 2270 * media_changed governs the state machine used for unit attention 2271 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
2272 */ 2273 s->media_changed = load; 2274 s->tray_open = !load; 2275 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2276 s->media_event = true; 2277 s->eject_request = false; 2278 } 2279 2280 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2281 { 2282 SCSIDiskState *s = opaque; 2283 2284 s->eject_request = true; 2285 if (force) { 2286 s->tray_locked = false; 2287 } 2288 } 2289 2290 static bool scsi_cd_is_tray_open(void *opaque) 2291 { 2292 return ((SCSIDiskState *)opaque)->tray_open; 2293 } 2294 2295 static bool scsi_cd_is_medium_locked(void *opaque) 2296 { 2297 return ((SCSIDiskState *)opaque)->tray_locked; 2298 } 2299 2300 static const BlockDevOps scsi_disk_removable_block_ops = { 2301 .change_media_cb = scsi_cd_change_media_cb, 2302 .eject_request_cb = scsi_cd_eject_request_cb, 2303 .is_tray_open = scsi_cd_is_tray_open, 2304 .is_medium_locked = scsi_cd_is_medium_locked, 2305 2306 .resize_cb = scsi_disk_resize_cb, 2307 }; 2308 2309 static const BlockDevOps scsi_disk_block_ops = { 2310 .resize_cb = scsi_disk_resize_cb, 2311 }; 2312 2313 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2314 { 2315 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2316 if (s->media_changed) { 2317 s->media_changed = false; 2318 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2319 } 2320 } 2321 2322 static void scsi_realize(SCSIDevice *dev, Error **errp) 2323 { 2324 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2325 bool read_only; 2326 2327 if (!s->qdev.conf.blk) { 2328 error_setg(errp, "drive property not set"); 2329 return; 2330 } 2331 2332 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2333 !blk_is_inserted(s->qdev.conf.blk)) { 2334 error_setg(errp, "Device needs media, but drive is empty"); 2335 return; 2336 } 2337 2338 blkconf_blocksizes(&s->qdev.conf); 2339 2340 if (s->qdev.conf.logical_block_size > 2341 s->qdev.conf.physical_block_size) { 2342 error_setg(errp, 2343 "logical_block_size > physical_block_size not supported"); 2344 return; 2345 } 2346 2347 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2348 !s->qdev.hba_supports_iothread) 2349 { 2350 error_setg(errp, "HBA does not support iothreads"); 2351 return; 2352 } 2353 2354 if (dev->type == TYPE_DISK) { 2355 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2356 return; 2357 } 2358 } 2359 2360 read_only = blk_is_read_only(s->qdev.conf.blk); 2361 if (dev->type == TYPE_ROM) { 2362 read_only = true; 2363 } 2364 2365 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2366 dev->type == TYPE_DISK, errp)) { 2367 return; 2368 } 2369 2370 if (s->qdev.conf.discard_granularity == -1) { 2371 s->qdev.conf.discard_granularity = 2372 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2373 } 2374 2375 if (!s->version) { 2376 s->version = g_strdup(qemu_hw_version()); 2377 } 2378 if (!s->vendor) { 2379 s->vendor = g_strdup("QEMU"); 2380 } 2381 if (!s->device_id) { 2382 if (s->serial) { 2383 s->device_id = g_strdup_printf("%.20s", s->serial); 2384 } else { 2385 const char *str = blk_name(s->qdev.conf.blk); 2386 if (str && *str) { 2387 s->device_id = g_strdup(str); 2388 } 2389 } 2390 } 2391 2392 if (blk_is_sg(s->qdev.conf.blk)) { 2393 error_setg(errp, "unwanted /dev/sg*"); 2394 return; 2395 } 2396 2397 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2398 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2399 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2400 } else { 2401 
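        /*
         * Fixed-media devices, and removable ones that set
         * SCSI_DISK_F_NO_REMOVABLE_DEVOPS, only register the resize callback.
         */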
blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2402 } 2403 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2404 2405 blk_iostatus_enable(s->qdev.conf.blk); 2406 } 2407 2408 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2409 { 2410 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2411 AioContext *ctx = NULL; 2412 /* can happen for devices without drive. The error message for missing 2413 * backend will be issued in scsi_realize 2414 */ 2415 if (s->qdev.conf.blk) { 2416 ctx = blk_get_aio_context(s->qdev.conf.blk); 2417 aio_context_acquire(ctx); 2418 blkconf_blocksizes(&s->qdev.conf); 2419 } 2420 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2421 s->qdev.type = TYPE_DISK; 2422 if (!s->product) { 2423 s->product = g_strdup("QEMU HARDDISK"); 2424 } 2425 scsi_realize(&s->qdev, errp); 2426 if (ctx) { 2427 aio_context_release(ctx); 2428 } 2429 } 2430 2431 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2432 { 2433 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2434 AioContext *ctx; 2435 int ret; 2436 2437 if (!dev->conf.blk) { 2438 /* Anonymous BlockBackend for an empty drive. As we put it into 2439 * dev->conf, qdev takes care of detaching on unplug. */ 2440 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2441 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2442 assert(ret == 0); 2443 } 2444 2445 ctx = blk_get_aio_context(dev->conf.blk); 2446 aio_context_acquire(ctx); 2447 s->qdev.blocksize = 2048; 2448 s->qdev.type = TYPE_ROM; 2449 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2450 if (!s->product) { 2451 s->product = g_strdup("QEMU CD-ROM"); 2452 } 2453 scsi_realize(&s->qdev, errp); 2454 aio_context_release(ctx); 2455 } 2456 2457 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2458 { 2459 DriveInfo *dinfo; 2460 Error *local_err = NULL; 2461 2462 if (!dev->conf.blk) { 2463 scsi_realize(dev, &local_err); 2464 assert(local_err); 2465 error_propagate(errp, local_err); 2466 return; 2467 } 2468 2469 dinfo = blk_legacy_dinfo(dev->conf.blk); 2470 if (dinfo && dinfo->media_cd) { 2471 scsi_cd_realize(dev, errp); 2472 } else { 2473 scsi_hd_realize(dev, errp); 2474 } 2475 } 2476 2477 static const SCSIReqOps scsi_disk_emulate_reqops = { 2478 .size = sizeof(SCSIDiskReq), 2479 .free_req = scsi_free_request, 2480 .send_command = scsi_disk_emulate_command, 2481 .read_data = scsi_disk_emulate_read_data, 2482 .write_data = scsi_disk_emulate_write_data, 2483 .get_buf = scsi_get_buf, 2484 }; 2485 2486 static const SCSIReqOps scsi_disk_dma_reqops = { 2487 .size = sizeof(SCSIDiskReq), 2488 .free_req = scsi_free_request, 2489 .send_command = scsi_disk_dma_command, 2490 .read_data = scsi_read_data, 2491 .write_data = scsi_write_data, 2492 .get_buf = scsi_get_buf, 2493 .load_request = scsi_disk_load_request, 2494 .save_request = scsi_disk_save_request, 2495 }; 2496 2497 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2498 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2499 [INQUIRY] = &scsi_disk_emulate_reqops, 2500 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2501 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2502 [START_STOP] = &scsi_disk_emulate_reqops, 2503 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2504 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2505 [READ_TOC] = &scsi_disk_emulate_reqops, 2506 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2507 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2508 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2509 
[GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2510 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2511 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2512 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2513 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2514 [SEEK_10] = &scsi_disk_emulate_reqops, 2515 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2516 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2517 [UNMAP] = &scsi_disk_emulate_reqops, 2518 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2519 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2520 [VERIFY_10] = &scsi_disk_emulate_reqops, 2521 [VERIFY_12] = &scsi_disk_emulate_reqops, 2522 [VERIFY_16] = &scsi_disk_emulate_reqops, 2523 2524 [READ_6] = &scsi_disk_dma_reqops, 2525 [READ_10] = &scsi_disk_dma_reqops, 2526 [READ_12] = &scsi_disk_dma_reqops, 2527 [READ_16] = &scsi_disk_dma_reqops, 2528 [WRITE_6] = &scsi_disk_dma_reqops, 2529 [WRITE_10] = &scsi_disk_dma_reqops, 2530 [WRITE_12] = &scsi_disk_dma_reqops, 2531 [WRITE_16] = &scsi_disk_dma_reqops, 2532 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2533 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2534 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2535 }; 2536 2537 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2538 { 2539 int i; 2540 int len = scsi_cdb_length(buf); 2541 char *line_buffer, *p; 2542 2543 line_buffer = g_malloc(len * 5 + 1); 2544 2545 for (i = 0, p = line_buffer; i < len; i++) { 2546 p += sprintf(p, " 0x%02x", buf[i]); 2547 } 2548 trace_scsi_disk_new_request(lun, tag, line_buffer); 2549 2550 g_free(line_buffer); 2551 } 2552 2553 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2554 uint8_t *buf, void *hba_private) 2555 { 2556 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2557 SCSIRequest *req; 2558 const SCSIReqOps *ops; 2559 uint8_t command; 2560 2561 command = buf[0]; 2562 ops = scsi_disk_reqops_dispatch[command]; 2563 if (!ops) { 2564 ops = &scsi_disk_emulate_reqops; 2565 } 2566 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2567 2568 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2569 scsi_disk_new_request_dump(lun, tag, buf); 2570 } 2571 2572 return req; 2573 } 2574 2575 #ifdef __linux__ 2576 static int get_device_type(SCSIDiskState *s) 2577 { 2578 uint8_t cmd[16]; 2579 uint8_t buf[36]; 2580 int ret; 2581 2582 memset(cmd, 0, sizeof(cmd)); 2583 memset(buf, 0, sizeof(buf)); 2584 cmd[0] = INQUIRY; 2585 cmd[4] = sizeof(buf); 2586 2587 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2588 buf, sizeof(buf)); 2589 if (ret < 0) { 2590 return -1; 2591 } 2592 s->qdev.type = buf[0]; 2593 if (buf[1] & 0x80) { 2594 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2595 } 2596 return 0; 2597 } 2598 2599 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2600 { 2601 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2602 AioContext *ctx; 2603 int sg_version; 2604 int rc; 2605 2606 if (!s->qdev.conf.blk) { 2607 error_setg(errp, "drive property not set"); 2608 return; 2609 } 2610 2611 if (s->rotation_rate) { 2612 error_report_once("rotation_rate is specified for scsi-block but is " 2613 "not implemented. 
This option is deprecated and will "
                          "be removed in a future version");
    }

    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);

    /* Check that we are using a driver that supports SG_IO (version 3 and later). */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        goto out;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        goto out;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        goto out;
    }

    /* Make a guess for the block size; we'll fix it when the guest sends
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Make the scsi-block device not removable via the HMP and QMP eject
     * commands.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);

out:
    aio_context_release(ctx);
}

typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];
} SCSIBlockReq;

static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to the logical sector size
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector. */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
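     * (Depending on the original opcode group and on whether the patched
     * LBA and transfer length still fit, a 6-, 10-, 12- or 16-byte CDB is
     * emitted below.)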
2706 */ 2707 io_header->cmdp = req->cdb; 2708 lba = offset / s->qdev.blocksize; 2709 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2710 2711 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2712 /* 6-byte CDB */ 2713 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2714 req->cdb[4] = nb_logical_blocks; 2715 req->cdb[5] = 0; 2716 io_header->cmd_len = 6; 2717 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2718 /* 10-byte CDB */ 2719 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2720 req->cdb[1] = req->cdb1; 2721 stl_be_p(&req->cdb[2], lba); 2722 req->cdb[6] = req->group_number; 2723 stw_be_p(&req->cdb[7], nb_logical_blocks); 2724 req->cdb[9] = 0; 2725 io_header->cmd_len = 10; 2726 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2727 /* 12-byte CDB */ 2728 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2729 req->cdb[1] = req->cdb1; 2730 stl_be_p(&req->cdb[2], lba); 2731 stl_be_p(&req->cdb[6], nb_logical_blocks); 2732 req->cdb[10] = req->group_number; 2733 req->cdb[11] = 0; 2734 io_header->cmd_len = 12; 2735 } else { 2736 /* 16-byte CDB */ 2737 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2738 req->cdb[1] = req->cdb1; 2739 stq_be_p(&req->cdb[2], lba); 2740 stl_be_p(&req->cdb[10], nb_logical_blocks); 2741 req->cdb[14] = req->group_number; 2742 req->cdb[15] = 0; 2743 io_header->cmd_len = 16; 2744 } 2745 2746 /* The rest is as in scsi-generic.c. */ 2747 io_header->mx_sb_len = sizeof(r->req.sense); 2748 io_header->sbp = r->req.sense; 2749 io_header->timeout = UINT_MAX; 2750 io_header->usr_ptr = r; 2751 io_header->flags |= SG_FLAG_DIRECT_IO; 2752 2753 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2754 assert(aiocb != NULL); 2755 return aiocb; 2756 } 2757 2758 static bool scsi_block_no_fua(SCSICommand *cmd) 2759 { 2760 return false; 2761 } 2762 2763 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2764 QEMUIOVector *iov, 2765 BlockCompletionFunc *cb, void *cb_opaque, 2766 void *opaque) 2767 { 2768 SCSIBlockReq *r = opaque; 2769 return scsi_block_do_sgio(r, offset, iov, 2770 SG_DXFER_FROM_DEV, cb, cb_opaque); 2771 } 2772 2773 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2774 QEMUIOVector *iov, 2775 BlockCompletionFunc *cb, void *cb_opaque, 2776 void *opaque) 2777 { 2778 SCSIBlockReq *r = opaque; 2779 return scsi_block_do_sgio(r, offset, iov, 2780 SG_DXFER_TO_DEV, cb, cb_opaque); 2781 } 2782 2783 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2784 { 2785 switch (buf[0]) { 2786 case VERIFY_10: 2787 case VERIFY_12: 2788 case VERIFY_16: 2789 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2790 * for the number of logical blocks specified in the length 2791 * field). For other modes, do not use scatter/gather operation. 2792 */ 2793 if ((buf[1] & 6) == 2) { 2794 return false; 2795 } 2796 break; 2797 2798 case READ_6: 2799 case READ_10: 2800 case READ_12: 2801 case READ_16: 2802 case WRITE_6: 2803 case WRITE_10: 2804 case WRITE_12: 2805 case WRITE_16: 2806 case WRITE_VERIFY_10: 2807 case WRITE_VERIFY_12: 2808 case WRITE_VERIFY_16: 2809 /* MMC writing cannot be done via DMA helpers, because it sometimes 2810 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2811 * We might use scsi_block_dma_reqops as long as no writing commands are 2812 * seen, but performance usually isn't paramount on optical media. So, 2813 * just make scsi-block operate the same as scsi-generic for them. 
2814 */ 2815 if (s->qdev.type != TYPE_ROM) { 2816 return false; 2817 } 2818 break; 2819 2820 default: 2821 break; 2822 } 2823 2824 return true; 2825 } 2826 2827 2828 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2829 { 2830 SCSIBlockReq *r = (SCSIBlockReq *)req; 2831 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2832 2833 r->cmd = req->cmd.buf[0]; 2834 switch (r->cmd >> 5) { 2835 case 0: 2836 /* 6-byte CDB. */ 2837 r->cdb1 = r->group_number = 0; 2838 break; 2839 case 1: 2840 /* 10-byte CDB. */ 2841 r->cdb1 = req->cmd.buf[1]; 2842 r->group_number = req->cmd.buf[6]; 2843 break; 2844 case 4: 2845 /* 12-byte CDB. */ 2846 r->cdb1 = req->cmd.buf[1]; 2847 r->group_number = req->cmd.buf[10]; 2848 break; 2849 case 5: 2850 /* 16-byte CDB. */ 2851 r->cdb1 = req->cmd.buf[1]; 2852 r->group_number = req->cmd.buf[14]; 2853 break; 2854 default: 2855 abort(); 2856 } 2857 2858 /* Protection information is not supported. For SCSI versions 2 and 2859 * older (as determined by snooping the guest's INQUIRY commands), 2860 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2861 */ 2862 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2863 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2864 return 0; 2865 } 2866 2867 r->req.status = &r->io_header.status; 2868 return scsi_disk_dma_command(req, buf); 2869 } 2870 2871 static const SCSIReqOps scsi_block_dma_reqops = { 2872 .size = sizeof(SCSIBlockReq), 2873 .free_req = scsi_free_request, 2874 .send_command = scsi_block_dma_command, 2875 .read_data = scsi_read_data, 2876 .write_data = scsi_write_data, 2877 .get_buf = scsi_get_buf, 2878 .load_request = scsi_disk_load_request, 2879 .save_request = scsi_disk_save_request, 2880 }; 2881 2882 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2883 uint32_t lun, uint8_t *buf, 2884 void *hba_private) 2885 { 2886 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2887 2888 if (scsi_block_is_passthrough(s, buf)) { 2889 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2890 hba_private); 2891 } else { 2892 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2893 hba_private); 2894 } 2895 } 2896 2897 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2898 uint8_t *buf, void *hba_private) 2899 { 2900 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2901 2902 if (scsi_block_is_passthrough(s, buf)) { 2903 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2904 } else { 2905 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2906 } 2907 } 2908 2909 static void scsi_block_update_sense(SCSIRequest *req) 2910 { 2911 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2912 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2913 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2914 } 2915 #endif 2916 2917 static 2918 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2919 BlockCompletionFunc *cb, void *cb_opaque, 2920 void *opaque) 2921 { 2922 SCSIDiskReq *r = opaque; 2923 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2924 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2925 } 2926 2927 static 2928 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2929 BlockCompletionFunc *cb, void *cb_opaque, 2930 void *opaque) 2931 { 2932 SCSIDiskReq *r = opaque; 2933 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2934 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2935 } 
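
/*
 * scsi_dma_readv/scsi_dma_writev above are the default DMAIOFunc
 * implementations, installed by scsi_disk_base_class_initfn below; they issue
 * plain block-layer reads and writes.  scsi-block overrides them with
 * scsi_block_dma_readv/scsi_block_dma_writev so that the same request path is
 * routed through SG_IO pass-through instead.
 */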
2936 2937 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2938 { 2939 DeviceClass *dc = DEVICE_CLASS(klass); 2940 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2941 2942 dc->fw_name = "disk"; 2943 dc->reset = scsi_disk_reset; 2944 sdc->dma_readv = scsi_dma_readv; 2945 sdc->dma_writev = scsi_dma_writev; 2946 sdc->need_fua_emulation = scsi_is_cmd_fua; 2947 } 2948 2949 static const TypeInfo scsi_disk_base_info = { 2950 .name = TYPE_SCSI_DISK_BASE, 2951 .parent = TYPE_SCSI_DEVICE, 2952 .class_init = scsi_disk_base_class_initfn, 2953 .instance_size = sizeof(SCSIDiskState), 2954 .class_size = sizeof(SCSIDiskClass), 2955 .abstract = true, 2956 }; 2957 2958 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2959 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 2960 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 2961 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2962 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2963 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2964 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2965 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 2966 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 2967 2968 2969 static Property scsi_hd_properties[] = { 2970 DEFINE_SCSI_DISK_PROPERTIES(), 2971 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2972 SCSI_DISK_F_REMOVABLE, false), 2973 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2974 SCSI_DISK_F_DPOFUA, false), 2975 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2976 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2977 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2978 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2979 DEFAULT_MAX_UNMAP_SIZE), 2980 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2981 DEFAULT_MAX_IO_SIZE), 2982 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 2983 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 2984 5), 2985 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2986 DEFINE_PROP_END_OF_LIST(), 2987 }; 2988 2989 static const VMStateDescription vmstate_scsi_disk_state = { 2990 .name = "scsi-disk", 2991 .version_id = 1, 2992 .minimum_version_id = 1, 2993 .fields = (VMStateField[]) { 2994 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 2995 VMSTATE_BOOL(media_changed, SCSIDiskState), 2996 VMSTATE_BOOL(media_event, SCSIDiskState), 2997 VMSTATE_BOOL(eject_request, SCSIDiskState), 2998 VMSTATE_BOOL(tray_open, SCSIDiskState), 2999 VMSTATE_BOOL(tray_locked, SCSIDiskState), 3000 VMSTATE_END_OF_LIST() 3001 } 3002 }; 3003 3004 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3005 { 3006 DeviceClass *dc = DEVICE_CLASS(klass); 3007 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3008 3009 sc->realize = scsi_hd_realize; 3010 sc->alloc_req = scsi_new_request; 3011 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3012 dc->desc = "virtual SCSI disk"; 3013 dc->props = scsi_hd_properties; 3014 dc->vmsd = &vmstate_scsi_disk_state; 3015 } 3016 3017 static const TypeInfo scsi_hd_info = { 3018 .name = "scsi-hd", 3019 .parent = TYPE_SCSI_DISK_BASE, 3020 .class_init = scsi_hd_class_initfn, 3021 }; 3022 3023 static Property scsi_cd_properties[] = { 3024 DEFINE_SCSI_DISK_PROPERTIES(), 3025 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3026 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3027 DEFINE_PROP_UINT16("port_index", SCSIDiskState, 
port_index, 0), 3028 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3029 DEFAULT_MAX_IO_SIZE), 3030 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3031 5), 3032 DEFINE_PROP_END_OF_LIST(), 3033 }; 3034 3035 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3036 { 3037 DeviceClass *dc = DEVICE_CLASS(klass); 3038 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3039 3040 sc->realize = scsi_cd_realize; 3041 sc->alloc_req = scsi_new_request; 3042 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3043 dc->desc = "virtual SCSI CD-ROM"; 3044 dc->props = scsi_cd_properties; 3045 dc->vmsd = &vmstate_scsi_disk_state; 3046 } 3047 3048 static const TypeInfo scsi_cd_info = { 3049 .name = "scsi-cd", 3050 .parent = TYPE_SCSI_DISK_BASE, 3051 .class_init = scsi_cd_class_initfn, 3052 }; 3053 3054 #ifdef __linux__ 3055 static Property scsi_block_properties[] = { 3056 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3057 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3058 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3059 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3060 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3061 DEFAULT_MAX_UNMAP_SIZE), 3062 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3063 DEFAULT_MAX_IO_SIZE), 3064 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3065 -1), 3066 DEFINE_PROP_END_OF_LIST(), 3067 }; 3068 3069 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3070 { 3071 DeviceClass *dc = DEVICE_CLASS(klass); 3072 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3073 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3074 3075 sc->realize = scsi_block_realize; 3076 sc->alloc_req = scsi_block_new_request; 3077 sc->parse_cdb = scsi_block_parse_cdb; 3078 sdc->dma_readv = scsi_block_dma_readv; 3079 sdc->dma_writev = scsi_block_dma_writev; 3080 sdc->update_sense = scsi_block_update_sense; 3081 sdc->need_fua_emulation = scsi_block_no_fua; 3082 dc->desc = "SCSI block device passthrough"; 3083 dc->props = scsi_block_properties; 3084 dc->vmsd = &vmstate_scsi_disk_state; 3085 } 3086 3087 static const TypeInfo scsi_block_info = { 3088 .name = "scsi-block", 3089 .parent = TYPE_SCSI_DISK_BASE, 3090 .class_init = scsi_block_class_initfn, 3091 }; 3092 #endif 3093 3094 static Property scsi_disk_properties[] = { 3095 DEFINE_SCSI_DISK_PROPERTIES(), 3096 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3097 SCSI_DISK_F_REMOVABLE, false), 3098 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3099 SCSI_DISK_F_DPOFUA, false), 3100 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3101 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3102 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3103 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3104 DEFAULT_MAX_UNMAP_SIZE), 3105 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3106 DEFAULT_MAX_IO_SIZE), 3107 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3108 5), 3109 DEFINE_PROP_END_OF_LIST(), 3110 }; 3111 3112 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3113 { 3114 DeviceClass *dc = DEVICE_CLASS(klass); 3115 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3116 3117 sc->realize = scsi_disk_realize; 3118 sc->alloc_req = scsi_new_request; 3119 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3120 
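    /*
     * Legacy catch-all device: scsi_disk_realize dispatches to
     * scsi_hd_realize or scsi_cd_realize depending on whether the backing
     * drive was created as a CD-ROM.
     */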
    dc->fw_name = "disk";
    dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
    dc->reset = scsi_disk_reset;
    dc->props = scsi_disk_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_disk_info = {
    .name          = "scsi-disk",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_disk_class_initfn,
};

static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
    type_register_static(&scsi_disk_info);
}

type_init(scsi_disk_register_types)
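
/*
 * Usage sketch (illustrative only, not part of the emulation): the device
 * types registered above are normally instantiated from the QEMU command
 * line.  The drive ids and image names below are placeholders.
 *
 *   -drive if=none,id=disk0,file=disk.qcow2 \
 *   -device scsi-hd,drive=disk0,serial=QEMU0001
 *
 *   -drive if=none,id=cd0,file=install.iso,media=cdrom \
 *   -device scsi-cd,drive=cd0
 *
 * scsi-block (Linux only) expects a drive backed by a host SCSI device node
 * and passes most commands straight through via SG_IO.
 */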